1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/compiler/code-generator.h"
6
7#include "src/arm/macro-assembler-arm.h"
8#include "src/ast/scopes.h"
9#include "src/compiler/code-generator-impl.h"
10#include "src/compiler/gap-resolver.h"
11#include "src/compiler/node-matchers.h"
12#include "src/compiler/osr.h"
13
14namespace v8 {
15namespace internal {
16namespace compiler {
17
// Shorthand: emit instructions through this code generator's macro assembler.
#define __ masm()->


// Scratch register handed to macro-assembler helpers that need a temp
// (e.g. PrepareCallCFunction below).
#define kScratchReg r9
22
23
24// Adds Arm-specific methods to convert InstructionOperands.
class ArmOperandConverter final : public InstructionOperandConverter {
 public:
  ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  // Returns SetCC when this instruction's flags mode requires the condition
  // flags to be set (branch/deoptimize/set), LeaveCC otherwise.
  SBit OutputSBit() const {
    switch (instr_->flags_mode()) {
      case kFlags_branch:
      case kFlags_deoptimize:
      case kFlags_set:
        return SetCC;
      case kFlags_none:
        return LeaveCC;
    }
    UNREACHABLE();
    return LeaveCC;  // Unreachable; keeps the compiler's return checking happy.
  }

  // Converts the constant input at {index} into an assembler Operand.
  // Supported kinds: 32-bit integers, and float32/float64 constants (boxed
  // as tenured heap numbers). All other constant kinds are unreachable here.
  Operand InputImmediate(size_t index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kInt64:
      case Constant::kExternalReference:
      case Constant::kHeapObject:
      case Constant::kRpoNumber:
        break;
    }
    UNREACHABLE();
    return Operand::Zero();  // Unreachable; keeps return checking happy.
  }

  // Decodes the instruction's addressing mode into an ARM flexible second
  // operand (immediate, plain register, or shifted register), consuming
  // inputs starting at {first_index}. Memory addressing modes (None,
  // Offset_RI, Offset_RR) are unreachable here — use InputOffset instead.
  Operand InputOperand2(size_t first_index) {
    const size_t index = first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Offset_RI:
      case kMode_Offset_RR:
        break;
      case kMode_Operand2_I:
        return InputImmediate(index + 0);
      case kMode_Operand2_R:
        return Operand(InputRegister(index + 0));
      case kMode_Operand2_R_ASR_I:
        return Operand(InputRegister(index + 0), ASR, InputInt5(index + 1));
      case kMode_Operand2_R_ASR_R:
        return Operand(InputRegister(index + 0), ASR, InputRegister(index + 1));
      case kMode_Operand2_R_LSL_I:
        return Operand(InputRegister(index + 0), LSL, InputInt5(index + 1));
      case kMode_Operand2_R_LSL_R:
        return Operand(InputRegister(index + 0), LSL, InputRegister(index + 1));
      case kMode_Operand2_R_LSR_I:
        return Operand(InputRegister(index + 0), LSR, InputInt5(index + 1));
      case kMode_Operand2_R_LSR_R:
        return Operand(InputRegister(index + 0), LSR, InputRegister(index + 1));
      case kMode_Operand2_R_ROR_I:
        return Operand(InputRegister(index + 0), ROR, InputInt5(index + 1));
      case kMode_Operand2_R_ROR_R:
        return Operand(InputRegister(index + 0), ROR, InputRegister(index + 1));
    }
    UNREACHABLE();
    return Operand::Zero();  // Unreachable; keeps return checking happy.
  }

  // Decodes the instruction's addressing mode into a MemOperand, consuming
  // inputs starting at {*first_index} and advancing {*first_index} past the
  // inputs used. Operand2-style modes (except R_LSL_I, which doubles as a
  // scaled-index memory mode) are unreachable here.
  MemOperand InputOffset(size_t* first_index) {
    const size_t index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Operand2_I:
      case kMode_Operand2_R:
      case kMode_Operand2_R_ASR_I:
      case kMode_Operand2_R_ASR_R:
      case kMode_Operand2_R_LSL_R:
      case kMode_Operand2_R_LSR_I:
      case kMode_Operand2_R_LSR_R:
      case kMode_Operand2_R_ROR_I:
      case kMode_Operand2_R_ROR_R:
        break;
      case kMode_Operand2_R_LSL_I:
        *first_index += 3;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
                          LSL, InputInt32(index + 2));
      case kMode_Offset_RI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_Offset_RR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
    }
    UNREACHABLE();
    return MemOperand(r0);  // Unreachable; keeps return checking happy.
  }

  // Convenience overload for callers that do not need the updated index.
  MemOperand InputOffset(size_t first_index = 0) {
    return InputOffset(&first_index);
  }

  // Converts an allocated stack-slot operand into a MemOperand.
  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK_NOT_NULL(op);
    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
  }

  // Resolves a frame slot index to an sp- or fp-relative MemOperand,
  // depending on the current frame access state.
  MemOperand SlotToMemOperand(int slot) const {
    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};
140
141
142namespace {
143
// Out-of-line path for a checked single-precision load whose bounds check
// failed (see ASSEMBLE_CHECKED_LOAD_FP): produces a quiet NaN in {result_}.
class OutOfLineLoadFloat final : public OutOfLineCode {
 public:
  OutOfLineLoadFloat(CodeGenerator* gen, SwVfpRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    // Compute sqrtf(-1.0f), which results in a quiet single-precision NaN.
    __ vmov(result_, -1.0f);
    __ vsqrt(result_, result_);
  }

 private:
  SwVfpRegister const result_;  // Destination register for the NaN.
};
158
// Out-of-line path for a checked double-precision load whose bounds check
// failed (see ASSEMBLE_CHECKED_LOAD_FP): produces a quiet NaN in {result_}.
class OutOfLineLoadDouble final : public OutOfLineCode {
 public:
  OutOfLineLoadDouble(CodeGenerator* gen, DwVfpRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    // Compute sqrt(-1.0), which results in a quiet double-precision NaN.
    __ vmov(result_, -1.0);
    __ vsqrt(result_, result_);
  }

 private:
  DwVfpRegister const result_;  // Destination register for the NaN.
};
173
174
// Out-of-line path for a checked integer load whose bounds check failed
// (see ASSEMBLE_CHECKED_LOAD_INTEGER): zeroes the result register.
class OutOfLineLoadInteger final : public OutOfLineCode {
 public:
  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ mov(result_, Operand::Zero()); }

 private:
  Register const result_;  // Destination register, zeroed out of line.
};
185
186
187class OutOfLineRecordWrite final : public OutOfLineCode {
188 public:
189  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
190                       Register value, Register scratch0, Register scratch1,
191                       RecordWriteMode mode)
192      : OutOfLineCode(gen),
193        object_(object),
194        index_(index),
195        index_immediate_(0),
196        value_(value),
197        scratch0_(scratch0),
198        scratch1_(scratch1),
199        mode_(mode),
200        must_save_lr_(!gen->frame_access_state()->has_frame()) {}
201
202  OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t index,
203                       Register value, Register scratch0, Register scratch1,
204                       RecordWriteMode mode)
205      : OutOfLineCode(gen),
206        object_(object),
207        index_(no_reg),
208        index_immediate_(index),
209        value_(value),
210        scratch0_(scratch0),
211        scratch1_(scratch1),
212        mode_(mode),
213        must_save_lr_(!gen->frame_access_state()->has_frame()) {}
214
215  void Generate() final {
216    if (mode_ > RecordWriteMode::kValueIsPointer) {
217      __ JumpIfSmi(value_, exit());
218    }
219    __ CheckPageFlag(value_, scratch0_,
220                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
221                     exit());
222    RememberedSetAction const remembered_set_action =
223        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
224                                             : OMIT_REMEMBERED_SET;
225    SaveFPRegsMode const save_fp_mode =
226        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
227    if (must_save_lr_) {
228      // We need to save and restore lr if the frame was elided.
229      __ Push(lr);
230    }
231    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
232                         remembered_set_action, save_fp_mode);
233    if (index_.is(no_reg)) {
234      __ add(scratch1_, object_, Operand(index_immediate_));
235    } else {
236      DCHECK_EQ(0, index_immediate_);
237      __ add(scratch1_, object_, Operand(index_));
238    }
239    __ CallStub(&stub);
240    if (must_save_lr_) {
241      __ Pop(lr);
242    }
243  }
244
245 private:
246  Register const object_;
247  Register const index_;
248  int32_t const index_immediate_;  // Valid if index_.is(no_reg).
249  Register const value_;
250  Register const scratch0_;
251  Register const scratch1_;
252  RecordWriteMode const mode_;
253  bool must_save_lr_;
254};
255
256
257Condition FlagsConditionToCondition(FlagsCondition condition) {
258  switch (condition) {
259    case kEqual:
260      return eq;
261    case kNotEqual:
262      return ne;
263    case kSignedLessThan:
264      return lt;
265    case kSignedGreaterThanOrEqual:
266      return ge;
267    case kSignedLessThanOrEqual:
268      return le;
269    case kSignedGreaterThan:
270      return gt;
271    case kUnsignedLessThan:
272      return lo;
273    case kUnsignedGreaterThanOrEqual:
274      return hs;
275    case kUnsignedLessThanOrEqual:
276      return ls;
277    case kUnsignedGreaterThan:
278      return hi;
279    case kFloatLessThanOrUnordered:
280      return lt;
281    case kFloatGreaterThanOrEqual:
282      return ge;
283    case kFloatLessThanOrEqual:
284      return ls;
285    case kFloatGreaterThanOrUnordered:
286      return hi;
287    case kFloatLessThan:
288      return lo;
289    case kFloatGreaterThanOrEqualOrUnordered:
290      return hs;
291    case kFloatLessThanOrEqualOrUnordered:
292      return le;
293    case kFloatGreaterThan:
294      return gt;
295    case kOverflow:
296      return vs;
297    case kNotOverflow:
298      return vc;
299    default:
300      break;
301  }
302  UNREACHABLE();
303  return kNoCondition;
304}
305
306}  // namespace
307
// Bounds-checked FP load: compares offset against the length input and
// branches (hs = unsigned >=) to an out-of-line stub that yields NaN.
#define ASSEMBLE_CHECKED_LOAD_FP(Type)                         \
  do {                                                         \
    auto result = i.Output##Type##Register();                  \
    auto offset = i.InputRegister(0);                          \
    if (instr->InputAt(1)->IsRegister()) {                     \
      __ cmp(offset, i.InputRegister(1));                      \
    } else {                                                   \
      __ cmp(offset, i.InputImmediate(1));                     \
    }                                                          \
    auto ool = new (zone()) OutOfLineLoad##Type(this, result); \
    __ b(hs, ool->entry());                                    \
    __ vldr(result, i.InputOffset(2));                         \
    __ bind(ool->exit());                                      \
    DCHECK_EQ(LeaveCC, i.OutputSBit());                        \
  } while (0)

// Bounds-checked integer load: out-of-bounds branches to a stub that
// zeroes the result register.
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                \
  do {                                                          \
    auto result = i.OutputRegister();                           \
    auto offset = i.InputRegister(0);                           \
    if (instr->InputAt(1)->IsRegister()) {                      \
      __ cmp(offset, i.InputRegister(1));                       \
    } else {                                                    \
      __ cmp(offset, i.InputImmediate(1));                      \
    }                                                           \
    auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
    __ b(hs, ool->entry());                                     \
    __ asm_instr(result, i.InputOffset(2));                     \
    __ bind(ool->exit());                                       \
    DCHECK_EQ(LeaveCC, i.OutputSBit());                         \
  } while (0)

// Bounds-checked FP store: the store is predicated on lo (unsigned <),
// so an out-of-bounds store is simply skipped.
#define ASSEMBLE_CHECKED_STORE_FP(Type)      \
  do {                                       \
    auto offset = i.InputRegister(0);        \
    if (instr->InputAt(1)->IsRegister()) {   \
      __ cmp(offset, i.InputRegister(1));    \
    } else {                                 \
      __ cmp(offset, i.InputImmediate(1));   \
    }                                        \
    auto value = i.Input##Type##Register(2); \
    __ vstr(value, i.InputOffset(3), lo);    \
    DCHECK_EQ(LeaveCC, i.OutputSBit());      \
  } while (0)

// Bounds-checked integer store: predicated on lo, skipped when out of
// bounds.
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
  do {                                            \
    auto offset = i.InputRegister(0);             \
    if (instr->InputAt(1)->IsRegister()) {        \
      __ cmp(offset, i.InputRegister(1));         \
    } else {                                      \
      __ cmp(offset, i.InputImmediate(1));        \
    }                                             \
    auto value = i.InputRegister(2);              \
    __ asm_instr(value, i.InputOffset(3), lo);    \
    DCHECK_EQ(LeaveCC, i.OutputSBit());           \
  } while (0)

// Atomic load: plain load followed by a data memory barrier (dmb ISH) to
// order it before subsequent memory accesses.
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr)                       \
  do {                                                                \
    __ asm_instr(i.OutputRegister(),                                  \
                 MemOperand(i.InputRegister(0), i.InputRegister(1))); \
    __ dmb(ISH);                                                      \
  } while (0)

// Atomic store: barriers on both sides of the store (dmb ISH before and
// after) to order it against surrounding memory accesses.
#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr)                      \
  do {                                                                \
    __ dmb(ISH);                                                      \
    __ asm_instr(i.InputRegister(2),                                  \
                 MemOperand(i.InputRegister(0), i.InputRegister(1))); \
    __ dmb(ISH);                                                      \
  } while (0)

// Calls the C implementation of a two-argument ieee754 math function
// (0 integer params, 2 double params) and moves the double result back.
#define ASSEMBLE_IEEE754_BINOP(name)                                           \
  do {                                                                         \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */                      \
    FrameScope scope(masm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 2, kScratchReg);                                \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                          \
                            i.InputDoubleRegister(1));                         \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()),  \
                     0, 2);                                                    \
    /* Move the result in the double result register. */                       \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
    DCHECK_EQ(LeaveCC, i.OutputSBit());                                        \
  } while (0)

// Calls the C implementation of a one-argument ieee754 math function
// (0 integer params, 1 double param) and moves the double result back.
#define ASSEMBLE_IEEE754_UNOP(name)                                            \
  do {                                                                         \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */                      \
    FrameScope scope(masm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 1, kScratchReg);                                \
    __ MovToFloatParameter(i.InputDoubleRegister(0));                          \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()),  \
                     0, 1);                                                    \
    /* Move the result in the double result register. */                       \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
    DCHECK_EQ(LeaveCC, i.OutputSBit());                                        \
  } while (0)
409
// Tears down the current stack frame via the macro assembler's LeaveFrame.
void CodeGenerator::AssembleDeconstructFrame() {
  __ LeaveFrame(StackFrame::MANUAL);
}
413
414void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
415  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
416  if (sp_slot_delta > 0) {
417    __ add(sp, sp, Operand(sp_slot_delta * kPointerSize));
418  }
419  frame_access_state()->SetFrameAccessToDefault();
420}
421
422
// Prepares the stack and registers for a tail call: grows the stack when
// the callee needs more slots than the caller provided (negative slot
// delta), restores the caller's saved registers from the current frame,
// and switches frame accesses to be sp-relative.
void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta < 0) {
    // Callee needs more stack space than the caller's frame provides.
    __ sub(sp, sp, Operand(-sp_slot_delta * kPointerSize));
    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
  }
  if (frame_access_state()->has_frame()) {
    if (FLAG_enable_embedded_constant_pool) {
      // NOTE(review): restores into cp from the constant-pool slot —
      // confirm cp (vs. a dedicated pool register) is intended here.
      __ ldr(cp, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
    }
    // Reload the caller's return address and frame pointer so the tail
    // callee returns directly to our caller.
    __ ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
    __ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  }
  frame_access_state()->SetFrameAccessToSP();
}
438
// If the current frame is an arguments adaptor frame, prepares the stack
// for a tail call using the caller's (untagged) argument count from that
// frame; otherwise falls through. {args_reg} holds the callee argument
// count; all scratch registers may be clobbered.
void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Register scratch3) {
  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
  Label done;

  // Check if current frame is an arguments adaptor frame.
  __ ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(ne, &done);

  // Load arguments count from current arguments adaptor frame (note, it
  // does not include receiver).
  Register caller_args_count_reg = scratch1;
  __ ldr(caller_args_count_reg,
         MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);

  ParameterCount callee_args_count(args_reg);
  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
                        scratch3);
  __ bind(&done);
}
463
464// Assembles an instruction after register allocation, producing machine code.
465CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
466    Instruction* instr) {
467  ArmOperandConverter i(this, instr);
468
469  __ MaybeCheckConstPool();
470  InstructionCode opcode = instr->opcode();
471  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
472  switch (arch_opcode) {
473    case kArchCallCodeObject: {
474      EnsureSpaceForLazyDeopt();
475      if (instr->InputAt(0)->IsImmediate()) {
476        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
477                RelocInfo::CODE_TARGET);
478      } else {
479        __ add(ip, i.InputRegister(0),
480               Operand(Code::kHeaderSize - kHeapObjectTag));
481        __ Call(ip);
482      }
483      RecordCallPosition(instr);
484      DCHECK_EQ(LeaveCC, i.OutputSBit());
485      frame_access_state()->ClearSPDelta();
486      break;
487    }
488    case kArchTailCallCodeObjectFromJSFunction:
489    case kArchTailCallCodeObject: {
490      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
491      AssembleDeconstructActivationRecord(stack_param_delta);
492      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
493        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
494                                         i.TempRegister(0), i.TempRegister(1),
495                                         i.TempRegister(2));
496      }
497      if (instr->InputAt(0)->IsImmediate()) {
498        __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
499                RelocInfo::CODE_TARGET);
500      } else {
501        __ add(ip, i.InputRegister(0),
502               Operand(Code::kHeaderSize - kHeapObjectTag));
503        __ Jump(ip);
504      }
505      DCHECK_EQ(LeaveCC, i.OutputSBit());
506      frame_access_state()->ClearSPDelta();
507      break;
508    }
509    case kArchTailCallAddress: {
510      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
511      AssembleDeconstructActivationRecord(stack_param_delta);
512      CHECK(!instr->InputAt(0)->IsImmediate());
513      __ Jump(i.InputRegister(0));
514      frame_access_state()->ClearSPDelta();
515      break;
516    }
517    case kArchCallJSFunction: {
518      EnsureSpaceForLazyDeopt();
519      Register func = i.InputRegister(0);
520      if (FLAG_debug_code) {
521        // Check the function's context matches the context argument.
522        __ ldr(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
523        __ cmp(cp, kScratchReg);
524        __ Assert(eq, kWrongFunctionContext);
525      }
526      __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
527      __ Call(ip);
528      RecordCallPosition(instr);
529      DCHECK_EQ(LeaveCC, i.OutputSBit());
530      frame_access_state()->ClearSPDelta();
531      break;
532    }
533    case kArchTailCallJSFunctionFromJSFunction:
534    case kArchTailCallJSFunction: {
535      Register func = i.InputRegister(0);
536      if (FLAG_debug_code) {
537        // Check the function's context matches the context argument.
538        __ ldr(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
539        __ cmp(cp, kScratchReg);
540        __ Assert(eq, kWrongFunctionContext);
541      }
542      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
543      AssembleDeconstructActivationRecord(stack_param_delta);
544      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
545        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
546                                         i.TempRegister(0), i.TempRegister(1),
547                                         i.TempRegister(2));
548      }
549      __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
550      __ Jump(ip);
551      DCHECK_EQ(LeaveCC, i.OutputSBit());
552      frame_access_state()->ClearSPDelta();
553      break;
554    }
555    case kArchPrepareCallCFunction: {
556      int const num_parameters = MiscField::decode(instr->opcode());
557      __ PrepareCallCFunction(num_parameters, kScratchReg);
558      // Frame alignment requires using FP-relative frame addressing.
559      frame_access_state()->SetFrameAccessToFP();
560      break;
561    }
562    case kArchPrepareTailCall:
563      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
564      break;
565    case kArchCallCFunction: {
566      int const num_parameters = MiscField::decode(instr->opcode());
567      if (instr->InputAt(0)->IsImmediate()) {
568        ExternalReference ref = i.InputExternalReference(0);
569        __ CallCFunction(ref, num_parameters);
570      } else {
571        Register func = i.InputRegister(0);
572        __ CallCFunction(func, num_parameters);
573      }
574      frame_access_state()->SetFrameAccessToDefault();
575      frame_access_state()->ClearSPDelta();
576      break;
577    }
578    case kArchJmp:
579      AssembleArchJump(i.InputRpo(0));
580      DCHECK_EQ(LeaveCC, i.OutputSBit());
581      break;
582    case kArchLookupSwitch:
583      AssembleArchLookupSwitch(instr);
584      DCHECK_EQ(LeaveCC, i.OutputSBit());
585      break;
586    case kArchTableSwitch:
587      AssembleArchTableSwitch(instr);
588      DCHECK_EQ(LeaveCC, i.OutputSBit());
589      break;
590    case kArchDebugBreak:
591      __ stop("kArchDebugBreak");
592      break;
593    case kArchComment: {
594      Address comment_string = i.InputExternalReference(0).address();
595      __ RecordComment(reinterpret_cast<const char*>(comment_string));
596      break;
597    }
598    case kArchNop:
599    case kArchThrowTerminator:
600      // don't emit code for nops.
601      DCHECK_EQ(LeaveCC, i.OutputSBit());
602      break;
603    case kArchDeoptimize: {
604      int deopt_state_id =
605          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
606      Deoptimizer::BailoutType bailout_type =
607          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
608      CodeGenResult result =
609          AssembleDeoptimizerCall(deopt_state_id, bailout_type);
610      if (result != kSuccess) return result;
611      break;
612    }
613    case kArchRet:
614      AssembleReturn();
615      DCHECK_EQ(LeaveCC, i.OutputSBit());
616      break;
617    case kArchStackPointer:
618      __ mov(i.OutputRegister(), sp);
619      DCHECK_EQ(LeaveCC, i.OutputSBit());
620      break;
621    case kArchFramePointer:
622      __ mov(i.OutputRegister(), fp);
623      DCHECK_EQ(LeaveCC, i.OutputSBit());
624      break;
625    case kArchParentFramePointer:
626      if (frame_access_state()->has_frame()) {
627        __ ldr(i.OutputRegister(), MemOperand(fp, 0));
628      } else {
629        __ mov(i.OutputRegister(), fp);
630      }
631      break;
632    case kArchTruncateDoubleToI:
633      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
634      DCHECK_EQ(LeaveCC, i.OutputSBit());
635      break;
636    case kArchStoreWithWriteBarrier: {
637      RecordWriteMode mode =
638          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
639      Register object = i.InputRegister(0);
640      Register value = i.InputRegister(2);
641      Register scratch0 = i.TempRegister(0);
642      Register scratch1 = i.TempRegister(1);
643      OutOfLineRecordWrite* ool;
644
645      AddressingMode addressing_mode =
646          AddressingModeField::decode(instr->opcode());
647      if (addressing_mode == kMode_Offset_RI) {
648        int32_t index = i.InputInt32(1);
649        ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
650                                                scratch0, scratch1, mode);
651        __ str(value, MemOperand(object, index));
652      } else {
653        DCHECK_EQ(kMode_Offset_RR, addressing_mode);
654        Register index(i.InputRegister(1));
655        ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
656                                                scratch0, scratch1, mode);
657        __ str(value, MemOperand(object, index));
658      }
659      __ CheckPageFlag(object, scratch0,
660                       MemoryChunk::kPointersFromHereAreInterestingMask, ne,
661                       ool->entry());
662      __ bind(ool->exit());
663      break;
664    }
665    case kArchStackSlot: {
666      FrameOffset offset =
667          frame_access_state()->GetFrameOffset(i.InputInt32(0));
668      Register base;
669      if (offset.from_stack_pointer()) {
670        base = sp;
671      } else {
672        base = fp;
673      }
674      __ add(i.OutputRegister(0), base, Operand(offset.offset()));
675      break;
676    }
677    case kIeee754Float64Atan:
678      ASSEMBLE_IEEE754_UNOP(atan);
679      break;
680    case kIeee754Float64Atan2:
681      ASSEMBLE_IEEE754_BINOP(atan2);
682      break;
683    case kIeee754Float64Cbrt:
684      ASSEMBLE_IEEE754_UNOP(cbrt);
685      break;
686    case kIeee754Float64Cos:
687      ASSEMBLE_IEEE754_UNOP(cos);
688      break;
689    case kIeee754Float64Exp:
690      ASSEMBLE_IEEE754_UNOP(exp);
691      break;
692    case kIeee754Float64Expm1:
693      ASSEMBLE_IEEE754_UNOP(expm1);
694      break;
695    case kIeee754Float64Atanh:
696      ASSEMBLE_IEEE754_UNOP(atanh);
697      break;
698    case kIeee754Float64Log:
699      ASSEMBLE_IEEE754_UNOP(log);
700      break;
701    case kIeee754Float64Log1p:
702      ASSEMBLE_IEEE754_UNOP(log1p);
703      break;
704    case kIeee754Float64Log2:
705      ASSEMBLE_IEEE754_UNOP(log2);
706      break;
707    case kIeee754Float64Log10:
708      ASSEMBLE_IEEE754_UNOP(log10);
709      break;
710    case kIeee754Float64Sin:
711      ASSEMBLE_IEEE754_UNOP(sin);
712      break;
713    case kIeee754Float64Tan:
714      ASSEMBLE_IEEE754_UNOP(tan);
715      break;
716    case kArmAdd:
717      __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
718             i.OutputSBit());
719      break;
720    case kArmAnd:
721      __ and_(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
722              i.OutputSBit());
723      break;
724    case kArmBic:
725      __ bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
726             i.OutputSBit());
727      break;
728    case kArmMul:
729      __ mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
730             i.OutputSBit());
731      break;
732    case kArmMla:
733      __ mla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
734             i.InputRegister(2), i.OutputSBit());
735      break;
736    case kArmMls: {
737      CpuFeatureScope scope(masm(), ARMv7);
738      __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
739             i.InputRegister(2));
740      DCHECK_EQ(LeaveCC, i.OutputSBit());
741      break;
742    }
743    case kArmSmmul:
744      __ smmul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
745      DCHECK_EQ(LeaveCC, i.OutputSBit());
746      break;
747    case kArmSmmla:
748      __ smmla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
749               i.InputRegister(2));
750      DCHECK_EQ(LeaveCC, i.OutputSBit());
751      break;
752    case kArmUmull:
753      __ umull(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
754               i.InputRegister(1), i.OutputSBit());
755      break;
756    case kArmSdiv: {
757      CpuFeatureScope scope(masm(), SUDIV);
758      __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
759      DCHECK_EQ(LeaveCC, i.OutputSBit());
760      break;
761    }
762    case kArmUdiv: {
763      CpuFeatureScope scope(masm(), SUDIV);
764      __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
765      DCHECK_EQ(LeaveCC, i.OutputSBit());
766      break;
767    }
768    case kArmMov:
769      __ Move(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
770      break;
771    case kArmMvn:
772      __ mvn(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
773      break;
774    case kArmOrr:
775      __ orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
776             i.OutputSBit());
777      break;
778    case kArmEor:
779      __ eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
780             i.OutputSBit());
781      break;
782    case kArmSub:
783      __ sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
784             i.OutputSBit());
785      break;
786    case kArmRsb:
787      __ rsb(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
788             i.OutputSBit());
789      break;
790    case kArmBfc: {
791      CpuFeatureScope scope(masm(), ARMv7);
792      __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
793      DCHECK_EQ(LeaveCC, i.OutputSBit());
794      break;
795    }
796    case kArmUbfx: {
797      CpuFeatureScope scope(masm(), ARMv7);
798      __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
799              i.InputInt8(2));
800      DCHECK_EQ(LeaveCC, i.OutputSBit());
801      break;
802    }
803    case kArmSbfx: {
804      CpuFeatureScope scope(masm(), ARMv7);
805      __ sbfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
806              i.InputInt8(2));
807      DCHECK_EQ(LeaveCC, i.OutputSBit());
808      break;
809    }
810    case kArmSxtb:
811      __ sxtb(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
812      DCHECK_EQ(LeaveCC, i.OutputSBit());
813      break;
814    case kArmSxth:
815      __ sxth(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
816      DCHECK_EQ(LeaveCC, i.OutputSBit());
817      break;
818    case kArmSxtab:
819      __ sxtab(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
820               i.InputInt32(2));
821      DCHECK_EQ(LeaveCC, i.OutputSBit());
822      break;
823    case kArmSxtah:
824      __ sxtah(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
825               i.InputInt32(2));
826      DCHECK_EQ(LeaveCC, i.OutputSBit());
827      break;
828    case kArmUxtb:
829      __ uxtb(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
830      DCHECK_EQ(LeaveCC, i.OutputSBit());
831      break;
832    case kArmUxth:
833      __ uxth(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
834      DCHECK_EQ(LeaveCC, i.OutputSBit());
835      break;
836    case kArmUxtab:
837      __ uxtab(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
838               i.InputInt32(2));
839      DCHECK_EQ(LeaveCC, i.OutputSBit());
840      break;
841    case kArmUxtah:
842      __ uxtah(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
843               i.InputInt32(2));
844      DCHECK_EQ(LeaveCC, i.OutputSBit());
845      break;
846    case kArmRbit: {
847      CpuFeatureScope scope(masm(), ARMv7);
848      __ rbit(i.OutputRegister(), i.InputRegister(0));
849      DCHECK_EQ(LeaveCC, i.OutputSBit());
850      break;
851    }
852    case kArmClz:
853      __ clz(i.OutputRegister(), i.InputRegister(0));
854      DCHECK_EQ(LeaveCC, i.OutputSBit());
855      break;
856    case kArmCmp:
857      __ cmp(i.InputRegister(0), i.InputOperand2(1));
858      DCHECK_EQ(SetCC, i.OutputSBit());
859      break;
860    case kArmCmn:
861      __ cmn(i.InputRegister(0), i.InputOperand2(1));
862      DCHECK_EQ(SetCC, i.OutputSBit());
863      break;
864    case kArmTst:
865      __ tst(i.InputRegister(0), i.InputOperand2(1));
866      DCHECK_EQ(SetCC, i.OutputSBit());
867      break;
868    case kArmTeq:
869      __ teq(i.InputRegister(0), i.InputOperand2(1));
870      DCHECK_EQ(SetCC, i.OutputSBit());
871      break;
872    case kArmAddPair:
873      // i.InputRegister(0) ... left low word.
874      // i.InputRegister(1) ... left high word.
875      // i.InputRegister(2) ... right low word.
876      // i.InputRegister(3) ... right high word.
877      __ add(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2),
878             SBit::SetCC);
879      __ adc(i.OutputRegister(1), i.InputRegister(1),
880             Operand(i.InputRegister(3)));
881      DCHECK_EQ(LeaveCC, i.OutputSBit());
882      break;
883    case kArmSubPair:
884      // i.InputRegister(0) ... left low word.
885      // i.InputRegister(1) ... left high word.
886      // i.InputRegister(2) ... right low word.
887      // i.InputRegister(3) ... right high word.
888      __ sub(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2),
889             SBit::SetCC);
890      __ sbc(i.OutputRegister(1), i.InputRegister(1),
891             Operand(i.InputRegister(3)));
892      DCHECK_EQ(LeaveCC, i.OutputSBit());
893      break;
894    case kArmMulPair:
895      // i.InputRegister(0) ... left low word.
896      // i.InputRegister(1) ... left high word.
897      // i.InputRegister(2) ... right low word.
898      // i.InputRegister(3) ... right high word.
899      __ umull(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
900               i.InputRegister(2));
901      __ mla(i.OutputRegister(1), i.InputRegister(0), i.InputRegister(3),
902             i.OutputRegister(1));
903      __ mla(i.OutputRegister(1), i.InputRegister(2), i.InputRegister(1),
904             i.OutputRegister(1));
905      break;
906    case kArmLslPair:
907      if (instr->InputAt(2)->IsImmediate()) {
908        __ LslPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
909                   i.InputRegister(1), i.InputInt32(2));
910      } else {
911        __ LslPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
912                   i.InputRegister(1), kScratchReg, i.InputRegister(2));
913      }
914      break;
915    case kArmLsrPair:
916      if (instr->InputAt(2)->IsImmediate()) {
917        __ LsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
918                   i.InputRegister(1), i.InputInt32(2));
919      } else {
920        __ LsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
921                   i.InputRegister(1), kScratchReg, i.InputRegister(2));
922      }
923      break;
924    case kArmAsrPair:
925      if (instr->InputAt(2)->IsImmediate()) {
926        __ AsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
927                   i.InputRegister(1), i.InputInt32(2));
928      } else {
929        __ AsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
930                   i.InputRegister(1), kScratchReg, i.InputRegister(2));
931      }
932      break;
933    case kArmVcmpF32:
934      if (instr->InputAt(1)->IsFPRegister()) {
935        __ VFPCompareAndSetFlags(i.InputFloatRegister(0),
936                                 i.InputFloatRegister(1));
937      } else {
938        DCHECK(instr->InputAt(1)->IsImmediate());
939        // 0.0 is the only immediate supported by vcmp instructions.
940        DCHECK(i.InputFloat32(1) == 0.0f);
941        __ VFPCompareAndSetFlags(i.InputFloatRegister(0), i.InputFloat32(1));
942      }
943      DCHECK_EQ(SetCC, i.OutputSBit());
944      break;
945    case kArmVaddF32:
946      __ vadd(i.OutputFloatRegister(), i.InputFloatRegister(0),
947              i.InputFloatRegister(1));
948      DCHECK_EQ(LeaveCC, i.OutputSBit());
949      break;
950    case kArmVsubF32:
951      __ vsub(i.OutputFloatRegister(), i.InputFloatRegister(0),
952              i.InputFloatRegister(1));
953      DCHECK_EQ(LeaveCC, i.OutputSBit());
954      break;
955    case kArmVmulF32:
956      __ vmul(i.OutputFloatRegister(), i.InputFloatRegister(0),
957              i.InputFloatRegister(1));
958      DCHECK_EQ(LeaveCC, i.OutputSBit());
959      break;
960    case kArmVmlaF32:
961      __ vmla(i.OutputFloatRegister(), i.InputFloatRegister(1),
962              i.InputFloatRegister(2));
963      DCHECK_EQ(LeaveCC, i.OutputSBit());
964      break;
965    case kArmVmlsF32:
966      __ vmls(i.OutputFloatRegister(), i.InputFloatRegister(1),
967              i.InputFloatRegister(2));
968      DCHECK_EQ(LeaveCC, i.OutputSBit());
969      break;
970    case kArmVdivF32:
971      __ vdiv(i.OutputFloatRegister(), i.InputFloatRegister(0),
972              i.InputFloatRegister(1));
973      DCHECK_EQ(LeaveCC, i.OutputSBit());
974      break;
975    case kArmVsqrtF32:
976      __ vsqrt(i.OutputFloatRegister(), i.InputFloatRegister(0));
977      break;
978    case kArmVabsF32:
979      __ vabs(i.OutputFloatRegister(), i.InputFloatRegister(0));
980      break;
981    case kArmVnegF32:
982      __ vneg(i.OutputFloatRegister(), i.InputFloatRegister(0));
983      break;
984    case kArmVcmpF64:
985      if (instr->InputAt(1)->IsFPRegister()) {
986        __ VFPCompareAndSetFlags(i.InputDoubleRegister(0),
987                                 i.InputDoubleRegister(1));
988      } else {
989        DCHECK(instr->InputAt(1)->IsImmediate());
990        // 0.0 is the only immediate supported by vcmp instructions.
991        DCHECK(i.InputDouble(1) == 0.0);
992        __ VFPCompareAndSetFlags(i.InputDoubleRegister(0), i.InputDouble(1));
993      }
994      DCHECK_EQ(SetCC, i.OutputSBit());
995      break;
996    case kArmVaddF64:
997      __ vadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
998              i.InputDoubleRegister(1));
999      DCHECK_EQ(LeaveCC, i.OutputSBit());
1000      break;
1001    case kArmVsubF64:
1002      __ vsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1003              i.InputDoubleRegister(1));
1004      DCHECK_EQ(LeaveCC, i.OutputSBit());
1005      break;
1006    case kArmVmulF64:
1007      __ vmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1008              i.InputDoubleRegister(1));
1009      DCHECK_EQ(LeaveCC, i.OutputSBit());
1010      break;
1011    case kArmVmlaF64:
1012      __ vmla(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
1013              i.InputDoubleRegister(2));
1014      DCHECK_EQ(LeaveCC, i.OutputSBit());
1015      break;
1016    case kArmVmlsF64:
1017      __ vmls(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
1018              i.InputDoubleRegister(2));
1019      DCHECK_EQ(LeaveCC, i.OutputSBit());
1020      break;
1021    case kArmVdivF64:
1022      __ vdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1023              i.InputDoubleRegister(1));
1024      DCHECK_EQ(LeaveCC, i.OutputSBit());
1025      break;
1026    case kArmVmodF64: {
1027      // TODO(bmeurer): We should really get rid of this special instruction,
1028      // and generate a CallAddress instruction instead.
1029      FrameScope scope(masm(), StackFrame::MANUAL);
1030      __ PrepareCallCFunction(0, 2, kScratchReg);
1031      __ MovToFloatParameters(i.InputDoubleRegister(0),
1032                              i.InputDoubleRegister(1));
1033      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
1034                       0, 2);
1035      // Move the result in the double result register.
1036      __ MovFromFloatResult(i.OutputDoubleRegister());
1037      DCHECK_EQ(LeaveCC, i.OutputSBit());
1038      break;
1039    }
1040    case kArmVsqrtF64:
1041      __ vsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1042      break;
1043    case kArmVabsF64:
1044      __ vabs(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1045      break;
1046    case kArmVnegF64:
1047      __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1048      break;
1049    case kArmVrintmF32:
1050      __ vrintm(i.OutputFloatRegister(), i.InputFloatRegister(0));
1051      break;
1052    case kArmVrintmF64:
1053      __ vrintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1054      break;
1055    case kArmVrintpF32:
1056      __ vrintp(i.OutputFloatRegister(), i.InputFloatRegister(0));
1057      break;
1058    case kArmVrintpF64:
1059      __ vrintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1060      break;
1061    case kArmVrintzF32:
1062      __ vrintz(i.OutputFloatRegister(), i.InputFloatRegister(0));
1063      break;
1064    case kArmVrintzF64:
1065      __ vrintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1066      break;
1067    case kArmVrintaF64:
1068      __ vrinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1069      break;
1070    case kArmVrintnF32:
1071      __ vrintn(i.OutputFloatRegister(), i.InputFloatRegister(0));
1072      break;
1073    case kArmVrintnF64:
1074      __ vrintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1075      break;
1076    case kArmVcvtF32F64: {
1077      __ vcvt_f32_f64(i.OutputFloatRegister(), i.InputDoubleRegister(0));
1078      DCHECK_EQ(LeaveCC, i.OutputSBit());
1079      break;
1080    }
1081    case kArmVcvtF64F32: {
1082      __ vcvt_f64_f32(i.OutputDoubleRegister(), i.InputFloatRegister(0));
1083      DCHECK_EQ(LeaveCC, i.OutputSBit());
1084      break;
1085    }
1086    case kArmVcvtF32S32: {
1087      SwVfpRegister scratch = kScratchDoubleReg.low();
1088      __ vmov(scratch, i.InputRegister(0));
1089      __ vcvt_f32_s32(i.OutputFloatRegister(), scratch);
1090      DCHECK_EQ(LeaveCC, i.OutputSBit());
1091      break;
1092    }
1093    case kArmVcvtF32U32: {
1094      SwVfpRegister scratch = kScratchDoubleReg.low();
1095      __ vmov(scratch, i.InputRegister(0));
1096      __ vcvt_f32_u32(i.OutputFloatRegister(), scratch);
1097      DCHECK_EQ(LeaveCC, i.OutputSBit());
1098      break;
1099    }
1100    case kArmVcvtF64S32: {
1101      SwVfpRegister scratch = kScratchDoubleReg.low();
1102      __ vmov(scratch, i.InputRegister(0));
1103      __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
1104      DCHECK_EQ(LeaveCC, i.OutputSBit());
1105      break;
1106    }
1107    case kArmVcvtF64U32: {
1108      SwVfpRegister scratch = kScratchDoubleReg.low();
1109      __ vmov(scratch, i.InputRegister(0));
1110      __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
1111      DCHECK_EQ(LeaveCC, i.OutputSBit());
1112      break;
1113    }
1114    case kArmVcvtS32F32: {
1115      SwVfpRegister scratch = kScratchDoubleReg.low();
1116      __ vcvt_s32_f32(scratch, i.InputFloatRegister(0));
1117      __ vmov(i.OutputRegister(), scratch);
1118      DCHECK_EQ(LeaveCC, i.OutputSBit());
1119      break;
1120    }
1121    case kArmVcvtU32F32: {
1122      SwVfpRegister scratch = kScratchDoubleReg.low();
1123      __ vcvt_u32_f32(scratch, i.InputFloatRegister(0));
1124      __ vmov(i.OutputRegister(), scratch);
1125      DCHECK_EQ(LeaveCC, i.OutputSBit());
1126      break;
1127    }
1128    case kArmVcvtS32F64: {
1129      SwVfpRegister scratch = kScratchDoubleReg.low();
1130      __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
1131      __ vmov(i.OutputRegister(), scratch);
1132      DCHECK_EQ(LeaveCC, i.OutputSBit());
1133      break;
1134    }
1135    case kArmVcvtU32F64: {
1136      SwVfpRegister scratch = kScratchDoubleReg.low();
1137      __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
1138      __ vmov(i.OutputRegister(), scratch);
1139      DCHECK_EQ(LeaveCC, i.OutputSBit());
1140      break;
1141    }
1142    case kArmVmovU32F32:
1143      __ vmov(i.OutputRegister(), i.InputFloatRegister(0));
1144      DCHECK_EQ(LeaveCC, i.OutputSBit());
1145      break;
1146    case kArmVmovF32U32:
1147      __ vmov(i.OutputFloatRegister(), i.InputRegister(0));
1148      DCHECK_EQ(LeaveCC, i.OutputSBit());
1149      break;
1150    case kArmVmovLowU32F64:
1151      __ VmovLow(i.OutputRegister(), i.InputDoubleRegister(0));
1152      DCHECK_EQ(LeaveCC, i.OutputSBit());
1153      break;
1154    case kArmVmovLowF64U32:
1155      __ VmovLow(i.OutputDoubleRegister(), i.InputRegister(1));
1156      DCHECK_EQ(LeaveCC, i.OutputSBit());
1157      break;
1158    case kArmVmovHighU32F64:
1159      __ VmovHigh(i.OutputRegister(), i.InputDoubleRegister(0));
1160      DCHECK_EQ(LeaveCC, i.OutputSBit());
1161      break;
1162    case kArmVmovHighF64U32:
1163      __ VmovHigh(i.OutputDoubleRegister(), i.InputRegister(1));
1164      DCHECK_EQ(LeaveCC, i.OutputSBit());
1165      break;
1166    case kArmVmovF64U32U32:
1167      __ vmov(i.OutputDoubleRegister(), i.InputRegister(0), i.InputRegister(1));
1168      DCHECK_EQ(LeaveCC, i.OutputSBit());
1169      break;
1170    case kArmLdrb:
1171      __ ldrb(i.OutputRegister(), i.InputOffset());
1172      DCHECK_EQ(LeaveCC, i.OutputSBit());
1173      break;
1174    case kArmLdrsb:
1175      __ ldrsb(i.OutputRegister(), i.InputOffset());
1176      DCHECK_EQ(LeaveCC, i.OutputSBit());
1177      break;
1178    case kArmStrb:
1179      __ strb(i.InputRegister(0), i.InputOffset(1));
1180      DCHECK_EQ(LeaveCC, i.OutputSBit());
1181      break;
1182    case kArmLdrh:
1183      __ ldrh(i.OutputRegister(), i.InputOffset());
1184      break;
1185    case kArmLdrsh:
1186      __ ldrsh(i.OutputRegister(), i.InputOffset());
1187      break;
1188    case kArmStrh:
1189      __ strh(i.InputRegister(0), i.InputOffset(1));
1190      DCHECK_EQ(LeaveCC, i.OutputSBit());
1191      break;
1192    case kArmLdr:
1193      __ ldr(i.OutputRegister(), i.InputOffset());
1194      break;
1195    case kArmStr:
1196      __ str(i.InputRegister(0), i.InputOffset(1));
1197      DCHECK_EQ(LeaveCC, i.OutputSBit());
1198      break;
1199    case kArmVldrF32: {
1200      __ vldr(i.OutputFloatRegister(), i.InputOffset());
1201      DCHECK_EQ(LeaveCC, i.OutputSBit());
1202      break;
1203    }
1204    case kArmVstrF32:
1205      __ vstr(i.InputFloatRegister(0), i.InputOffset(1));
1206      DCHECK_EQ(LeaveCC, i.OutputSBit());
1207      break;
1208    case kArmVldrF64:
1209      __ vldr(i.OutputDoubleRegister(), i.InputOffset());
1210      DCHECK_EQ(LeaveCC, i.OutputSBit());
1211      break;
1212    case kArmVstrF64:
1213      __ vstr(i.InputDoubleRegister(0), i.InputOffset(1));
1214      DCHECK_EQ(LeaveCC, i.OutputSBit());
1215      break;
1216    case kArmFloat32Max: {
1217      CpuFeatureScope scope(masm(), ARMv8);
1218      // (b < a) ? a : b
1219      SwVfpRegister a = i.InputFloatRegister(0);
1220      SwVfpRegister b = i.InputFloatRegister(1);
1221      SwVfpRegister result = i.OutputFloatRegister();
1222      __ VFPCompareAndSetFlags(a, b);
1223      __ vsel(gt, result, a, b);
1224      break;
1225    }
1226    case kArmFloat32Min: {
1227      CpuFeatureScope scope(masm(), ARMv8);
1228      // (a < b) ? a : b
1229      SwVfpRegister a = i.InputFloatRegister(0);
1230      SwVfpRegister b = i.InputFloatRegister(1);
1231      SwVfpRegister result = i.OutputFloatRegister();
1232      __ VFPCompareAndSetFlags(b, a);
1233      __ vsel(gt, result, a, b);
1234      break;
1235    }
1236    case kArmFloat64Max: {
1237      CpuFeatureScope scope(masm(), ARMv8);
1238      // (b < a) ? a : b
1239      DwVfpRegister a = i.InputDoubleRegister(0);
1240      DwVfpRegister b = i.InputDoubleRegister(1);
1241      DwVfpRegister result = i.OutputDoubleRegister();
1242      __ VFPCompareAndSetFlags(a, b);
1243      __ vsel(gt, result, a, b);
1244      break;
1245    }
1246    case kArmFloat64Min: {
1247      CpuFeatureScope scope(masm(), ARMv8);
1248      // (a < b) ? a : b
1249      DwVfpRegister a = i.InputDoubleRegister(0);
1250      DwVfpRegister b = i.InputDoubleRegister(1);
1251      DwVfpRegister result = i.OutputDoubleRegister();
1252      __ VFPCompareAndSetFlags(b, a);
1253      __ vsel(gt, result, a, b);
1254      break;
1255    }
1256    case kArmFloat64SilenceNaN: {
1257      DwVfpRegister value = i.InputDoubleRegister(0);
1258      DwVfpRegister result = i.OutputDoubleRegister();
1259      __ VFPCanonicalizeNaN(result, value);
1260      break;
1261    }
1262    case kArmPush:
1263      if (instr->InputAt(0)->IsFPRegister()) {
1264        LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
1265        if (op->representation() == MachineRepresentation::kFloat64) {
1266          __ vpush(i.InputDoubleRegister(0));
1267          frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
1268        } else {
1269          DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
1270          __ vpush(i.InputFloatRegister(0));
1271          frame_access_state()->IncreaseSPDelta(1);
1272        }
1273      } else {
1274        __ push(i.InputRegister(0));
1275        frame_access_state()->IncreaseSPDelta(1);
1276      }
1277      DCHECK_EQ(LeaveCC, i.OutputSBit());
1278      break;
1279    case kArmPoke: {
1280      int const slot = MiscField::decode(instr->opcode());
1281      __ str(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
1282      DCHECK_EQ(LeaveCC, i.OutputSBit());
1283      break;
1284    }
1285    case kCheckedLoadInt8:
1286      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsb);
1287      break;
1288    case kCheckedLoadUint8:
1289      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrb);
1290      break;
1291    case kCheckedLoadInt16:
1292      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsh);
1293      break;
1294    case kCheckedLoadUint16:
1295      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrh);
1296      break;
1297    case kCheckedLoadWord32:
1298      ASSEMBLE_CHECKED_LOAD_INTEGER(ldr);
1299      break;
1300    case kCheckedLoadFloat32:
1301      ASSEMBLE_CHECKED_LOAD_FP(Float);
1302      break;
1303    case kCheckedLoadFloat64:
1304      ASSEMBLE_CHECKED_LOAD_FP(Double);
1305      break;
1306    case kCheckedStoreWord8:
1307      ASSEMBLE_CHECKED_STORE_INTEGER(strb);
1308      break;
1309    case kCheckedStoreWord16:
1310      ASSEMBLE_CHECKED_STORE_INTEGER(strh);
1311      break;
1312    case kCheckedStoreWord32:
1313      ASSEMBLE_CHECKED_STORE_INTEGER(str);
1314      break;
1315    case kCheckedStoreFloat32:
1316      ASSEMBLE_CHECKED_STORE_FP(Float);
1317      break;
1318    case kCheckedStoreFloat64:
1319      ASSEMBLE_CHECKED_STORE_FP(Double);
1320      break;
1321    case kCheckedLoadWord64:
1322    case kCheckedStoreWord64:
1323      UNREACHABLE();  // currently unsupported checked int64 load/store.
1324      break;
1325
1326    case kAtomicLoadInt8:
1327      ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsb);
1328      break;
1329    case kAtomicLoadUint8:
1330      ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrb);
1331      break;
1332    case kAtomicLoadInt16:
1333      ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsh);
1334      break;
1335    case kAtomicLoadUint16:
1336      ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrh);
1337      break;
1338    case kAtomicLoadWord32:
1339      ASSEMBLE_ATOMIC_LOAD_INTEGER(ldr);
1340      break;
1341
1342    case kAtomicStoreWord8:
1343      ASSEMBLE_ATOMIC_STORE_INTEGER(strb);
1344      break;
1345    case kAtomicStoreWord16:
1346      ASSEMBLE_ATOMIC_STORE_INTEGER(strh);
1347      break;
1348    case kAtomicStoreWord32:
1349      ASSEMBLE_ATOMIC_STORE_INTEGER(str);
1350      break;
1351  }
1352  return kSuccess;
1353}  // NOLINT(readability/fn_size)
1354
1355
1356// Assembles branches after an instruction.
1357void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
1358  ArmOperandConverter i(this, instr);
1359  Label* tlabel = branch->true_label;
1360  Label* flabel = branch->false_label;
1361  Condition cc = FlagsConditionToCondition(branch->condition);
1362  __ b(cc, tlabel);
1363  if (!branch->fallthru) __ b(flabel);  // no fallthru to flabel.
1364}
1365
1366
1367void CodeGenerator::AssembleArchJump(RpoNumber target) {
1368  if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
1369}
1370
1371
1372// Assembles boolean materializations after an instruction.
1373void CodeGenerator::AssembleArchBoolean(Instruction* instr,
1374                                        FlagsCondition condition) {
1375  ArmOperandConverter i(this, instr);
1376
1377  // Materialize a full 32-bit 1 or 0 value. The result register is always the
1378  // last output of the instruction.
1379  DCHECK_NE(0u, instr->OutputCount());
1380  Register reg = i.OutputRegister(instr->OutputCount() - 1);
1381  Condition cc = FlagsConditionToCondition(condition);
1382  __ mov(reg, Operand(0));
1383  __ mov(reg, Operand(1), LeaveCC, cc);
1384}
1385
1386
1387void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
1388  ArmOperandConverter i(this, instr);
1389  Register input = i.InputRegister(0);
1390  for (size_t index = 2; index < instr->InputCount(); index += 2) {
1391    __ cmp(input, Operand(i.InputInt32(index + 0)));
1392    __ b(eq, GetLabel(i.InputRpo(index + 1)));
1393  }
1394  AssembleArchJump(i.InputRpo(1));
1395}
1396
1397
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  ArmOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  size_t const case_count = instr->InputCount() - 2;
  // Ensure to emit the constant pool first if necessary.
  __ CheckConstPool(true, true);
  __ cmp(input, Operand(case_count));
  // The add below branches pc-relative into the jump table that follows, so
  // no constant pool entries may be interleaved with the next
  // case_count + 2 instructions (the add, the default branch, and one
  // branch per case).
  __ BlockConstPoolFor(case_count + 2);
  // In-range input (lo): jump into the table. In ARM state reading pc
  // yields the current instruction's address + 8, i.e. the address of the
  // table's first entry, so input == 0 lands on the first case branch.
  __ add(pc, pc, Operand(input, LSL, 2), LeaveCC, lo);
  // Out-of-range input: fall through to the default target.
  __ b(GetLabel(i.InputRpo(1)));
  // The jump table proper: one unconditional branch per case.
  for (size_t index = 0; index < case_count; ++index) {
    __ b(GetLabel(i.InputRpo(index + 2)));
  }
}
1412
// Emits a call to the pre-built deoptimization entry stub for the given
// bailout id and type. Returns kTooManyDeoptimizationBailouts when no entry
// stub could be obtained.
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  // TODO(turbofan): We should be able to generate better code by sharing the
  // actual final call site and just bl'ing to it here, similar to what we do
  // in the lithium backend.
  if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
  // The call above may have made the constant pool overdue; emit it now if
  // it is due (non-forced check).
  __ CheckConstPool(false, false);
  return kSuccess;
}
1425
1426void CodeGenerator::FinishFrame(Frame* frame) {
1427  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
1428
1429  const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
1430  if (saves_fp != 0) {
1431    frame->AlignSavedCalleeRegisterSlots();
1432  }
1433
1434  if (saves_fp != 0) {
1435    // Save callee-saved FP registers.
1436    STATIC_ASSERT(DwVfpRegister::kMaxNumRegisters == 32);
1437    uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
1438    uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
1439    DCHECK_EQ((last - first + 1), base::bits::CountPopulation32(saves_fp));
1440    frame->AllocateSavedCalleeRegisterSlots((last - first + 1) *
1441                                            (kDoubleSize / kPointerSize));
1442  }
1443  const RegList saves = FLAG_enable_embedded_constant_pool
1444                            ? (descriptor->CalleeSavedRegisters() & ~pp.bit())
1445                            : descriptor->CalleeSavedRegisters();
1446  if (saves != 0) {
1447    // Save callee-saved registers.
1448    frame->AllocateSavedCalleeRegisterSlots(
1449        base::bits::CountPopulation32(saves));
1450  }
1451}
1452
1453void CodeGenerator::AssembleConstructFrame() {
1454  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
1455  if (frame_access_state()->has_frame()) {
1456    if (descriptor->IsCFunctionCall()) {
1457      if (FLAG_enable_embedded_constant_pool) {
1458        __ Push(lr, fp, pp);
1459        // Adjust FP to point to saved FP.
1460        __ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
1461      } else {
1462        __ Push(lr, fp);
1463        __ mov(fp, sp);
1464      }
1465    } else if (descriptor->IsJSFunctionCall()) {
1466      __ Prologue(this->info()->GeneratePreagedPrologue());
1467    } else {
1468      __ StubPrologue(info()->GetOutputStackFrameType());
1469    }
1470  }
1471
1472  int shrink_slots = frame()->GetSpillSlotCount();
1473
1474  if (info()->is_osr()) {
1475    // TurboFan OSR-compiled functions cannot be entered directly.
1476    __ Abort(kShouldNotDirectlyEnterOsrFunction);
1477
1478    // Unoptimized code jumps directly to this entrypoint while the unoptimized
1479    // frame is still on the stack. Optimized code uses OSR values directly from
1480    // the unoptimized frame. Thus, all that needs to be done is to allocate the
1481    // remaining stack slots.
1482    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
1483    osr_pc_offset_ = __ pc_offset();
1484    shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
1485  }
1486
1487  const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
1488  if (shrink_slots > 0) {
1489    __ sub(sp, sp, Operand(shrink_slots * kPointerSize));
1490  }
1491
1492  if (saves_fp != 0) {
1493    // Save callee-saved FP registers.
1494    STATIC_ASSERT(DwVfpRegister::kMaxNumRegisters == 32);
1495    uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
1496    uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
1497    DCHECK_EQ((last - first + 1), base::bits::CountPopulation32(saves_fp));
1498    __ vstm(db_w, sp, DwVfpRegister::from_code(first),
1499            DwVfpRegister::from_code(last));
1500  }
1501  const RegList saves = FLAG_enable_embedded_constant_pool
1502                            ? (descriptor->CalleeSavedRegisters() & ~pp.bit())
1503                            : descriptor->CalleeSavedRegisters();
1504  if (saves != 0) {
1505    // Save callee-saved registers.
1506    __ stm(db_w, sp, saves);
1507  }
1508}
1509
1510
1511void CodeGenerator::AssembleReturn() {
1512  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
1513  int pop_count = static_cast<int>(descriptor->StackParameterCount());
1514
1515  // Restore registers.
1516  const RegList saves = FLAG_enable_embedded_constant_pool
1517                            ? (descriptor->CalleeSavedRegisters() & ~pp.bit())
1518                            : descriptor->CalleeSavedRegisters();
1519  if (saves != 0) {
1520    __ ldm(ia_w, sp, saves);
1521  }
1522
1523  // Restore FP registers.
1524  const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
1525  if (saves_fp != 0) {
1526    STATIC_ASSERT(DwVfpRegister::kMaxNumRegisters == 32);
1527    uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
1528    uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
1529    __ vldm(ia_w, sp, DwVfpRegister::from_code(first),
1530            DwVfpRegister::from_code(last));
1531  }
1532
1533  if (descriptor->IsCFunctionCall()) {
1534    AssembleDeconstructFrame();
1535  } else if (frame_access_state()->has_frame()) {
1536    // Canonicalize JSFunction return sites for now.
1537    if (return_label_.is_bound()) {
1538      __ b(&return_label_);
1539      return;
1540    } else {
1541      __ bind(&return_label_);
1542      AssembleDeconstructFrame();
1543    }
1544  }
1545  __ Ret(pop_count);
1546}
1547
1548
1549void CodeGenerator::AssembleMove(InstructionOperand* source,
1550                                 InstructionOperand* destination) {
1551  ArmOperandConverter g(this, nullptr);
1552  // Dispatch on the source and destination operand kinds.  Not all
1553  // combinations are possible.
1554  if (source->IsRegister()) {
1555    DCHECK(destination->IsRegister() || destination->IsStackSlot());
1556    Register src = g.ToRegister(source);
1557    if (destination->IsRegister()) {
1558      __ mov(g.ToRegister(destination), src);
1559    } else {
1560      __ str(src, g.ToMemOperand(destination));
1561    }
1562  } else if (source->IsStackSlot()) {
1563    DCHECK(destination->IsRegister() || destination->IsStackSlot());
1564    MemOperand src = g.ToMemOperand(source);
1565    if (destination->IsRegister()) {
1566      __ ldr(g.ToRegister(destination), src);
1567    } else {
1568      Register temp = kScratchReg;
1569      __ ldr(temp, src);
1570      __ str(temp, g.ToMemOperand(destination));
1571    }
1572  } else if (source->IsConstant()) {
1573    Constant src = g.ToConstant(source);
1574    if (destination->IsRegister() || destination->IsStackSlot()) {
1575      Register dst =
1576          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
1577      switch (src.type()) {
1578        case Constant::kInt32:
1579          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
1580              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
1581              src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
1582            __ mov(dst, Operand(src.ToInt32(), src.rmode()));
1583          } else {
1584            __ mov(dst, Operand(src.ToInt32()));
1585          }
1586          break;
1587        case Constant::kInt64:
1588          UNREACHABLE();
1589          break;
1590        case Constant::kFloat32:
1591          __ Move(dst,
1592                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
1593          break;
1594        case Constant::kFloat64:
1595          __ Move(dst,
1596                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
1597          break;
1598        case Constant::kExternalReference:
1599          __ mov(dst, Operand(src.ToExternalReference()));
1600          break;
1601        case Constant::kHeapObject: {
1602          Handle<HeapObject> src_object = src.ToHeapObject();
1603          Heap::RootListIndex index;
1604          int slot;
1605          if (IsMaterializableFromFrame(src_object, &slot)) {
1606            __ ldr(dst, g.SlotToMemOperand(slot));
1607          } else if (IsMaterializableFromRoot(src_object, &index)) {
1608            __ LoadRoot(dst, index);
1609          } else {
1610            __ Move(dst, src_object);
1611          }
1612          break;
1613        }
1614        case Constant::kRpoNumber:
1615          UNREACHABLE();  // TODO(dcarney): loading RPO constants on arm.
1616          break;
1617      }
1618      if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
1619    } else if (src.type() == Constant::kFloat32) {
1620      if (destination->IsFPStackSlot()) {
1621        MemOperand dst = g.ToMemOperand(destination);
1622        __ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
1623        __ str(ip, dst);
1624      } else {
1625        SwVfpRegister dst = g.ToFloatRegister(destination);
1626        __ vmov(dst, src.ToFloat32());
1627      }
1628    } else {
1629      DCHECK_EQ(Constant::kFloat64, src.type());
1630      DwVfpRegister dst = destination->IsFPRegister()
1631                              ? g.ToDoubleRegister(destination)
1632                              : kScratchDoubleReg;
1633      __ vmov(dst, src.ToFloat64(), kScratchReg);
1634      if (destination->IsFPStackSlot()) {
1635        __ vstr(dst, g.ToMemOperand(destination));
1636      }
1637    }
1638  } else if (source->IsFPRegister()) {
1639    MachineRepresentation rep = LocationOperand::cast(source)->representation();
1640    if (rep == MachineRepresentation::kFloat64) {
1641      DwVfpRegister src = g.ToDoubleRegister(source);
1642      if (destination->IsFPRegister()) {
1643        DwVfpRegister dst = g.ToDoubleRegister(destination);
1644        __ Move(dst, src);
1645      } else {
1646        DCHECK(destination->IsFPStackSlot());
1647        __ vstr(src, g.ToMemOperand(destination));
1648      }
1649    } else {
1650      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
1651      SwVfpRegister src = g.ToFloatRegister(source);
1652      if (destination->IsFPRegister()) {
1653        SwVfpRegister dst = g.ToFloatRegister(destination);
1654        __ Move(dst, src);
1655      } else {
1656        DCHECK(destination->IsFPStackSlot());
1657        __ vstr(src, g.ToMemOperand(destination));
1658      }
1659    }
1660  } else if (source->IsFPStackSlot()) {
1661    MemOperand src = g.ToMemOperand(source);
1662    MachineRepresentation rep =
1663        LocationOperand::cast(destination)->representation();
1664    if (destination->IsFPRegister()) {
1665      if (rep == MachineRepresentation::kFloat64) {
1666        __ vldr(g.ToDoubleRegister(destination), src);
1667      } else {
1668        DCHECK_EQ(MachineRepresentation::kFloat32, rep);
1669        __ vldr(g.ToFloatRegister(destination), src);
1670      }
1671    } else {
1672      DCHECK(destination->IsFPStackSlot());
1673      if (rep == MachineRepresentation::kFloat64) {
1674        DwVfpRegister temp = kScratchDoubleReg;
1675        __ vldr(temp, src);
1676        __ vstr(temp, g.ToMemOperand(destination));
1677      } else {
1678        DCHECK_EQ(MachineRepresentation::kFloat32, rep);
1679        SwVfpRegister temp = kScratchDoubleReg.low();
1680        __ vldr(temp, src);
1681        __ vstr(temp, g.ToMemOperand(destination));
1682      }
1683    }
1684  } else {
1685    UNREACHABLE();
1686  }
1687}
1688
1689
// Exchanges the contents of |source| and |destination| in place, using the
// scratch registers (kScratchReg / kScratchDoubleReg) as temporaries.  No
// instruction operands are needed, so the converter is built with a null
// instruction.
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  ArmOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      // Three-move rotation through the scratch register.
      Register dst = g.ToRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      // Register-stack: stash the register, load the slot into it, then
      // store the stashed value to the slot.
      MemOperand dst = g.ToMemOperand(destination);
      __ mov(temp, src);
      __ ldr(src, dst);
      __ str(temp, dst);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
    // Stack-stack: hold one word in the GP scratch and the other in the low
    // half of the FP scratch so both values are live at once.
    Register temp_0 = kScratchReg;
    SwVfpRegister temp_1 = kScratchDoubleReg.low();
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ ldr(temp_0, src);
    __ vldr(temp_1, dst);
    __ str(temp_0, dst);
    __ vstr(temp_1, src);
  } else if (source->IsFPRegister()) {
    MachineRepresentation rep = LocationOperand::cast(source)->representation();
    LowDwVfpRegister temp = kScratchDoubleReg;
    if (rep == MachineRepresentation::kFloat64) {
      DwVfpRegister src = g.ToDoubleRegister(source);
      if (destination->IsFPRegister()) {
        // Double-register pair rotation through the FP scratch.
        DwVfpRegister dst = g.ToDoubleRegister(destination);
        __ Move(temp, src);
        __ Move(src, dst);
        __ Move(dst, temp);
      } else {
        DCHECK(destination->IsFPStackSlot());
        // FP-register-stack: stash the register, load the slot, store back.
        MemOperand dst = g.ToMemOperand(destination);
        __ Move(temp, src);
        __ vldr(src, dst);
        __ vstr(temp, dst);
      }
    } else {
      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
      // Same patterns as above, but through single-precision views.
      SwVfpRegister src = g.ToFloatRegister(source);
      if (destination->IsFPRegister()) {
        SwVfpRegister dst = g.ToFloatRegister(destination);
        __ Move(temp.low(), src);
        __ Move(src, dst);
        __ Move(dst, temp.low());
      } else {
        DCHECK(destination->IsFPStackSlot());
        MemOperand dst = g.ToMemOperand(destination);
        __ Move(temp.low(), src);
        __ vldr(src, dst);
        __ vstr(temp.low(), dst);
      }
    }
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPStackSlot());
    Register temp_0 = kScratchReg;
    LowDwVfpRegister temp_1 = kScratchDoubleReg;
    MemOperand src0 = g.ToMemOperand(source);
    MemOperand dst0 = g.ToMemOperand(destination);
    MachineRepresentation rep = LocationOperand::cast(source)->representation();
    if (rep == MachineRepresentation::kFloat64) {
      // A float64 slot spans two words; copy the source word-by-word through
      // the GP scratch while the FP scratch holds the whole destination.
      MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
      MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
      __ vldr(temp_1, dst0);  // Save destination in temp_1.
      __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
      __ str(temp_0, dst0);
      __ ldr(temp_0, src1);
      __ str(temp_0, dst1);
      __ vstr(temp_1, src0);
    } else {
      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
      // Single word: one GP copy suffices.
      __ vldr(temp_1.low(), dst0);  // Save destination in temp_1.
      __ ldr(temp_0, src0);  // Then use temp_0 to copy source to destination.
      __ str(temp_0, dst0);
      __ vstr(temp_1.low(), src0);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}
1782
1783
// Out-of-line jump-table emission hook.  Never reached on this backend:
// 32-bit ARM emits its jump tables inline, so there is nothing to do here.
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  // On 32-bit ARM we emit the jump tables inline.
  UNREACHABLE();
}
1788
1789
1790void CodeGenerator::EnsureSpaceForLazyDeopt() {
1791  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
1792    return;
1793  }
1794
1795  int space_needed = Deoptimizer::patch_size();
1796  // Ensure that we have enough space after the previous lazy-bailout
1797  // instruction for patching the code here.
1798  int current_pc = masm()->pc_offset();
1799  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
1800    // Block literal pool emission for duration of padding.
1801    v8::internal::Assembler::BlockConstPoolScope block_const_pool(masm());
1802    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
1803    DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
1804    while (padding_size > 0) {
1805      __ nop();
1806      padding_size -= v8::internal::Assembler::kInstrSize;
1807    }
1808  }
1809}
1810
1811#undef __
1812
1813}  // namespace compiler
1814}  // namespace internal
1815}  // namespace v8
1816