1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/base/adapters.h"
6#include "src/base/bits.h"
7#include "src/compiler/instruction-selector-impl.h"
8#include "src/compiler/node-matchers.h"
9#include "src/compiler/node-properties.h"
10
11namespace v8 {
12namespace internal {
13namespace compiler {
14
15// Adds Arm-specific methods for generating InstructionOperands.
16class ArmOperandGenerator : public OperandGenerator {
17 public:
18  explicit ArmOperandGenerator(InstructionSelector* selector)
19      : OperandGenerator(selector) {}
20
21  bool CanBeImmediate(int32_t value) const {
22    return Assembler::ImmediateFitsAddrMode1Instruction(value);
23  }
24
25  bool CanBeImmediate(uint32_t value) const {
26    return CanBeImmediate(bit_cast<int32_t>(value));
27  }
28
29  bool CanBeImmediate(Node* node, InstructionCode opcode) {
30    Int32Matcher m(node);
31    if (!m.HasValue()) return false;
32    int32_t value = m.Value();
33    switch (ArchOpcodeField::decode(opcode)) {
34      case kArmAnd:
35      case kArmMov:
36      case kArmMvn:
37      case kArmBic:
38        return CanBeImmediate(value) || CanBeImmediate(~value);
39
40      case kArmAdd:
41      case kArmSub:
42      case kArmCmp:
43      case kArmCmn:
44        return CanBeImmediate(value) || CanBeImmediate(-value);
45
46      case kArmTst:
47      case kArmTeq:
48      case kArmOrr:
49      case kArmEor:
50      case kArmRsb:
51        return CanBeImmediate(value);
52
53      case kArmVldrF32:
54      case kArmVstrF32:
55      case kArmVldrF64:
56      case kArmVstrF64:
57        return value >= -1020 && value <= 1020 && (value % 4) == 0;
58
59      case kArmLdrb:
60      case kArmLdrsb:
61      case kArmStrb:
62      case kArmLdr:
63      case kArmStr:
64        return value >= -4095 && value <= 4095;
65
66      case kArmLdrh:
67      case kArmLdrsh:
68      case kArmStrh:
69        return value >= -255 && value <= 255;
70
71      default:
72        break;
73    }
74    return false;
75  }
76};
77
78
79namespace {
80
81void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
82  ArmOperandGenerator g(selector);
83  selector->Emit(opcode, g.DefineAsRegister(node),
84                 g.UseRegister(node->InputAt(0)));
85}
86
87void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
88  ArmOperandGenerator g(selector);
89  selector->Emit(opcode, g.DefineAsRegister(node),
90                 g.UseRegister(node->InputAt(0)),
91                 g.UseRegister(node->InputAt(1)));
92}
93
94void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
95  ArmOperandGenerator g(selector);
96  // Use DefineSameAsFirst for ternary ops that clobber their first input,
97  // e.g. the NEON vbsl instruction.
98  selector->Emit(
99      opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
100      g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
101}
102
103void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
104  ArmOperandGenerator g(selector);
105  int32_t imm = OpParameter<int32_t>(node);
106  selector->Emit(opcode, g.DefineAsRegister(node),
107                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
108}
109
110void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
111  ArmOperandGenerator g(selector);
112  int32_t imm = OpParameter<int32_t>(node);
113  selector->Emit(opcode, g.DefineAsRegister(node),
114                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
115                 g.UseRegister(node->InputAt(1)));
116}
117
118template <IrOpcode::Value kOpcode, int kImmMin, int kImmMax,
119          AddressingMode kImmMode, AddressingMode kRegMode>
120bool TryMatchShift(InstructionSelector* selector,
121                   InstructionCode* opcode_return, Node* node,
122                   InstructionOperand* value_return,
123                   InstructionOperand* shift_return) {
124  ArmOperandGenerator g(selector);
125  if (node->opcode() == kOpcode) {
126    Int32BinopMatcher m(node);
127    *value_return = g.UseRegister(m.left().node());
128    if (m.right().IsInRange(kImmMin, kImmMax)) {
129      *opcode_return |= AddressingModeField::encode(kImmMode);
130      *shift_return = g.UseImmediate(m.right().node());
131    } else {
132      *opcode_return |= AddressingModeField::encode(kRegMode);
133      *shift_return = g.UseRegister(m.right().node());
134    }
135    return true;
136  }
137  return false;
138}
139
140template <IrOpcode::Value kOpcode, int kImmMin, int kImmMax,
141          AddressingMode kImmMode>
142bool TryMatchShiftImmediate(InstructionSelector* selector,
143                            InstructionCode* opcode_return, Node* node,
144                            InstructionOperand* value_return,
145                            InstructionOperand* shift_return) {
146  ArmOperandGenerator g(selector);
147  if (node->opcode() == kOpcode) {
148    Int32BinopMatcher m(node);
149    if (m.right().IsInRange(kImmMin, kImmMax)) {
150      *opcode_return |= AddressingModeField::encode(kImmMode);
151      *value_return = g.UseRegister(m.left().node());
152      *shift_return = g.UseImmediate(m.right().node());
153      return true;
154    }
155  }
156  return false;
157}
158
159bool TryMatchROR(InstructionSelector* selector, InstructionCode* opcode_return,
160                 Node* node, InstructionOperand* value_return,
161                 InstructionOperand* shift_return) {
162  return TryMatchShift<IrOpcode::kWord32Ror, 1, 31, kMode_Operand2_R_ROR_I,
163                       kMode_Operand2_R_ROR_R>(selector, opcode_return, node,
164                                               value_return, shift_return);
165}
166
167
168bool TryMatchASR(InstructionSelector* selector, InstructionCode* opcode_return,
169                 Node* node, InstructionOperand* value_return,
170                 InstructionOperand* shift_return) {
171  return TryMatchShift<IrOpcode::kWord32Sar, 1, 32, kMode_Operand2_R_ASR_I,
172                       kMode_Operand2_R_ASR_R>(selector, opcode_return, node,
173                                               value_return, shift_return);
174}
175
176
177bool TryMatchLSL(InstructionSelector* selector, InstructionCode* opcode_return,
178                 Node* node, InstructionOperand* value_return,
179                 InstructionOperand* shift_return) {
180  return TryMatchShift<IrOpcode::kWord32Shl, 0, 31, kMode_Operand2_R_LSL_I,
181                       kMode_Operand2_R_LSL_R>(selector, opcode_return, node,
182                                               value_return, shift_return);
183}
184
185bool TryMatchLSLImmediate(InstructionSelector* selector,
186                          InstructionCode* opcode_return, Node* node,
187                          InstructionOperand* value_return,
188                          InstructionOperand* shift_return) {
189  return TryMatchShiftImmediate<IrOpcode::kWord32Shl, 0, 31,
190                                kMode_Operand2_R_LSL_I>(
191      selector, opcode_return, node, value_return, shift_return);
192}
193
194bool TryMatchLSR(InstructionSelector* selector, InstructionCode* opcode_return,
195                 Node* node, InstructionOperand* value_return,
196                 InstructionOperand* shift_return) {
197  return TryMatchShift<IrOpcode::kWord32Shr, 1, 32, kMode_Operand2_R_LSR_I,
198                       kMode_Operand2_R_LSR_R>(selector, opcode_return, node,
199                                               value_return, shift_return);
200}
201
202
203bool TryMatchShift(InstructionSelector* selector,
204                   InstructionCode* opcode_return, Node* node,
205                   InstructionOperand* value_return,
206                   InstructionOperand* shift_return) {
207  return (
208      TryMatchASR(selector, opcode_return, node, value_return, shift_return) ||
209      TryMatchLSL(selector, opcode_return, node, value_return, shift_return) ||
210      TryMatchLSR(selector, opcode_return, node, value_return, shift_return) ||
211      TryMatchROR(selector, opcode_return, node, value_return, shift_return));
212}
213
214
215bool TryMatchImmediateOrShift(InstructionSelector* selector,
216                              InstructionCode* opcode_return, Node* node,
217                              size_t* input_count_return,
218                              InstructionOperand* inputs) {
219  ArmOperandGenerator g(selector);
220  if (g.CanBeImmediate(node, *opcode_return)) {
221    *opcode_return |= AddressingModeField::encode(kMode_Operand2_I);
222    inputs[0] = g.UseImmediate(node);
223    *input_count_return = 1;
224    return true;
225  }
226  if (TryMatchShift(selector, opcode_return, node, &inputs[0], &inputs[1])) {
227    *input_count_return = 2;
228    return true;
229  }
230  return false;
231}
232
233
234void VisitBinop(InstructionSelector* selector, Node* node,
235                InstructionCode opcode, InstructionCode reverse_opcode,
236                FlagsContinuation* cont) {
237  ArmOperandGenerator g(selector);
238  Int32BinopMatcher m(node);
239  InstructionOperand inputs[5];
240  size_t input_count = 0;
241  InstructionOperand outputs[2];
242  size_t output_count = 0;
243
244  if (m.left().node() == m.right().node()) {
245    // If both inputs refer to the same operand, enforce allocating a register
246    // for both of them to ensure that we don't end up generating code like
247    // this:
248    //
249    //   mov r0, r1, asr #16
250    //   adds r0, r0, r1, asr #16
251    //   bvs label
252    InstructionOperand const input = g.UseRegister(m.left().node());
253    opcode |= AddressingModeField::encode(kMode_Operand2_R);
254    inputs[input_count++] = input;
255    inputs[input_count++] = input;
256  } else if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
257                                      &input_count, &inputs[1])) {
258    inputs[0] = g.UseRegister(m.left().node());
259    input_count++;
260  } else if (TryMatchImmediateOrShift(selector, &reverse_opcode,
261                                      m.left().node(), &input_count,
262                                      &inputs[1])) {
263    inputs[0] = g.UseRegister(m.right().node());
264    opcode = reverse_opcode;
265    input_count++;
266  } else {
267    opcode |= AddressingModeField::encode(kMode_Operand2_R);
268    inputs[input_count++] = g.UseRegister(m.left().node());
269    inputs[input_count++] = g.UseRegister(m.right().node());
270  }
271
272  if (cont->IsBranch()) {
273    inputs[input_count++] = g.Label(cont->true_block());
274    inputs[input_count++] = g.Label(cont->false_block());
275  }
276
277  outputs[output_count++] = g.DefineAsRegister(node);
278  if (cont->IsSet()) {
279    outputs[output_count++] = g.DefineAsRegister(cont->result());
280  }
281
282  DCHECK_NE(0u, input_count);
283  DCHECK_NE(0u, output_count);
284  DCHECK_GE(arraysize(inputs), input_count);
285  DCHECK_GE(arraysize(outputs), output_count);
286  DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
287
288  opcode = cont->Encode(opcode);
289  if (cont->IsDeoptimize()) {
290    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
291                             cont->kind(), cont->reason(), cont->frame_state());
292  } else if (cont->IsTrap()) {
293    inputs[input_count++] = g.UseImmediate(cont->trap_id());
294    selector->Emit(opcode, output_count, outputs, input_count, inputs);
295  } else {
296    selector->Emit(opcode, output_count, outputs, input_count, inputs);
297  }
298}
299
300
301void VisitBinop(InstructionSelector* selector, Node* node,
302                InstructionCode opcode, InstructionCode reverse_opcode) {
303  FlagsContinuation cont;
304  VisitBinop(selector, node, opcode, reverse_opcode, &cont);
305}
306
307
308void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
309             ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode,
310             InstructionOperand result_operand, InstructionOperand left_operand,
311             InstructionOperand right_operand) {
312  ArmOperandGenerator g(selector);
313  if (selector->IsSupported(SUDIV)) {
314    selector->Emit(div_opcode, result_operand, left_operand, right_operand);
315    return;
316  }
317  InstructionOperand left_double_operand = g.TempDoubleRegister();
318  InstructionOperand right_double_operand = g.TempDoubleRegister();
319  InstructionOperand result_double_operand = g.TempDoubleRegister();
320  selector->Emit(f64i32_opcode, left_double_operand, left_operand);
321  selector->Emit(f64i32_opcode, right_double_operand, right_operand);
322  selector->Emit(kArmVdivF64, result_double_operand, left_double_operand,
323                 right_double_operand);
324  selector->Emit(i32f64_opcode, result_operand, result_double_operand);
325}
326
327
328void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode div_opcode,
329              ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode) {
330  ArmOperandGenerator g(selector);
331  Int32BinopMatcher m(node);
332  EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode,
333          g.DefineAsRegister(node), g.UseRegister(m.left().node()),
334          g.UseRegister(m.right().node()));
335}
336
337
338void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode div_opcode,
339              ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode) {
340  ArmOperandGenerator g(selector);
341  Int32BinopMatcher m(node);
342  InstructionOperand div_operand = g.TempRegister();
343  InstructionOperand result_operand = g.DefineAsRegister(node);
344  InstructionOperand left_operand = g.UseRegister(m.left().node());
345  InstructionOperand right_operand = g.UseRegister(m.right().node());
346  EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand,
347          left_operand, right_operand);
348  if (selector->IsSupported(ARMv7)) {
349    selector->Emit(kArmMls, result_operand, div_operand, right_operand,
350                   left_operand);
351  } else {
352    InstructionOperand mul_operand = g.TempRegister();
353    selector->Emit(kArmMul, mul_operand, div_operand, right_operand);
354    selector->Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_R),
355                   result_operand, left_operand, mul_operand);
356  }
357}
358
359void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
360              InstructionOperand* output, Node* base, Node* index) {
361  ArmOperandGenerator g(selector);
362  InstructionOperand inputs[3];
363  size_t input_count = 2;
364
365  inputs[0] = g.UseRegister(base);
366  if (g.CanBeImmediate(index, opcode)) {
367    inputs[1] = g.UseImmediate(index);
368    opcode |= AddressingModeField::encode(kMode_Offset_RI);
369  } else if ((opcode == kArmLdr) &&
370             TryMatchLSLImmediate(selector, &opcode, index, &inputs[1],
371                                  &inputs[2])) {
372    input_count = 3;
373  } else {
374    inputs[1] = g.UseRegister(index);
375    opcode |= AddressingModeField::encode(kMode_Offset_RR);
376  }
377  selector->Emit(opcode, 1, output, input_count, inputs);
378}
379
380void EmitStore(InstructionSelector* selector, InstructionCode opcode,
381               size_t input_count, InstructionOperand* inputs,
382               Node* index) {
383  ArmOperandGenerator g(selector);
384
385  if (g.CanBeImmediate(index, opcode)) {
386    inputs[input_count++] = g.UseImmediate(index);
387    opcode |= AddressingModeField::encode(kMode_Offset_RI);
388  } else if ((opcode == kArmStr) &&
389             TryMatchLSLImmediate(selector, &opcode, index, &inputs[2],
390                                  &inputs[3])) {
391    input_count = 4;
392  } else {
393    inputs[input_count++] = g.UseRegister(index);
394    opcode |= AddressingModeField::encode(kMode_Offset_RR);
395  }
396  selector->Emit(opcode, 0, nullptr, input_count, inputs);
397}
398
399}  // namespace
400
401
402void InstructionSelector::VisitLoad(Node* node) {
403  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
404  ArmOperandGenerator g(this);
405  Node* base = node->InputAt(0);
406  Node* index = node->InputAt(1);
407
408  InstructionCode opcode = kArchNop;
409  switch (load_rep.representation()) {
410    case MachineRepresentation::kFloat32:
411      opcode = kArmVldrF32;
412      break;
413    case MachineRepresentation::kFloat64:
414      opcode = kArmVldrF64;
415      break;
416    case MachineRepresentation::kBit:  // Fall through.
417    case MachineRepresentation::kWord8:
418      opcode = load_rep.IsUnsigned() ? kArmLdrb : kArmLdrsb;
419      break;
420    case MachineRepresentation::kWord16:
421      opcode = load_rep.IsUnsigned() ? kArmLdrh : kArmLdrsh;
422      break;
423    case MachineRepresentation::kTaggedSigned:   // Fall through.
424    case MachineRepresentation::kTaggedPointer:  // Fall through.
425    case MachineRepresentation::kTagged:  // Fall through.
426    case MachineRepresentation::kWord32:
427      opcode = kArmLdr;
428      break;
429    case MachineRepresentation::kWord64:   // Fall through.
430    case MachineRepresentation::kSimd128:  // Fall through.
431    case MachineRepresentation::kSimd1x4:  // Fall through.
432    case MachineRepresentation::kSimd1x8:  // Fall through.
433    case MachineRepresentation::kSimd1x16:  // Fall through.
434    case MachineRepresentation::kNone:
435      UNREACHABLE();
436      return;
437  }
438
439  InstructionOperand output = g.DefineAsRegister(node);
440  EmitLoad(this, opcode, &output, base, index);
441}
442
443void InstructionSelector::VisitProtectedLoad(Node* node) {
444  // TODO(eholk)
445  UNIMPLEMENTED();
446}
447
448void InstructionSelector::VisitStore(Node* node) {
449  ArmOperandGenerator g(this);
450  Node* base = node->InputAt(0);
451  Node* index = node->InputAt(1);
452  Node* value = node->InputAt(2);
453
454  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
455  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
456  MachineRepresentation rep = store_rep.representation();
457
458  if (write_barrier_kind != kNoWriteBarrier) {
459    DCHECK(CanBeTaggedPointer(rep));
460    AddressingMode addressing_mode;
461    InstructionOperand inputs[3];
462    size_t input_count = 0;
463    inputs[input_count++] = g.UseUniqueRegister(base);
464    // OutOfLineRecordWrite uses the index in an 'add' instruction as well as
465    // for the store itself, so we must check compatibility with both.
466    if (g.CanBeImmediate(index, kArmAdd) && g.CanBeImmediate(index, kArmStr)) {
467      inputs[input_count++] = g.UseImmediate(index);
468      addressing_mode = kMode_Offset_RI;
469    } else {
470      inputs[input_count++] = g.UseUniqueRegister(index);
471      addressing_mode = kMode_Offset_RR;
472    }
473    inputs[input_count++] = g.UseUniqueRegister(value);
474    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
475    switch (write_barrier_kind) {
476      case kNoWriteBarrier:
477        UNREACHABLE();
478        break;
479      case kMapWriteBarrier:
480        record_write_mode = RecordWriteMode::kValueIsMap;
481        break;
482      case kPointerWriteBarrier:
483        record_write_mode = RecordWriteMode::kValueIsPointer;
484        break;
485      case kFullWriteBarrier:
486        record_write_mode = RecordWriteMode::kValueIsAny;
487        break;
488    }
489    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
490    size_t const temp_count = arraysize(temps);
491    InstructionCode code = kArchStoreWithWriteBarrier;
492    code |= AddressingModeField::encode(addressing_mode);
493    code |= MiscField::encode(static_cast<int>(record_write_mode));
494    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
495  } else {
496    InstructionCode opcode = kArchNop;
497    switch (rep) {
498      case MachineRepresentation::kFloat32:
499        opcode = kArmVstrF32;
500        break;
501      case MachineRepresentation::kFloat64:
502        opcode = kArmVstrF64;
503        break;
504      case MachineRepresentation::kBit:  // Fall through.
505      case MachineRepresentation::kWord8:
506        opcode = kArmStrb;
507        break;
508      case MachineRepresentation::kWord16:
509        opcode = kArmStrh;
510        break;
511      case MachineRepresentation::kTaggedSigned:   // Fall through.
512      case MachineRepresentation::kTaggedPointer:  // Fall through.
513      case MachineRepresentation::kTagged:  // Fall through.
514      case MachineRepresentation::kWord32:
515        opcode = kArmStr;
516        break;
517      case MachineRepresentation::kWord64:   // Fall through.
518      case MachineRepresentation::kSimd128:  // Fall through.
519      case MachineRepresentation::kSimd1x4:  // Fall through.
520      case MachineRepresentation::kSimd1x8:  // Fall through.
521      case MachineRepresentation::kSimd1x16:  // Fall through.
522      case MachineRepresentation::kNone:
523        UNREACHABLE();
524        return;
525    }
526
527    InstructionOperand inputs[4];
528    size_t input_count = 0;
529    inputs[input_count++] = g.UseRegister(value);
530    inputs[input_count++] = g.UseRegister(base);
531    EmitStore(this, opcode, input_count, inputs, index);
532  }
533}
534
535void InstructionSelector::VisitProtectedStore(Node* node) {
536  // TODO(eholk)
537  UNIMPLEMENTED();
538}
539
540void InstructionSelector::VisitUnalignedLoad(Node* node) {
541  UnalignedLoadRepresentation load_rep =
542      UnalignedLoadRepresentationOf(node->op());
543  ArmOperandGenerator g(this);
544  Node* base = node->InputAt(0);
545  Node* index = node->InputAt(1);
546
547  InstructionCode opcode = kArmLdr;
548  // Only floating point loads need to be specially handled; integer loads
549  // support unaligned access. We support unaligned FP loads by loading to
550  // integer registers first, then moving to the destination FP register.
551  switch (load_rep.representation()) {
552    case MachineRepresentation::kFloat32: {
553      InstructionOperand temp = g.TempRegister();
554      EmitLoad(this, opcode, &temp, base, index);
555      Emit(kArmVmovF32U32, g.DefineAsRegister(node), temp);
556      return;
557    }
558    case MachineRepresentation::kFloat64: {
559      // TODO(arm): use vld1.8 for this when NEON is available.
560      // Compute the address of the least-significant half of the FP value.
561      // We assume that the base node is unlikely to be an encodable immediate
562      // or the result of a shift operation, so only consider the addressing
563      // mode that should be used for the index node.
564      InstructionCode add_opcode = kArmAdd;
565      InstructionOperand inputs[3];
566      inputs[0] = g.UseRegister(base);
567
568      size_t input_count;
569      if (TryMatchImmediateOrShift(this, &add_opcode, index, &input_count,
570                                   &inputs[1])) {
571        // input_count has been set by TryMatchImmediateOrShift(), so increment
572        // it to account for the base register in inputs[0].
573        input_count++;
574      } else {
575        add_opcode |= AddressingModeField::encode(kMode_Operand2_R);
576        inputs[1] = g.UseRegister(index);
577        input_count = 2;  // Base register and index.
578      }
579
580      InstructionOperand addr = g.TempRegister();
581      Emit(add_opcode, 1, &addr, input_count, inputs);
582
583      // Load both halves and move to an FP register.
584      InstructionOperand fp_lo = g.TempRegister();
585      InstructionOperand fp_hi = g.TempRegister();
586      opcode |= AddressingModeField::encode(kMode_Offset_RI);
587      Emit(opcode, fp_lo, addr, g.TempImmediate(0));
588      Emit(opcode, fp_hi, addr, g.TempImmediate(4));
589      Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), fp_lo, fp_hi);
590      return;
591    }
592    default:
593      // All other cases should support unaligned accesses.
594      UNREACHABLE();
595      return;
596  }
597}
598
599void InstructionSelector::VisitUnalignedStore(Node* node) {
600  ArmOperandGenerator g(this);
601  Node* base = node->InputAt(0);
602  Node* index = node->InputAt(1);
603  Node* value = node->InputAt(2);
604
605  InstructionOperand inputs[4];
606  size_t input_count = 0;
607
608  UnalignedStoreRepresentation store_rep =
609      UnalignedStoreRepresentationOf(node->op());
610
611  // Only floating point stores need to be specially handled; integer stores
612  // support unaligned access. We support unaligned FP stores by moving the
613  // value to integer registers first, then storing to the destination address.
614  switch (store_rep) {
615    case MachineRepresentation::kFloat32: {
616      inputs[input_count++] = g.TempRegister();
617      Emit(kArmVmovU32F32, inputs[0], g.UseRegister(value));
618      inputs[input_count++] = g.UseRegister(base);
619      EmitStore(this, kArmStr, input_count, inputs, index);
620      return;
621    }
622    case MachineRepresentation::kFloat64: {
623      // TODO(arm): use vst1.8 for this when NEON is available.
624      // Store a 64-bit floating point value using two 32-bit integer stores.
625      // Computing the store address here would require three live temporary
626      // registers (fp<63:32>, fp<31:0>, address), so compute base + 4 after
627      // storing the least-significant half of the value.
628
629      // First, move the 64-bit FP value into two temporary integer registers.
630      InstructionOperand fp[] = {g.TempRegister(), g.TempRegister()};
631      inputs[input_count++] = g.UseRegister(value);
632      Emit(kArmVmovU32U32F64, arraysize(fp), fp, input_count,
633           inputs);
634
635      // Store the least-significant half.
636      inputs[0] = fp[0];  // Low 32-bits of FP value.
637      inputs[input_count++] = g.UseRegister(base);  // First store base address.
638      EmitStore(this, kArmStr, input_count, inputs, index);
639
640      // Store the most-significant half.
641      InstructionOperand base4 = g.TempRegister();
642      Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_I), base4,
643           g.UseRegister(base), g.TempImmediate(4));  // Compute base + 4.
644      inputs[0] = fp[1];  // High 32-bits of FP value.
645      inputs[1] = base4;  // Second store base + 4 address.
646      EmitStore(this, kArmStr, input_count, inputs, index);
647      return;
648    }
649    default:
650      // All other cases should support unaligned accesses.
651      UNREACHABLE();
652      return;
653  }
654}
655
656void InstructionSelector::VisitCheckedLoad(Node* node) {
657  CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
658  ArmOperandGenerator g(this);
659  Node* const buffer = node->InputAt(0);
660  Node* const offset = node->InputAt(1);
661  Node* const length = node->InputAt(2);
662  ArchOpcode opcode = kArchNop;
663  switch (load_rep.representation()) {
664    case MachineRepresentation::kWord8:
665      opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
666      break;
667    case MachineRepresentation::kWord16:
668      opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
669      break;
670    case MachineRepresentation::kWord32:
671      opcode = kCheckedLoadWord32;
672      break;
673    case MachineRepresentation::kFloat32:
674      opcode = kCheckedLoadFloat32;
675      break;
676    case MachineRepresentation::kFloat64:
677      opcode = kCheckedLoadFloat64;
678      break;
679    case MachineRepresentation::kBit:      // Fall through.
680    case MachineRepresentation::kTaggedSigned:   // Fall through.
681    case MachineRepresentation::kTaggedPointer:  // Fall through.
682    case MachineRepresentation::kTagged:   // Fall through.
683    case MachineRepresentation::kWord64:   // Fall through.
684    case MachineRepresentation::kSimd128:  // Fall through.
685    case MachineRepresentation::kSimd1x4:  // Fall through.
686    case MachineRepresentation::kSimd1x8:  // Fall through.
687    case MachineRepresentation::kSimd1x16:  // Fall through.
688    case MachineRepresentation::kNone:
689      UNREACHABLE();
690      return;
691  }
692  InstructionOperand offset_operand = g.UseRegister(offset);
693  InstructionOperand length_operand = g.CanBeImmediate(length, kArmCmp)
694                                          ? g.UseImmediate(length)
695                                          : g.UseRegister(length);
696  Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
697       g.DefineAsRegister(node), offset_operand, length_operand,
698       g.UseRegister(buffer), offset_operand);
699}
700
701
702void InstructionSelector::VisitCheckedStore(Node* node) {
703  MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
704  ArmOperandGenerator g(this);
705  Node* const buffer = node->InputAt(0);
706  Node* const offset = node->InputAt(1);
707  Node* const length = node->InputAt(2);
708  Node* const value = node->InputAt(3);
709  ArchOpcode opcode = kArchNop;
710  switch (rep) {
711    case MachineRepresentation::kWord8:
712      opcode = kCheckedStoreWord8;
713      break;
714    case MachineRepresentation::kWord16:
715      opcode = kCheckedStoreWord16;
716      break;
717    case MachineRepresentation::kWord32:
718      opcode = kCheckedStoreWord32;
719      break;
720    case MachineRepresentation::kFloat32:
721      opcode = kCheckedStoreFloat32;
722      break;
723    case MachineRepresentation::kFloat64:
724      opcode = kCheckedStoreFloat64;
725      break;
726    case MachineRepresentation::kBit:      // Fall through.
727    case MachineRepresentation::kTaggedSigned:   // Fall through.
728    case MachineRepresentation::kTaggedPointer:  // Fall through.
729    case MachineRepresentation::kTagged:   // Fall through.
730    case MachineRepresentation::kWord64:   // Fall through.
731    case MachineRepresentation::kSimd128:  // Fall through.
732    case MachineRepresentation::kSimd1x4:  // Fall through.
733    case MachineRepresentation::kSimd1x8:  // Fall through.
734    case MachineRepresentation::kSimd1x16:  // Fall through.
735    case MachineRepresentation::kNone:
736      UNREACHABLE();
737      return;
738  }
739  InstructionOperand offset_operand = g.UseRegister(offset);
740  InstructionOperand length_operand = g.CanBeImmediate(length, kArmCmp)
741                                          ? g.UseImmediate(length)
742                                          : g.UseRegister(length);
743  Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), g.NoOutput(),
744       offset_operand, length_operand, g.UseRegister(value),
745       g.UseRegister(buffer), offset_operand);
746}
747
748
749namespace {
750
751void EmitBic(InstructionSelector* selector, Node* node, Node* left,
752             Node* right) {
753  ArmOperandGenerator g(selector);
754  InstructionCode opcode = kArmBic;
755  InstructionOperand value_operand;
756  InstructionOperand shift_operand;
757  if (TryMatchShift(selector, &opcode, right, &value_operand, &shift_operand)) {
758    selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
759                   value_operand, shift_operand);
760    return;
761  }
762  selector->Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
763                 g.DefineAsRegister(node), g.UseRegister(left),
764                 g.UseRegister(right));
765}
766
767
768void EmitUbfx(InstructionSelector* selector, Node* node, Node* left,
769              uint32_t lsb, uint32_t width) {
770  DCHECK_LE(1u, width);
771  DCHECK_LE(width, 32u - lsb);
772  ArmOperandGenerator g(selector);
773  selector->Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(left),
774                 g.TempImmediate(lsb), g.TempImmediate(width));
775}
776
777}  // namespace
778
779
780void InstructionSelector::VisitWord32And(Node* node) {
781  ArmOperandGenerator g(this);
782  Int32BinopMatcher m(node);
783  if (m.left().IsWord32Xor() && CanCover(node, m.left().node())) {
784    Int32BinopMatcher mleft(m.left().node());
785    if (mleft.right().Is(-1)) {
786      EmitBic(this, node, m.right().node(), mleft.left().node());
787      return;
788    }
789  }
790  if (m.right().IsWord32Xor() && CanCover(node, m.right().node())) {
791    Int32BinopMatcher mright(m.right().node());
792    if (mright.right().Is(-1)) {
793      EmitBic(this, node, m.left().node(), mright.left().node());
794      return;
795    }
796  }
797  if (m.right().HasValue()) {
798    uint32_t const value = m.right().Value();
799    uint32_t width = base::bits::CountPopulation32(value);
800    uint32_t leading_zeros = base::bits::CountLeadingZeros32(value);
801
802    // Try to merge SHR operations on the left hand input into this AND.
803    if (m.left().IsWord32Shr()) {
804      Int32BinopMatcher mshr(m.left().node());
805      if (mshr.right().HasValue()) {
806        uint32_t const shift = mshr.right().Value();
807
808        if (((shift == 8) || (shift == 16) || (shift == 24)) &&
809            ((value == 0xff) || (value == 0xffff))) {
810          // Merge SHR into AND by emitting a UXTB or UXTH instruction with a
811          // bytewise rotation.
812          Emit((value == 0xff) ? kArmUxtb : kArmUxth,
813               g.DefineAsRegister(m.node()), g.UseRegister(mshr.left().node()),
814               g.TempImmediate(mshr.right().Value()));
815          return;
816        } else if (IsSupported(ARMv7) && (width != 0) &&
817                   ((leading_zeros + width) == 32)) {
818          // Merge Shr into And by emitting a UBFX instruction.
819          DCHECK_EQ(0u, base::bits::CountTrailingZeros32(value));
820          if ((1 <= shift) && (shift <= 31)) {
821            // UBFX cannot extract bits past the register size, however since
822            // shifting the original value would have introduced some zeros we
823            // can still use UBFX with a smaller mask and the remaining bits
824            // will be zeros.
825            EmitUbfx(this, node, mshr.left().node(), shift,
826                     std::min(width, 32 - shift));
827            return;
828          }
829        }
830      }
831    } else if (value == 0xffff) {
832      // Emit UXTH for this AND. We don't bother testing for UXTB, as it's no
833      // better than AND 0xff for this operation.
834      Emit(kArmUxth, g.DefineAsRegister(m.node()),
835           g.UseRegister(m.left().node()), g.TempImmediate(0));
836      return;
837    }
838    if (g.CanBeImmediate(~value)) {
839      // Emit BIC for this AND by inverting the immediate value first.
840      Emit(kArmBic | AddressingModeField::encode(kMode_Operand2_I),
841           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
842           g.TempImmediate(~value));
843      return;
844    }
845    if (!g.CanBeImmediate(value) && IsSupported(ARMv7)) {
846      // If value has 9 to 23 contiguous set bits, and has the lsb set, we can
847      // replace this AND with UBFX. Other contiguous bit patterns have already
848      // been handled by BIC or will be handled by AND.
849      if ((width != 0) && ((leading_zeros + width) == 32) &&
850          (9 <= leading_zeros) && (leading_zeros <= 23)) {
851        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(value));
852        EmitUbfx(this, node, m.left().node(), 0, width);
853        return;
854      }
855
856      width = 32 - width;
857      leading_zeros = base::bits::CountLeadingZeros32(~value);
858      uint32_t lsb = base::bits::CountTrailingZeros32(~value);
859      if ((leading_zeros + width + lsb) == 32) {
860        // This AND can be replaced with BFC.
861        Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
862             g.TempImmediate(lsb), g.TempImmediate(width));
863        return;
864      }
865    }
866  }
867  VisitBinop(this, node, kArmAnd, kArmAnd);
868}
869
870
871void InstructionSelector::VisitWord32Or(Node* node) {
872  VisitBinop(this, node, kArmOrr, kArmOrr);
873}
874
875
876void InstructionSelector::VisitWord32Xor(Node* node) {
877  ArmOperandGenerator g(this);
878  Int32BinopMatcher m(node);
879  if (m.right().Is(-1)) {
880    InstructionCode opcode = kArmMvn;
881    InstructionOperand value_operand;
882    InstructionOperand shift_operand;
883    if (TryMatchShift(this, &opcode, m.left().node(), &value_operand,
884                      &shift_operand)) {
885      Emit(opcode, g.DefineAsRegister(node), value_operand, shift_operand);
886      return;
887    }
888    Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
889         g.DefineAsRegister(node), g.UseRegister(m.left().node()));
890    return;
891  }
892  VisitBinop(this, node, kArmEor, kArmEor);
893}
894
895
896namespace {
897
898template <typename TryMatchShift>
899void VisitShift(InstructionSelector* selector, Node* node,
900                TryMatchShift try_match_shift, FlagsContinuation* cont) {
901  ArmOperandGenerator g(selector);
902  InstructionCode opcode = kArmMov;
903  InstructionOperand inputs[4];
904  size_t input_count = 2;
905  InstructionOperand outputs[2];
906  size_t output_count = 0;
907
908  CHECK(try_match_shift(selector, &opcode, node, &inputs[0], &inputs[1]));
909
910  if (cont->IsBranch()) {
911    inputs[input_count++] = g.Label(cont->true_block());
912    inputs[input_count++] = g.Label(cont->false_block());
913  }
914
915  outputs[output_count++] = g.DefineAsRegister(node);
916  if (cont->IsSet()) {
917    outputs[output_count++] = g.DefineAsRegister(cont->result());
918  }
919
920  DCHECK_NE(0u, input_count);
921  DCHECK_NE(0u, output_count);
922  DCHECK_GE(arraysize(inputs), input_count);
923  DCHECK_GE(arraysize(outputs), output_count);
924  DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
925
926  opcode = cont->Encode(opcode);
927  if (cont->IsDeoptimize()) {
928    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
929                             cont->kind(), cont->reason(), cont->frame_state());
930  } else if (cont->IsTrap()) {
931    inputs[input_count++] = g.UseImmediate(cont->trap_id());
932    selector->Emit(opcode, output_count, outputs, input_count, inputs);
933  } else {
934    selector->Emit(opcode, output_count, outputs, input_count, inputs);
935  }
936}
937
938
939template <typename TryMatchShift>
940void VisitShift(InstructionSelector* selector, Node* node,
941                              TryMatchShift try_match_shift) {
942  FlagsContinuation cont;
943  VisitShift(selector, node, try_match_shift, &cont);
944}
945
946}  // namespace
947
948
949void InstructionSelector::VisitWord32Shl(Node* node) {
950  VisitShift(this, node, TryMatchLSL);
951}
952
953
954void InstructionSelector::VisitWord32Shr(Node* node) {
955  ArmOperandGenerator g(this);
956  Int32BinopMatcher m(node);
957  if (IsSupported(ARMv7) && m.left().IsWord32And() &&
958      m.right().IsInRange(0, 31)) {
959    uint32_t lsb = m.right().Value();
960    Int32BinopMatcher mleft(m.left().node());
961    if (mleft.right().HasValue()) {
962      uint32_t value = (mleft.right().Value() >> lsb) << lsb;
963      uint32_t width = base::bits::CountPopulation32(value);
964      uint32_t msb = base::bits::CountLeadingZeros32(value);
965      if (msb + width + lsb == 32) {
966        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(value));
967        return EmitUbfx(this, node, mleft.left().node(), lsb, width);
968      }
969    }
970  }
971  VisitShift(this, node, TryMatchLSR);
972}
973
974
975void InstructionSelector::VisitWord32Sar(Node* node) {
976  ArmOperandGenerator g(this);
977  Int32BinopMatcher m(node);
978  if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
979    Int32BinopMatcher mleft(m.left().node());
980    if (m.right().HasValue() && mleft.right().HasValue()) {
981      uint32_t sar = m.right().Value();
982      uint32_t shl = mleft.right().Value();
983      if ((sar == shl) && (sar == 16)) {
984        Emit(kArmSxth, g.DefineAsRegister(node),
985             g.UseRegister(mleft.left().node()), g.TempImmediate(0));
986        return;
987      } else if ((sar == shl) && (sar == 24)) {
988        Emit(kArmSxtb, g.DefineAsRegister(node),
989             g.UseRegister(mleft.left().node()), g.TempImmediate(0));
990        return;
991      } else if (IsSupported(ARMv7) && (sar >= shl)) {
992        Emit(kArmSbfx, g.DefineAsRegister(node),
993             g.UseRegister(mleft.left().node()), g.TempImmediate(sar - shl),
994             g.TempImmediate(32 - sar));
995        return;
996      }
997    }
998  }
999  VisitShift(this, node, TryMatchASR);
1000}
1001
1002void InstructionSelector::VisitInt32PairAdd(Node* node) {
1003  ArmOperandGenerator g(this);
1004
1005  Node* projection1 = NodeProperties::FindProjection(node, 1);
1006  if (projection1) {
1007    // We use UseUniqueRegister here to avoid register sharing with the output
1008    // registers.
1009    InstructionOperand inputs[] = {
1010        g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
1011        g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
1012
1013    InstructionOperand outputs[] = {
1014        g.DefineAsRegister(node),
1015        g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
1016
1017    Emit(kArmAddPair, 2, outputs, 4, inputs);
1018  } else {
1019    // The high word of the result is not used, so we emit the standard 32 bit
1020    // instruction.
1021    Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R),
1022         g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
1023         g.UseRegister(node->InputAt(2)));
1024  }
1025}
1026
1027void InstructionSelector::VisitInt32PairSub(Node* node) {
1028  ArmOperandGenerator g(this);
1029
1030  Node* projection1 = NodeProperties::FindProjection(node, 1);
1031  if (projection1) {
1032    // We use UseUniqueRegister here to avoid register sharing with the output
1033    // register.
1034    InstructionOperand inputs[] = {
1035        g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
1036        g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
1037
1038    InstructionOperand outputs[] = {
1039        g.DefineAsRegister(node),
1040        g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
1041
1042    Emit(kArmSubPair, 2, outputs, 4, inputs);
1043  } else {
1044    // The high word of the result is not used, so we emit the standard 32 bit
1045    // instruction.
1046    Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_R),
1047         g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
1048         g.UseRegister(node->InputAt(2)));
1049  }
1050}
1051
1052void InstructionSelector::VisitInt32PairMul(Node* node) {
1053  ArmOperandGenerator g(this);
1054  Node* projection1 = NodeProperties::FindProjection(node, 1);
1055  if (projection1) {
1056    InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
1057                                   g.UseUniqueRegister(node->InputAt(1)),
1058                                   g.UseUniqueRegister(node->InputAt(2)),
1059                                   g.UseUniqueRegister(node->InputAt(3))};
1060
1061    InstructionOperand outputs[] = {
1062        g.DefineAsRegister(node),
1063        g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
1064
1065    Emit(kArmMulPair, 2, outputs, 4, inputs);
1066  } else {
1067    // The high word of the result is not used, so we emit the standard 32 bit
1068    // instruction.
1069    Emit(kArmMul | AddressingModeField::encode(kMode_Operand2_R),
1070         g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
1071         g.UseRegister(node->InputAt(2)));
1072  }
1073}
1074
1075namespace {
1076// Shared routine for multiple shift operations.
1077void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
1078                          Node* node) {
1079  ArmOperandGenerator g(selector);
1080  // We use g.UseUniqueRegister here to guarantee that there is
1081  // no register aliasing of input registers with output registers.
1082  Int32Matcher m(node->InputAt(2));
1083  InstructionOperand shift_operand;
1084  if (m.HasValue()) {
1085    shift_operand = g.UseImmediate(m.node());
1086  } else {
1087    shift_operand = g.UseUniqueRegister(m.node());
1088  }
1089
1090  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
1091                                 g.UseUniqueRegister(node->InputAt(1)),
1092                                 shift_operand};
1093
1094  Node* projection1 = NodeProperties::FindProjection(node, 1);
1095
1096  InstructionOperand outputs[2];
1097  InstructionOperand temps[1];
1098  int32_t output_count = 0;
1099  int32_t temp_count = 0;
1100
1101  outputs[output_count++] = g.DefineAsRegister(node);
1102  if (projection1) {
1103    outputs[output_count++] = g.DefineAsRegister(projection1);
1104  } else {
1105    temps[temp_count++] = g.TempRegister();
1106  }
1107
1108  selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
1109}
1110}  // namespace
1111void InstructionSelector::VisitWord32PairShl(Node* node) {
1112  VisitWord32PairShift(this, kArmLslPair, node);
1113}
1114
1115void InstructionSelector::VisitWord32PairShr(Node* node) {
1116  VisitWord32PairShift(this, kArmLsrPair, node);
1117}
1118
1119void InstructionSelector::VisitWord32PairSar(Node* node) {
1120  VisitWord32PairShift(this, kArmAsrPair, node);
1121}
1122
1123void InstructionSelector::VisitWord32Ror(Node* node) {
1124  VisitShift(this, node, TryMatchROR);
1125}
1126
1127void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
1128
1129void InstructionSelector::VisitWord32ReverseBits(Node* node) {
1130  DCHECK(IsSupported(ARMv7));
1131  VisitRR(this, kArmRbit, node);
1132}
1133
1134void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
1135
1136void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
1137
1138void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
1139
1140
1141void InstructionSelector::VisitInt32Add(Node* node) {
1142  ArmOperandGenerator g(this);
1143  Int32BinopMatcher m(node);
1144  if (CanCover(node, m.left().node())) {
1145    switch (m.left().opcode()) {
1146      case IrOpcode::kInt32Mul: {
1147        Int32BinopMatcher mleft(m.left().node());
1148        Emit(kArmMla, g.DefineAsRegister(node),
1149             g.UseRegister(mleft.left().node()),
1150             g.UseRegister(mleft.right().node()),
1151             g.UseRegister(m.right().node()));
1152        return;
1153      }
1154      case IrOpcode::kInt32MulHigh: {
1155        Int32BinopMatcher mleft(m.left().node());
1156        Emit(kArmSmmla, g.DefineAsRegister(node),
1157             g.UseRegister(mleft.left().node()),
1158             g.UseRegister(mleft.right().node()),
1159             g.UseRegister(m.right().node()));
1160        return;
1161      }
1162      case IrOpcode::kWord32And: {
1163        Int32BinopMatcher mleft(m.left().node());
1164        if (mleft.right().Is(0xff)) {
1165          Emit(kArmUxtab, g.DefineAsRegister(node),
1166               g.UseRegister(m.right().node()),
1167               g.UseRegister(mleft.left().node()), g.TempImmediate(0));
1168          return;
1169        } else if (mleft.right().Is(0xffff)) {
1170          Emit(kArmUxtah, g.DefineAsRegister(node),
1171               g.UseRegister(m.right().node()),
1172               g.UseRegister(mleft.left().node()), g.TempImmediate(0));
1173          return;
1174        }
1175      }
1176      case IrOpcode::kWord32Sar: {
1177        Int32BinopMatcher mleft(m.left().node());
1178        if (CanCover(mleft.node(), mleft.left().node()) &&
1179            mleft.left().IsWord32Shl()) {
1180          Int32BinopMatcher mleftleft(mleft.left().node());
1181          if (mleft.right().Is(24) && mleftleft.right().Is(24)) {
1182            Emit(kArmSxtab, g.DefineAsRegister(node),
1183                 g.UseRegister(m.right().node()),
1184                 g.UseRegister(mleftleft.left().node()), g.TempImmediate(0));
1185            return;
1186          } else if (mleft.right().Is(16) && mleftleft.right().Is(16)) {
1187            Emit(kArmSxtah, g.DefineAsRegister(node),
1188                 g.UseRegister(m.right().node()),
1189                 g.UseRegister(mleftleft.left().node()), g.TempImmediate(0));
1190            return;
1191          }
1192        }
1193      }
1194      default:
1195        break;
1196    }
1197  }
1198  if (CanCover(node, m.right().node())) {
1199    switch (m.right().opcode()) {
1200      case IrOpcode::kInt32Mul: {
1201        Int32BinopMatcher mright(m.right().node());
1202        Emit(kArmMla, g.DefineAsRegister(node),
1203             g.UseRegister(mright.left().node()),
1204             g.UseRegister(mright.right().node()),
1205             g.UseRegister(m.left().node()));
1206        return;
1207      }
1208      case IrOpcode::kInt32MulHigh: {
1209        Int32BinopMatcher mright(m.right().node());
1210        Emit(kArmSmmla, g.DefineAsRegister(node),
1211             g.UseRegister(mright.left().node()),
1212             g.UseRegister(mright.right().node()),
1213             g.UseRegister(m.left().node()));
1214        return;
1215      }
1216      case IrOpcode::kWord32And: {
1217        Int32BinopMatcher mright(m.right().node());
1218        if (mright.right().Is(0xff)) {
1219          Emit(kArmUxtab, g.DefineAsRegister(node),
1220               g.UseRegister(m.left().node()),
1221               g.UseRegister(mright.left().node()), g.TempImmediate(0));
1222          return;
1223        } else if (mright.right().Is(0xffff)) {
1224          Emit(kArmUxtah, g.DefineAsRegister(node),
1225               g.UseRegister(m.left().node()),
1226               g.UseRegister(mright.left().node()), g.TempImmediate(0));
1227          return;
1228        }
1229      }
1230      case IrOpcode::kWord32Sar: {
1231        Int32BinopMatcher mright(m.right().node());
1232        if (CanCover(mright.node(), mright.left().node()) &&
1233            mright.left().IsWord32Shl()) {
1234          Int32BinopMatcher mrightleft(mright.left().node());
1235          if (mright.right().Is(24) && mrightleft.right().Is(24)) {
1236            Emit(kArmSxtab, g.DefineAsRegister(node),
1237                 g.UseRegister(m.left().node()),
1238                 g.UseRegister(mrightleft.left().node()), g.TempImmediate(0));
1239            return;
1240          } else if (mright.right().Is(16) && mrightleft.right().Is(16)) {
1241            Emit(kArmSxtah, g.DefineAsRegister(node),
1242                 g.UseRegister(m.left().node()),
1243                 g.UseRegister(mrightleft.left().node()), g.TempImmediate(0));
1244            return;
1245          }
1246        }
1247      }
1248      default:
1249        break;
1250    }
1251  }
1252  VisitBinop(this, node, kArmAdd, kArmAdd);
1253}
1254
1255
1256void InstructionSelector::VisitInt32Sub(Node* node) {
1257  ArmOperandGenerator g(this);
1258  Int32BinopMatcher m(node);
1259  if (IsSupported(ARMv7) && m.right().IsInt32Mul() &&
1260      CanCover(node, m.right().node())) {
1261    Int32BinopMatcher mright(m.right().node());
1262    Emit(kArmMls, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
1263         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
1264    return;
1265  }
1266  VisitBinop(this, node, kArmSub, kArmRsb);
1267}
1268
1269namespace {
1270
1271void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
1272                              FlagsContinuation* cont) {
1273  ArmOperandGenerator g(selector);
1274  Int32BinopMatcher m(node);
1275  InstructionOperand result_operand = g.DefineAsRegister(node);
1276  InstructionOperand temp_operand = g.TempRegister();
1277  InstructionOperand outputs[] = {result_operand, temp_operand};
1278  InstructionOperand inputs[] = {g.UseRegister(m.left().node()),
1279                                 g.UseRegister(m.right().node())};
1280  selector->Emit(kArmSmull, 2, outputs, 2, inputs);
1281
1282  // result operand needs shift operator.
1283  InstructionOperand shift_31 = g.UseImmediate(31);
1284  InstructionCode opcode = cont->Encode(kArmCmp) |
1285                           AddressingModeField::encode(kMode_Operand2_R_ASR_I);
1286  if (cont->IsBranch()) {
1287    selector->Emit(opcode, g.NoOutput(), temp_operand, result_operand, shift_31,
1288                   g.Label(cont->true_block()), g.Label(cont->false_block()));
1289  } else if (cont->IsDeoptimize()) {
1290    InstructionOperand in[] = {temp_operand, result_operand, shift_31};
1291    selector->EmitDeoptimize(opcode, 0, nullptr, 3, in, cont->kind(),
1292                             cont->reason(), cont->frame_state());
1293  } else if (cont->IsSet()) {
1294    selector->Emit(opcode, g.DefineAsRegister(cont->result()), temp_operand,
1295                   result_operand, shift_31);
1296  } else {
1297    DCHECK(cont->IsTrap());
1298    InstructionOperand in[] = {temp_operand, result_operand, shift_31,
1299                               g.UseImmediate(cont->trap_id())};
1300    selector->Emit(opcode, 0, nullptr, 4, in);
1301  }
1302}
1303
1304}  // namespace
1305
1306void InstructionSelector::VisitInt32Mul(Node* node) {
1307  ArmOperandGenerator g(this);
1308  Int32BinopMatcher m(node);
1309  if (m.right().HasValue() && m.right().Value() > 0) {
1310    int32_t value = m.right().Value();
1311    if (base::bits::IsPowerOfTwo32(value - 1)) {
1312      Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
1313           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1314           g.UseRegister(m.left().node()),
1315           g.TempImmediate(WhichPowerOf2(value - 1)));
1316      return;
1317    }
1318    if (value < kMaxInt && base::bits::IsPowerOfTwo32(value + 1)) {
1319      Emit(kArmRsb | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
1320           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1321           g.UseRegister(m.left().node()),
1322           g.TempImmediate(WhichPowerOf2(value + 1)));
1323      return;
1324    }
1325  }
1326  VisitRRR(this, kArmMul, node);
1327}
1328
1329void InstructionSelector::VisitUint32MulHigh(Node* node) {
1330  ArmOperandGenerator g(this);
1331  InstructionOperand outputs[] = {g.TempRegister(), g.DefineAsRegister(node)};
1332  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
1333                                 g.UseRegister(node->InputAt(1))};
1334  Emit(kArmUmull, arraysize(outputs), outputs, arraysize(inputs), inputs);
1335}
1336
1337
1338void InstructionSelector::VisitInt32Div(Node* node) {
1339  VisitDiv(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
1340}
1341
1342
1343void InstructionSelector::VisitUint32Div(Node* node) {
1344  VisitDiv(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
1345}
1346
1347
1348void InstructionSelector::VisitInt32Mod(Node* node) {
1349  VisitMod(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
1350}
1351
1352
1353void InstructionSelector::VisitUint32Mod(Node* node) {
1354  VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
1355}
1356
1357#define RR_OP_LIST(V)                                \
1358  V(Word32Clz, kArmClz)                              \
1359  V(ChangeFloat32ToFloat64, kArmVcvtF64F32)          \
1360  V(RoundInt32ToFloat32, kArmVcvtF32S32)             \
1361  V(RoundUint32ToFloat32, kArmVcvtF32U32)            \
1362  V(ChangeInt32ToFloat64, kArmVcvtF64S32)            \
1363  V(ChangeUint32ToFloat64, kArmVcvtF64U32)           \
1364  V(TruncateFloat32ToInt32, kArmVcvtS32F32)          \
1365  V(TruncateFloat32ToUint32, kArmVcvtU32F32)         \
1366  V(ChangeFloat64ToInt32, kArmVcvtS32F64)            \
1367  V(ChangeFloat64ToUint32, kArmVcvtU32F64)           \
1368  V(TruncateFloat64ToUint32, kArmVcvtU32F64)         \
1369  V(TruncateFloat64ToFloat32, kArmVcvtF32F64)        \
1370  V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \
1371  V(RoundFloat64ToInt32, kArmVcvtS32F64)             \
1372  V(BitcastFloat32ToInt32, kArmVmovU32F32)           \
1373  V(BitcastInt32ToFloat32, kArmVmovF32U32)           \
1374  V(Float64ExtractLowWord32, kArmVmovLowU32F64)      \
1375  V(Float64ExtractHighWord32, kArmVmovHighU32F64)    \
1376  V(Float64SilenceNaN, kArmFloat64SilenceNaN)        \
1377  V(Float32Abs, kArmVabsF32)                         \
1378  V(Float64Abs, kArmVabsF64)                         \
1379  V(Float32Neg, kArmVnegF32)                         \
1380  V(Float64Neg, kArmVnegF64)                         \
1381  V(Float32Sqrt, kArmVsqrtF32)                       \
1382  V(Float64Sqrt, kArmVsqrtF64)
1383
1384#define RR_OP_LIST_V8(V)                 \
1385  V(Float32RoundDown, kArmVrintmF32)     \
1386  V(Float64RoundDown, kArmVrintmF64)     \
1387  V(Float32RoundUp, kArmVrintpF32)       \
1388  V(Float64RoundUp, kArmVrintpF64)       \
1389  V(Float32RoundTruncate, kArmVrintzF32) \
1390  V(Float64RoundTruncate, kArmVrintzF64) \
1391  V(Float64RoundTiesAway, kArmVrintaF64) \
1392  V(Float32RoundTiesEven, kArmVrintnF32) \
1393  V(Float64RoundTiesEven, kArmVrintnF64)
1394
1395#define RRR_OP_LIST(V)          \
1396  V(Int32MulHigh, kArmSmmul)    \
1397  V(Float32Mul, kArmVmulF32)    \
1398  V(Float64Mul, kArmVmulF64)    \
1399  V(Float32Div, kArmVdivF32)    \
1400  V(Float64Div, kArmVdivF64)    \
1401  V(Float32Max, kArmFloat32Max) \
1402  V(Float64Max, kArmFloat64Max) \
1403  V(Float32Min, kArmFloat32Min) \
1404  V(Float64Min, kArmFloat64Min)
1405
1406#define RR_VISITOR(Name, opcode)                      \
1407  void InstructionSelector::Visit##Name(Node* node) { \
1408    VisitRR(this, opcode, node);                      \
1409  }
1410RR_OP_LIST(RR_VISITOR)
1411#undef RR_VISITOR
1412
1413#define RR_VISITOR_V8(Name, opcode)                   \
1414  void InstructionSelector::Visit##Name(Node* node) { \
1415    DCHECK(CpuFeatures::IsSupported(ARMv8));          \
1416    VisitRR(this, opcode, node);                      \
1417  }
1418RR_OP_LIST_V8(RR_VISITOR_V8)
1419#undef RR_VISITOR_V8
1420
1421#define RRR_VISITOR(Name, opcode)                     \
1422  void InstructionSelector::Visit##Name(Node* node) { \
1423    VisitRRR(this, opcode, node);                     \
1424  }
1425RRR_OP_LIST(RRR_VISITOR)
1426#undef RRR_VISITOR
1427
1428void InstructionSelector::VisitFloat32Add(Node* node) {
1429  ArmOperandGenerator g(this);
1430  Float32BinopMatcher m(node);
1431  if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
1432    Float32BinopMatcher mleft(m.left().node());
1433    Emit(kArmVmlaF32, g.DefineSameAsFirst(node),
1434         g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
1435         g.UseRegister(mleft.right().node()));
1436    return;
1437  }
1438  if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
1439    Float32BinopMatcher mright(m.right().node());
1440    Emit(kArmVmlaF32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
1441         g.UseRegister(mright.left().node()),
1442         g.UseRegister(mright.right().node()));
1443    return;
1444  }
1445  VisitRRR(this, kArmVaddF32, node);
1446}
1447
1448
1449void InstructionSelector::VisitFloat64Add(Node* node) {
1450  ArmOperandGenerator g(this);
1451  Float64BinopMatcher m(node);
1452  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
1453    Float64BinopMatcher mleft(m.left().node());
1454    Emit(kArmVmlaF64, g.DefineSameAsFirst(node),
1455         g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
1456         g.UseRegister(mleft.right().node()));
1457    return;
1458  }
1459  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
1460    Float64BinopMatcher mright(m.right().node());
1461    Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
1462         g.UseRegister(mright.left().node()),
1463         g.UseRegister(mright.right().node()));
1464    return;
1465  }
1466  VisitRRR(this, kArmVaddF64, node);
1467}
1468
1469void InstructionSelector::VisitFloat32Sub(Node* node) {
1470  ArmOperandGenerator g(this);
1471  Float32BinopMatcher m(node);
1472  if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
1473    Float32BinopMatcher mright(m.right().node());
1474    Emit(kArmVmlsF32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
1475         g.UseRegister(mright.left().node()),
1476         g.UseRegister(mright.right().node()));
1477    return;
1478  }
1479  VisitRRR(this, kArmVsubF32, node);
1480}
1481
1482void InstructionSelector::VisitFloat64Sub(Node* node) {
1483  ArmOperandGenerator g(this);
1484  Float64BinopMatcher m(node);
1485  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
1486    Float64BinopMatcher mright(m.right().node());
1487    Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
1488         g.UseRegister(mright.left().node()),
1489         g.UseRegister(mright.right().node()));
1490    return;
1491  }
1492  VisitRRR(this, kArmVsubF64, node);
1493}
1494
1495void InstructionSelector::VisitFloat64Mod(Node* node) {
1496  ArmOperandGenerator g(this);
1497  Emit(kArmVmodF64, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
1498       g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
1499}
1500
1501void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
1502                                                   InstructionCode opcode) {
1503  ArmOperandGenerator g(this);
1504  Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
1505       g.UseFixed(node->InputAt(1), d1))
1506      ->MarkAsCall();
1507}
1508
1509void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
1510                                                  InstructionCode opcode) {
1511  ArmOperandGenerator g(this);
1512  Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0))
1513      ->MarkAsCall();
1514}
1515
1516void InstructionSelector::EmitPrepareArguments(
1517    ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
1518    Node* node) {
1519  ArmOperandGenerator g(this);
1520
1521  // Prepare for C function call.
1522  if (descriptor->IsCFunctionCall()) {
1523    Emit(kArchPrepareCallCFunction |
1524             MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
1525         0, nullptr, 0, nullptr);
1526
1527    // Poke any stack arguments.
1528    for (size_t n = 0; n < arguments->size(); ++n) {
1529      PushParameter input = (*arguments)[n];
1530      if (input.node()) {
1531        int slot = static_cast<int>(n);
1532        Emit(kArmPoke | MiscField::encode(slot), g.NoOutput(),
1533             g.UseRegister(input.node()));
1534      }
1535    }
1536  } else {
1537    // Push any stack arguments.
1538    for (PushParameter input : base::Reversed(*arguments)) {
1539      // Skip any alignment holes in pushed nodes.
1540      if (input.node() == nullptr) continue;
1541      Emit(kArmPush, g.NoOutput(), g.UseRegister(input.node()));
1542    }
1543  }
1544}
1545
1546
1547bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
1548
1549int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
1550
1551namespace {
1552
1553// Shared routine for multiple compare operations.
1554void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
1555                  InstructionOperand left, InstructionOperand right,
1556                  FlagsContinuation* cont) {
1557  ArmOperandGenerator g(selector);
1558  opcode = cont->Encode(opcode);
1559  if (cont->IsBranch()) {
1560    selector->Emit(opcode, g.NoOutput(), left, right,
1561                   g.Label(cont->true_block()), g.Label(cont->false_block()));
1562  } else if (cont->IsDeoptimize()) {
1563    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
1564                             cont->reason(), cont->frame_state());
1565  } else if (cont->IsSet()) {
1566    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
1567  } else {
1568    DCHECK(cont->IsTrap());
1569    selector->Emit(opcode, g.NoOutput(), left, right,
1570                   g.UseImmediate(cont->trap_id()));
1571  }
1572}
1573
1574
1575// Shared routine for multiple float32 compare operations.
1576void VisitFloat32Compare(InstructionSelector* selector, Node* node,
1577                         FlagsContinuation* cont) {
1578  ArmOperandGenerator g(selector);
1579  Float32BinopMatcher m(node);
1580  if (m.right().Is(0.0f)) {
1581    VisitCompare(selector, kArmVcmpF32, g.UseRegister(m.left().node()),
1582                 g.UseImmediate(m.right().node()), cont);
1583  } else if (m.left().Is(0.0f)) {
1584    cont->Commute();
1585    VisitCompare(selector, kArmVcmpF32, g.UseRegister(m.right().node()),
1586                 g.UseImmediate(m.left().node()), cont);
1587  } else {
1588    VisitCompare(selector, kArmVcmpF32, g.UseRegister(m.left().node()),
1589                 g.UseRegister(m.right().node()), cont);
1590  }
1591}
1592
1593
1594// Shared routine for multiple float64 compare operations.
1595void VisitFloat64Compare(InstructionSelector* selector, Node* node,
1596                         FlagsContinuation* cont) {
1597  ArmOperandGenerator g(selector);
1598  Float64BinopMatcher m(node);
1599  if (m.right().Is(0.0)) {
1600    VisitCompare(selector, kArmVcmpF64, g.UseRegister(m.left().node()),
1601                 g.UseImmediate(m.right().node()), cont);
1602  } else if (m.left().Is(0.0)) {
1603    cont->Commute();
1604    VisitCompare(selector, kArmVcmpF64, g.UseRegister(m.right().node()),
1605                 g.UseImmediate(m.left().node()), cont);
1606  } else {
1607    VisitCompare(selector, kArmVcmpF64, g.UseRegister(m.left().node()),
1608                 g.UseRegister(m.right().node()), cont);
1609  }
1610}
1611
1612// Check whether we can convert:
1613// ((a <op> b) cmp 0), b.<cond>
1614// to:
1615// (a <ops> b), b.<cond'>
1616// where <ops> is the flag setting version of <op>.
1617// We only generate conditions <cond'> that are a combination of the N
1618// and Z flags. This avoids the need to make this function dependent on
1619// the flag-setting operation.
1620bool CanUseFlagSettingBinop(FlagsCondition cond) {
1621  switch (cond) {
1622    case kEqual:
1623    case kNotEqual:
1624    case kSignedLessThan:
1625    case kSignedGreaterThanOrEqual:
1626    case kUnsignedLessThanOrEqual:  // x <= 0 -> x == 0
1627    case kUnsignedGreaterThan:      // x > 0 -> x != 0
1628      return true;
1629    default:
1630      return false;
1631  }
1632}
1633
1634// Map <cond> to <cond'> so that the following transformation is possible:
1635// ((a <op> b) cmp 0), b.<cond>
1636// to:
1637// (a <ops> b), b.<cond'>
1638// where <ops> is the flag setting version of <op>.
1639FlagsCondition MapForFlagSettingBinop(FlagsCondition cond) {
1640  DCHECK(CanUseFlagSettingBinop(cond));
1641  switch (cond) {
1642    case kEqual:
1643    case kNotEqual:
1644      return cond;
1645    case kSignedLessThan:
1646      return kNegative;
1647    case kSignedGreaterThanOrEqual:
1648      return kPositiveOrZero;
1649    case kUnsignedLessThanOrEqual:  // x <= 0 -> x == 0
1650      return kEqual;
1651    case kUnsignedGreaterThan:  // x > 0 -> x != 0
1652      return kNotEqual;
1653    default:
1654      UNREACHABLE();
1655      return cond;
1656  }
1657}
1658
1659// Check if we can perform the transformation:
1660// ((a <op> b) cmp 0), b.<cond>
1661// to:
1662// (a <ops> b), b.<cond'>
1663// where <ops> is the flag setting version of <op>, and if so,
1664// updates {node}, {opcode} and {cont} accordingly.
1665void MaybeReplaceCmpZeroWithFlagSettingBinop(InstructionSelector* selector,
1666                                             Node** node, Node* binop,
1667                                             InstructionCode* opcode,
1668                                             FlagsCondition cond,
1669                                             FlagsContinuation* cont) {
1670  InstructionCode binop_opcode;
1671  InstructionCode no_output_opcode;
1672  switch (binop->opcode()) {
1673    case IrOpcode::kInt32Add:
1674      binop_opcode = kArmAdd;
1675      no_output_opcode = kArmCmn;
1676      break;
1677    case IrOpcode::kWord32And:
1678      binop_opcode = kArmAnd;
1679      no_output_opcode = kArmTst;
1680      break;
1681    case IrOpcode::kWord32Or:
1682      binop_opcode = kArmOrr;
1683      no_output_opcode = kArmOrr;
1684      break;
1685    case IrOpcode::kWord32Xor:
1686      binop_opcode = kArmEor;
1687      no_output_opcode = kArmTeq;
1688      break;
1689    default:
1690      UNREACHABLE();
1691      return;
1692  }
1693  if (selector->CanCover(*node, binop)) {
1694    // The comparison is the only user of {node}.
1695    cont->Overwrite(MapForFlagSettingBinop(cond));
1696    *opcode = no_output_opcode;
1697    *node = binop;
1698  } else if (selector->IsOnlyUserOfNodeInSameBlock(*node, binop)) {
1699    // We can also handle the case where the {node} and the comparison are in
1700    // the same basic block, and the comparison is the only user of {node} in
1701    // this basic block ({node} has users in other basic blocks).
1702    cont->Overwrite(MapForFlagSettingBinop(cond));
1703    *opcode = binop_opcode;
1704    *node = binop;
1705  }
1706}
1707
1708// Shared routine for multiple word compare operations.
1709void VisitWordCompare(InstructionSelector* selector, Node* node,
1710                      InstructionCode opcode, FlagsContinuation* cont) {
1711  ArmOperandGenerator g(selector);
1712  Int32BinopMatcher m(node);
1713  InstructionOperand inputs[5];
1714  size_t input_count = 0;
1715  InstructionOperand outputs[2];
1716  size_t output_count = 0;
1717  bool has_result = (opcode != kArmCmp) && (opcode != kArmCmn) &&
1718                    (opcode != kArmTst) && (opcode != kArmTeq);
1719
1720  if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
1721                               &input_count, &inputs[1])) {
1722    inputs[0] = g.UseRegister(m.left().node());
1723    input_count++;
1724  } else if (TryMatchImmediateOrShift(selector, &opcode, m.left().node(),
1725                                      &input_count, &inputs[1])) {
1726    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
1727    inputs[0] = g.UseRegister(m.right().node());
1728    input_count++;
1729  } else {
1730    opcode |= AddressingModeField::encode(kMode_Operand2_R);
1731    inputs[input_count++] = g.UseRegister(m.left().node());
1732    inputs[input_count++] = g.UseRegister(m.right().node());
1733  }
1734
1735  if (has_result) {
1736    if (cont->IsDeoptimize()) {
1737      // If we can deoptimize as a result of the binop, we need to make sure
1738      // that the deopt inputs are not overwritten by the binop result. One way
1739      // to achieve that is to declare the output register as same-as-first.
1740      outputs[output_count++] = g.DefineSameAsFirst(node);
1741    } else {
1742      outputs[output_count++] = g.DefineAsRegister(node);
1743    }
1744  }
1745
1746  if (cont->IsBranch()) {
1747    inputs[input_count++] = g.Label(cont->true_block());
1748    inputs[input_count++] = g.Label(cont->false_block());
1749  } else if (cont->IsSet()) {
1750    outputs[output_count++] = g.DefineAsRegister(cont->result());
1751  }
1752
1753  DCHECK_NE(0u, input_count);
1754  DCHECK_GE(arraysize(inputs), input_count);
1755  DCHECK_GE(arraysize(outputs), output_count);
1756
1757  opcode = cont->Encode(opcode);
1758  if (cont->IsDeoptimize()) {
1759    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
1760                             cont->kind(), cont->reason(), cont->frame_state());
1761  } else if (cont->IsTrap()) {
1762    inputs[input_count++] = g.UseImmediate(cont->trap_id());
1763    selector->Emit(opcode, output_count, outputs, input_count, inputs);
1764  } else {
1765    selector->Emit(opcode, output_count, outputs, input_count, inputs);
1766  }
1767}
1768
1769
1770void VisitWordCompare(InstructionSelector* selector, Node* node,
1771                      FlagsContinuation* cont) {
1772  InstructionCode opcode = kArmCmp;
1773  Int32BinopMatcher m(node);
1774
1775  FlagsCondition cond = cont->condition();
1776  if (m.right().Is(0) && (m.left().IsInt32Add() || m.left().IsWord32Or() ||
1777                          m.left().IsWord32And() || m.left().IsWord32Xor())) {
1778    // Emit flag setting instructions for comparisons against zero.
1779    if (CanUseFlagSettingBinop(cond)) {
1780      Node* binop = m.left().node();
1781      MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, binop, &opcode,
1782                                              cond, cont);
1783    }
1784  } else if (m.left().Is(0) &&
1785             (m.right().IsInt32Add() || m.right().IsWord32Or() ||
1786              m.right().IsWord32And() || m.right().IsWord32Xor())) {
1787    // Same as above, but we need to commute the condition before we
1788    // continue with the rest of the checks.
1789    cond = CommuteFlagsCondition(cond);
1790    if (CanUseFlagSettingBinop(cond)) {
1791      Node* binop = m.right().node();
1792      MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, binop, &opcode,
1793                                              cond, cont);
1794    }
1795  }
1796
1797  VisitWordCompare(selector, node, opcode, cont);
1798}
1799
1800
1801// Shared routine for word comparisons against zero.
1802void VisitWordCompareZero(InstructionSelector* selector, Node* user,
1803                          Node* value, FlagsContinuation* cont) {
1804  // Try to combine with comparisons against 0 by simply inverting the branch.
1805  while (value->opcode() == IrOpcode::kWord32Equal &&
1806         selector->CanCover(user, value)) {
1807    Int32BinopMatcher m(value);
1808    if (!m.right().Is(0)) break;
1809
1810    user = value;
1811    value = m.left().node();
1812    cont->Negate();
1813  }
1814
1815  if (selector->CanCover(user, value)) {
1816    switch (value->opcode()) {
1817      case IrOpcode::kWord32Equal:
1818        cont->OverwriteAndNegateIfEqual(kEqual);
1819        return VisitWordCompare(selector, value, cont);
1820      case IrOpcode::kInt32LessThan:
1821        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
1822        return VisitWordCompare(selector, value, cont);
1823      case IrOpcode::kInt32LessThanOrEqual:
1824        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
1825        return VisitWordCompare(selector, value, cont);
1826      case IrOpcode::kUint32LessThan:
1827        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
1828        return VisitWordCompare(selector, value, cont);
1829      case IrOpcode::kUint32LessThanOrEqual:
1830        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
1831        return VisitWordCompare(selector, value, cont);
1832      case IrOpcode::kFloat32Equal:
1833        cont->OverwriteAndNegateIfEqual(kEqual);
1834        return VisitFloat32Compare(selector, value, cont);
1835      case IrOpcode::kFloat32LessThan:
1836        cont->OverwriteAndNegateIfEqual(kFloatLessThan);
1837        return VisitFloat32Compare(selector, value, cont);
1838      case IrOpcode::kFloat32LessThanOrEqual:
1839        cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
1840        return VisitFloat32Compare(selector, value, cont);
1841      case IrOpcode::kFloat64Equal:
1842        cont->OverwriteAndNegateIfEqual(kEqual);
1843        return VisitFloat64Compare(selector, value, cont);
1844      case IrOpcode::kFloat64LessThan:
1845        cont->OverwriteAndNegateIfEqual(kFloatLessThan);
1846        return VisitFloat64Compare(selector, value, cont);
1847      case IrOpcode::kFloat64LessThanOrEqual:
1848        cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
1849        return VisitFloat64Compare(selector, value, cont);
1850      case IrOpcode::kProjection:
1851        // Check if this is the overflow output projection of an
1852        // <Operation>WithOverflow node.
1853        if (ProjectionIndexOf(value->op()) == 1u) {
1854          // We cannot combine the <Operation>WithOverflow with this branch
1855          // unless the 0th projection (the use of the actual value of the
1856          // <Operation> is either nullptr, which means there's no use of the
1857          // actual value, or was already defined, which means it is scheduled
1858          // *AFTER* this branch).
1859          Node* const node = value->InputAt(0);
1860          Node* const result = NodeProperties::FindProjection(node, 0);
1861          if (!result || selector->IsDefined(result)) {
1862            switch (node->opcode()) {
1863              case IrOpcode::kInt32AddWithOverflow:
1864                cont->OverwriteAndNegateIfEqual(kOverflow);
1865                return VisitBinop(selector, node, kArmAdd, kArmAdd, cont);
1866              case IrOpcode::kInt32SubWithOverflow:
1867                cont->OverwriteAndNegateIfEqual(kOverflow);
1868                return VisitBinop(selector, node, kArmSub, kArmRsb, cont);
1869              case IrOpcode::kInt32MulWithOverflow:
1870                // ARM doesn't set the overflow flag for multiplication, so we
1871                // need to test on kNotEqual. Here is the code sequence used:
1872                //   smull resultlow, resulthigh, left, right
1873                //   cmp resulthigh, Operand(resultlow, ASR, 31)
1874                cont->OverwriteAndNegateIfEqual(kNotEqual);
1875                return EmitInt32MulWithOverflow(selector, node, cont);
1876              default:
1877                break;
1878            }
1879          }
1880        }
1881        break;
1882      case IrOpcode::kInt32Add:
1883        return VisitWordCompare(selector, value, kArmCmn, cont);
1884      case IrOpcode::kInt32Sub:
1885        return VisitWordCompare(selector, value, kArmCmp, cont);
1886      case IrOpcode::kWord32And:
1887        return VisitWordCompare(selector, value, kArmTst, cont);
1888      case IrOpcode::kWord32Or:
1889        return VisitBinop(selector, value, kArmOrr, kArmOrr, cont);
1890      case IrOpcode::kWord32Xor:
1891        return VisitWordCompare(selector, value, kArmTeq, cont);
1892      case IrOpcode::kWord32Sar:
1893        return VisitShift(selector, value, TryMatchASR, cont);
1894      case IrOpcode::kWord32Shl:
1895        return VisitShift(selector, value, TryMatchLSL, cont);
1896      case IrOpcode::kWord32Shr:
1897        return VisitShift(selector, value, TryMatchLSR, cont);
1898      case IrOpcode::kWord32Ror:
1899        return VisitShift(selector, value, TryMatchROR, cont);
1900      default:
1901        break;
1902    }
1903  }
1904
1905  if (user->opcode() == IrOpcode::kWord32Equal) {
1906    return VisitWordCompare(selector, user, cont);
1907  }
1908
1909  // Continuation could not be combined with a compare, emit compare against 0.
1910  ArmOperandGenerator g(selector);
1911  InstructionCode const opcode =
1912      cont->Encode(kArmTst) | AddressingModeField::encode(kMode_Operand2_R);
1913  InstructionOperand const value_operand = g.UseRegister(value);
1914  if (cont->IsBranch()) {
1915    selector->Emit(opcode, g.NoOutput(), value_operand, value_operand,
1916                   g.Label(cont->true_block()), g.Label(cont->false_block()));
1917  } else if (cont->IsDeoptimize()) {
1918    selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand, value_operand,
1919                             cont->kind(), cont->reason(), cont->frame_state());
1920  } else if (cont->IsSet()) {
1921    selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
1922                   value_operand);
1923  } else {
1924    DCHECK(cont->IsTrap());
1925    selector->Emit(opcode, g.NoOutput(), value_operand, value_operand,
1926                   g.UseImmediate(cont->trap_id()));
1927  }
1928}
1929
1930}  // namespace
1931
1932void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
1933                                      BasicBlock* fbranch) {
1934  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
1935  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
1936}
1937
1938void InstructionSelector::VisitDeoptimizeIf(Node* node) {
1939  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
1940  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
1941      kNotEqual, p.kind(), p.reason(), node->InputAt(1));
1942  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
1943}
1944
1945void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
1946  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
1947  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
1948      kEqual, p.kind(), p.reason(), node->InputAt(1));
1949  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
1950}
1951
1952void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
1953  FlagsContinuation cont =
1954      FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
1955  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
1956}
1957
1958void InstructionSelector::VisitTrapUnless(Node* node,
1959                                          Runtime::FunctionId func_id) {
1960  FlagsContinuation cont =
1961      FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
1962  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
1963}
1964
1965void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
1966  ArmOperandGenerator g(this);
1967  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
1968
1969  // Emit either ArchTableSwitch or ArchLookupSwitch.
1970  size_t table_space_cost = 4 + sw.value_range;
1971  size_t table_time_cost = 3;
1972  size_t lookup_space_cost = 3 + 2 * sw.case_count;
1973  size_t lookup_time_cost = sw.case_count;
1974  if (sw.case_count > 0 &&
1975      table_space_cost + 3 * table_time_cost <=
1976          lookup_space_cost + 3 * lookup_time_cost &&
1977      sw.min_value > std::numeric_limits<int32_t>::min()) {
1978    InstructionOperand index_operand = value_operand;
1979    if (sw.min_value) {
1980      index_operand = g.TempRegister();
1981      Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_I),
1982           index_operand, value_operand, g.TempImmediate(sw.min_value));
1983    }
1984    // Generate a table lookup.
1985    return EmitTableSwitch(sw, index_operand);
1986  }
1987
1988  // Generate a sequence of conditional jumps.
1989  return EmitLookupSwitch(sw, value_operand);
1990}
1991
1992
1993void InstructionSelector::VisitWord32Equal(Node* const node) {
1994  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
1995  Int32BinopMatcher m(node);
1996  if (m.right().Is(0)) {
1997    return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
1998  }
1999  VisitWordCompare(this, node, &cont);
2000}
2001
2002
2003void InstructionSelector::VisitInt32LessThan(Node* node) {
2004  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2005  VisitWordCompare(this, node, &cont);
2006}
2007
2008
2009void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
2010  FlagsContinuation cont =
2011      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2012  VisitWordCompare(this, node, &cont);
2013}
2014
2015
2016void InstructionSelector::VisitUint32LessThan(Node* node) {
2017  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2018  VisitWordCompare(this, node, &cont);
2019}
2020
2021
2022void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
2023  FlagsContinuation cont =
2024      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2025  VisitWordCompare(this, node, &cont);
2026}
2027
2028
2029void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
2030  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2031    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2032    return VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
2033  }
2034  FlagsContinuation cont;
2035  VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
2036}
2037
2038void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
2039  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2040    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2041    return VisitBinop(this, node, kArmSub, kArmRsb, &cont);
2042  }
2043  FlagsContinuation cont;
2044  VisitBinop(this, node, kArmSub, kArmRsb, &cont);
2045}
2046
2047void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
2048  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2049    // ARM doesn't set the overflow flag for multiplication, so we need to test
2050    // on kNotEqual. Here is the code sequence used:
2051    //   smull resultlow, resulthigh, left, right
2052    //   cmp resulthigh, Operand(resultlow, ASR, 31)
2053    FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
2054    return EmitInt32MulWithOverflow(this, node, &cont);
2055  }
2056  FlagsContinuation cont;
2057  EmitInt32MulWithOverflow(this, node, &cont);
2058}
2059
2060void InstructionSelector::VisitFloat32Equal(Node* node) {
2061  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2062  VisitFloat32Compare(this, node, &cont);
2063}
2064
2065
2066void InstructionSelector::VisitFloat32LessThan(Node* node) {
2067  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
2068  VisitFloat32Compare(this, node, &cont);
2069}
2070
2071
2072void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
2073  FlagsContinuation cont =
2074      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
2075  VisitFloat32Compare(this, node, &cont);
2076}
2077
2078
2079void InstructionSelector::VisitFloat64Equal(Node* node) {
2080  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2081  VisitFloat64Compare(this, node, &cont);
2082}
2083
2084
2085void InstructionSelector::VisitFloat64LessThan(Node* node) {
2086  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
2087  VisitFloat64Compare(this, node, &cont);
2088}
2089
2090
2091void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
2092  FlagsContinuation cont =
2093      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
2094  VisitFloat64Compare(this, node, &cont);
2095}
2096
2097void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
2098  ArmOperandGenerator g(this);
2099  Node* left = node->InputAt(0);
2100  Node* right = node->InputAt(1);
2101  if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
2102      CanCover(node, left)) {
2103    left = left->InputAt(1);
2104    Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), g.UseRegister(right),
2105         g.UseRegister(left));
2106    return;
2107  }
2108  Emit(kArmVmovLowF64U32, g.DefineSameAsFirst(node), g.UseRegister(left),
2109       g.UseRegister(right));
2110}
2111
2112
2113void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
2114  ArmOperandGenerator g(this);
2115  Node* left = node->InputAt(0);
2116  Node* right = node->InputAt(1);
2117  if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
2118      CanCover(node, left)) {
2119    left = left->InputAt(1);
2120    Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), g.UseRegister(left),
2121         g.UseRegister(right));
2122    return;
2123  }
2124  Emit(kArmVmovHighF64U32, g.DefineSameAsFirst(node), g.UseRegister(left),
2125       g.UseRegister(right));
2126}
2127
2128void InstructionSelector::VisitAtomicLoad(Node* node) {
2129  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
2130  ArmOperandGenerator g(this);
2131  Node* base = node->InputAt(0);
2132  Node* index = node->InputAt(1);
2133  ArchOpcode opcode = kArchNop;
2134  switch (load_rep.representation()) {
2135    case MachineRepresentation::kWord8:
2136      opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
2137      break;
2138    case MachineRepresentation::kWord16:
2139      opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
2140      break;
2141    case MachineRepresentation::kWord32:
2142      opcode = kAtomicLoadWord32;
2143      break;
2144    default:
2145      UNREACHABLE();
2146      return;
2147  }
2148  Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
2149       g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
2150}
2151
2152void InstructionSelector::VisitAtomicStore(Node* node) {
2153  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
2154  ArmOperandGenerator g(this);
2155  Node* base = node->InputAt(0);
2156  Node* index = node->InputAt(1);
2157  Node* value = node->InputAt(2);
2158  ArchOpcode opcode = kArchNop;
2159  switch (rep) {
2160    case MachineRepresentation::kWord8:
2161      opcode = kAtomicStoreWord8;
2162      break;
2163    case MachineRepresentation::kWord16:
2164      opcode = kAtomicStoreWord16;
2165      break;
2166    case MachineRepresentation::kWord32:
2167      opcode = kAtomicStoreWord32;
2168      break;
2169    default:
2170      UNREACHABLE();
2171      return;
2172  }
2173
2174  AddressingMode addressing_mode = kMode_Offset_RR;
2175  InstructionOperand inputs[4];
2176  size_t input_count = 0;
2177  inputs[input_count++] = g.UseUniqueRegister(base);
2178  inputs[input_count++] = g.UseUniqueRegister(index);
2179  inputs[input_count++] = g.UseUniqueRegister(value);
2180  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2181  Emit(code, 0, nullptr, input_count, inputs);
2182}
2183
2184#define SIMD_TYPE_LIST(V) \
2185  V(Float32x4)            \
2186  V(Int32x4)              \
2187  V(Int16x8)              \
2188  V(Int8x16)
2189
2190#define SIMD_FORMAT_LIST(V) \
2191  V(32x4)                   \
2192  V(16x8)                   \
2193  V(8x16)
2194
2195#define SIMD_UNOP_LIST(V)  \
2196  V(Float32x4FromInt32x4)  \
2197  V(Float32x4FromUint32x4) \
2198  V(Float32x4Abs)          \
2199  V(Float32x4Neg)          \
2200  V(Int32x4FromFloat32x4)  \
2201  V(Uint32x4FromFloat32x4) \
2202  V(Int32x4Neg)            \
2203  V(Int16x8Neg)            \
2204  V(Int8x16Neg)            \
2205  V(Simd128Not)
2206
2207#define SIMD_BINOP_LIST(V)      \
2208  V(Float32x4Add)               \
2209  V(Float32x4Sub)               \
2210  V(Float32x4Equal)             \
2211  V(Float32x4NotEqual)          \
2212  V(Int32x4Add)                 \
2213  V(Int32x4Sub)                 \
2214  V(Int32x4Mul)                 \
2215  V(Int32x4Min)                 \
2216  V(Int32x4Max)                 \
2217  V(Int32x4Equal)               \
2218  V(Int32x4NotEqual)            \
2219  V(Int32x4GreaterThan)         \
2220  V(Int32x4GreaterThanOrEqual)  \
2221  V(Uint32x4Min)                \
2222  V(Uint32x4Max)                \
2223  V(Uint32x4GreaterThan)        \
2224  V(Uint32x4GreaterThanOrEqual) \
2225  V(Int16x8Add)                 \
2226  V(Int16x8AddSaturate)         \
2227  V(Int16x8Sub)                 \
2228  V(Int16x8SubSaturate)         \
2229  V(Int16x8Mul)                 \
2230  V(Int16x8Min)                 \
2231  V(Int16x8Max)                 \
2232  V(Int16x8Equal)               \
2233  V(Int16x8NotEqual)            \
2234  V(Int16x8GreaterThan)         \
2235  V(Int16x8GreaterThanOrEqual)  \
2236  V(Uint16x8AddSaturate)        \
2237  V(Uint16x8SubSaturate)        \
2238  V(Uint16x8Min)                \
2239  V(Uint16x8Max)                \
2240  V(Uint16x8GreaterThan)        \
2241  V(Uint16x8GreaterThanOrEqual) \
2242  V(Int8x16Add)                 \
2243  V(Int8x16AddSaturate)         \
2244  V(Int8x16Sub)                 \
2245  V(Int8x16SubSaturate)         \
2246  V(Int8x16Mul)                 \
2247  V(Int8x16Min)                 \
2248  V(Int8x16Max)                 \
2249  V(Int8x16Equal)               \
2250  V(Int8x16NotEqual)            \
2251  V(Int8x16GreaterThan)         \
2252  V(Int8x16GreaterThanOrEqual)  \
2253  V(Uint8x16AddSaturate)        \
2254  V(Uint8x16SubSaturate)        \
2255  V(Uint8x16Min)                \
2256  V(Uint8x16Max)                \
2257  V(Uint8x16GreaterThan)        \
2258  V(Uint8x16GreaterThanOrEqual) \
2259  V(Simd128And)                 \
2260  V(Simd128Or)                  \
2261  V(Simd128Xor)
2262
2263#define SIMD_SHIFT_OP_LIST(V)   \
2264  V(Int32x4ShiftLeftByScalar)   \
2265  V(Int32x4ShiftRightByScalar)  \
2266  V(Uint32x4ShiftRightByScalar) \
2267  V(Int16x8ShiftLeftByScalar)   \
2268  V(Int16x8ShiftRightByScalar)  \
2269  V(Uint16x8ShiftRightByScalar) \
2270  V(Int8x16ShiftLeftByScalar)   \
2271  V(Int8x16ShiftRightByScalar)  \
2272  V(Uint8x16ShiftRightByScalar)
2273
2274#define SIMD_VISIT_SPLAT(Type)                              \
2275  void InstructionSelector::VisitCreate##Type(Node* node) { \
2276    VisitRR(this, kArm##Type##Splat, node);                 \
2277  }
2278SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
2279#undef SIMD_VISIT_SPLAT
2280
2281#define SIMD_VISIT_EXTRACT_LANE(Type)                              \
2282  void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
2283    VisitRRI(this, kArm##Type##ExtractLane, node);                 \
2284  }
2285SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
2286#undef SIMD_VISIT_EXTRACT_LANE
2287
2288#define SIMD_VISIT_REPLACE_LANE(Type)                              \
2289  void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
2290    VisitRRIR(this, kArm##Type##ReplaceLane, node);                \
2291  }
2292SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
2293#undef SIMD_VISIT_REPLACE_LANE
2294
2295#define SIMD_VISIT_UNOP(Name)                         \
2296  void InstructionSelector::Visit##Name(Node* node) { \
2297    VisitRR(this, kArm##Name, node);                  \
2298  }
2299SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
2300#undef SIMD_VISIT_UNOP
2301
2302#define SIMD_VISIT_BINOP(Name)                        \
2303  void InstructionSelector::Visit##Name(Node* node) { \
2304    VisitRRR(this, kArm##Name, node);                 \
2305  }
2306SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
2307#undef SIMD_VISIT_BINOP
2308
2309#define SIMD_VISIT_SHIFT_OP(Name)                     \
2310  void InstructionSelector::Visit##Name(Node* node) { \
2311    VisitRRI(this, kArm##Name, node);                 \
2312  }
2313SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
2314#undef SIMD_VISIT_SHIFT_OP
2315
2316#define SIMD_VISIT_SELECT_OP(format)                                \
2317  void InstructionSelector::VisitSimd##format##Select(Node* node) { \
2318    VisitRRRR(this, kArmSimd##format##Select, node);                \
2319  }
2320SIMD_FORMAT_LIST(SIMD_VISIT_SELECT_OP)
2321#undef SIMD_VISIT_SELECT_OP
2322
2323// static
2324MachineOperatorBuilder::Flags
2325InstructionSelector::SupportedMachineOperatorFlags() {
2326  MachineOperatorBuilder::Flags flags;
2327  if (CpuFeatures::IsSupported(SUDIV)) {
2328    // The sdiv and udiv instructions correctly return 0 if the divisor is 0,
2329    // but the fall-back implementation does not.
2330    flags |= MachineOperatorBuilder::kInt32DivIsSafe |
2331             MachineOperatorBuilder::kUint32DivIsSafe;
2332  }
2333  if (CpuFeatures::IsSupported(ARMv7)) {
2334    flags |= MachineOperatorBuilder::kWord32ReverseBits;
2335  }
2336  if (CpuFeatures::IsSupported(ARMv8)) {
2337    flags |= MachineOperatorBuilder::kFloat32RoundDown |
2338             MachineOperatorBuilder::kFloat64RoundDown |
2339             MachineOperatorBuilder::kFloat32RoundUp |
2340             MachineOperatorBuilder::kFloat64RoundUp |
2341             MachineOperatorBuilder::kFloat32RoundTruncate |
2342             MachineOperatorBuilder::kFloat64RoundTruncate |
2343             MachineOperatorBuilder::kFloat64RoundTiesAway |
2344             MachineOperatorBuilder::kFloat32RoundTiesEven |
2345             MachineOperatorBuilder::kFloat64RoundTiesEven;
2346  }
2347  return flags;
2348}
2349
2350// static
2351MachineOperatorBuilder::AlignmentRequirements
2352InstructionSelector::AlignmentRequirements() {
2353  Vector<MachineType> req_aligned = Vector<MachineType>::New(2);
2354  req_aligned[0] = MachineType::Float32();
2355  req_aligned[1] = MachineType::Float64();
2356  return MachineOperatorBuilder::AlignmentRequirements::
2357      SomeUnalignedAccessUnsupported(req_aligned, req_aligned);
2358}
2359
2360}  // namespace compiler
2361}  // namespace internal
2362}  // namespace v8
2363