// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"

namespace v8 {
namespace internal {
namespace compiler {

enum ImmediateMode {
  kArithmeticImm,  // 12 bit unsigned immediate shifted left 0 or 12 bits
  kShift32Imm,     // 0 - 31
  kShift64Imm,     // 0 - 63
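  // Encodable as a logical (bitmask) immediate, e.g. 0x0000ffff or
  // 0x01010101 (see Assembler::IsImmLogical):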
  kLogical32Imm,
  kLogical64Imm,
  kLoadStoreImm8,   // signed 9 bit or 12 bit unsigned scaled by access size
  kLoadStoreImm16,
  kLoadStoreImm32,
  kLoadStoreImm64,
  kNoImmediate
};


// Adds Arm64-specific methods for generating operands.
class Arm64OperandGenerator FINAL : public OperandGenerator {
 public:
  explicit Arm64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand* UseOperand(Node* node, ImmediateMode mode) {
    if (CanBeImmediate(node, mode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

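  // Returns true if |node| is an integer constant that can be encoded as an
  // immediate under |mode|.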
  bool CanBeImmediate(Node* node, ImmediateMode mode) {
    int64_t value;
    if (node->opcode() == IrOpcode::kInt32Constant)
      value = OpParameter<int32_t>(node);
    else if (node->opcode() == IrOpcode::kInt64Constant)
      value = OpParameter<int64_t>(node);
    else
      return false;
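    // IsImmLogical reports the encoding fields through out-parameters; only
    // the yes/no result is needed here.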
    unsigned ignored;
    switch (mode) {
      case kLogical32Imm:
        // TODO(dcarney): some unencodable values can be handled by
        // switching instructions.
        return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32,
                                       &ignored, &ignored, &ignored);
      case kLogical64Imm:
        return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
                                       &ignored, &ignored, &ignored);
      case kArithmeticImm:
        // TODO(dcarney): -values can be handled by instruction swapping
        return Assembler::IsImmAddSub(value);
      case kShift32Imm:
        return 0 <= value && value < 32;
      case kShift64Imm:
        return 0 <= value && value < 64;
      case kLoadStoreImm8:
        return IsLoadStoreImmediate(value, LSByte);
      case kLoadStoreImm16:
        return IsLoadStoreImmediate(value, LSHalfword);
      case kLoadStoreImm32:
        return IsLoadStoreImmediate(value, LSWord);
      case kLoadStoreImm64:
        return IsLoadStoreImmediate(value, LSDoubleWord);
      case kNoImmediate:
        return false;
    }
    return false;
  }

 private:
  bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
    return Assembler::IsImmLSScaled(value, size) ||
           Assembler::IsImmLSUnscaled(value);
  }
};


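// Shared routine for binary operations taking two register inputs and
// producing a register output.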
static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}


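// Identical to VisitRRR, but kept separate for float64 instructions.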
static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
                            Node* node) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}


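// Shared routine for binary operations whose right input may be encoded as
// an immediate under |operand_mode|.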
static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node, ImmediateMode operand_mode) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseOperand(node->InputAt(1), operand_mode));
}


// Shared routine for multiple binary operations.
template <typename Matcher>
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, ImmediateMode operand_mode,
                       FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  Matcher m(node);
  InstructionOperand* inputs[4];
  size_t input_count = 0;
  InstructionOperand* outputs[2];
  size_t output_count = 0;

  inputs[input_count++] = g.UseRegister(m.left().node());
  inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);

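  // A branch continuation consumes its true/false target blocks as extra
  // label inputs.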
  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineAsRegister(node);
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0, input_count);
  DCHECK_NE(0, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
                                      outputs, input_count, inputs);
  if (cont->IsBranch()) instr->MarkAsControl();
}


// Shared routine for multiple binary operations.
template <typename Matcher>
static void VisitBinop(InstructionSelector* selector, Node* node,
                       ArchOpcode opcode, ImmediateMode operand_mode) {
  FlagsContinuation cont;
  VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
}


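// Selects a load opcode from the load representation and picks an immediate-
// or register-offset addressing mode for the index.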
void InstructionSelector::VisitLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  ArchOpcode opcode;
  ImmediateMode immediate_mode = kNoImmediate;
  switch (rep) {
    case kRepFloat32:
      opcode = kArm64LdrS;
      immediate_mode = kLoadStoreImm32;
      break;
    case kRepFloat64:
      opcode = kArm64LdrD;
      immediate_mode = kLoadStoreImm64;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = typ == kTypeInt32 ? kArm64Ldrsb : kArm64Ldrb;
      immediate_mode = kLoadStoreImm8;
      break;
    case kRepWord16:
      opcode = typ == kTypeInt32 ? kArm64Ldrsh : kArm64Ldrh;
      immediate_mode = kLoadStoreImm16;
      break;
    case kRepWord32:
      opcode = kArm64LdrW;
      immediate_mode = kLoadStoreImm32;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord64:
      opcode = kArm64Ldr;
      immediate_mode = kLoadStoreImm64;
      break;
    default:
      UNREACHABLE();
      return;
  }
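  // Use an immediate offset (MRI) when the index is encodable; otherwise use
  // a register offset (MRR).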
  if (g.CanBeImmediate(index, immediate_mode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_MRR),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
  }
}


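// Selects a store opcode in the same way as VisitLoad; stores that need a
// full write barrier are emitted as a single kArm64StoreWriteBarrier
// instruction with fixed registers.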
void InstructionSelector::VisitStore(Node* node) {
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineType rep = RepresentationOf(store_rep.machine_type());
  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
    DCHECK(rep == kRepTagged);
    // TODO(dcarney): refactor RecordWrite function to take temp registers
    //                and pass them here instead of using fixed regs
    // TODO(dcarney): handle immediate indices.
    InstructionOperand* temps[] = {g.TempRegister(x11), g.TempRegister(x12)};
    Emit(kArm64StoreWriteBarrier, NULL, g.UseFixed(base, x10),
         g.UseFixed(index, x11), g.UseFixed(value, x12), arraysize(temps),
         temps);
    return;
  }
  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
  ArchOpcode opcode;
  ImmediateMode immediate_mode = kNoImmediate;
  switch (rep) {
    case kRepFloat32:
      opcode = kArm64StrS;
      immediate_mode = kLoadStoreImm32;
      break;
    case kRepFloat64:
      opcode = kArm64StrD;
      immediate_mode = kLoadStoreImm64;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = kArm64Strb;
      immediate_mode = kLoadStoreImm8;
      break;
    case kRepWord16:
      opcode = kArm64Strh;
      immediate_mode = kLoadStoreImm16;
      break;
    case kRepWord32:
      opcode = kArm64StrW;
      immediate_mode = kLoadStoreImm32;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord64:
      opcode = kArm64Str;
      immediate_mode = kLoadStoreImm64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(index, immediate_mode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_MRR), NULL,
         g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
  }
}


void InstructionSelector::VisitWord32And(Node* node) {
  VisitBinop<Int32BinopMatcher>(this, node, kArm64And32, kLogical32Imm);
}


void InstructionSelector::VisitWord64And(Node* node) {
  VisitBinop<Int64BinopMatcher>(this, node, kArm64And, kLogical64Imm);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Or32, kLogical32Imm);
}


void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop<Int64BinopMatcher>(this, node, kArm64Or, kLogical64Imm);
}


void InstructionSelector::VisitWord32Xor(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);
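  // -1 is not encodable as a logical immediate, so match x ^ -1 explicitly
  // and emit a NOT instead.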
  if (m.right().Is(-1)) {
    Emit(kArm64Not32, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
  } else {
    VisitBinop<Int32BinopMatcher>(this, node, kArm64Xor32, kLogical32Imm);
  }
}


void InstructionSelector::VisitWord64Xor(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kArm64Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
  } else {
    VisitBinop<Int64BinopMatcher>(this, node, kArm64Xor, kLogical64Imm);
  }
}


void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitRRO(this, kArm64Shl32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Shl(Node* node) {
  VisitRRO(this, kArm64Shl, node, kShift64Imm);
}


void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitRRO(this, kArm64Shr32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Shr(Node* node) {
  VisitRRO(this, kArm64Shr, node, kShift64Imm);
}


void InstructionSelector::VisitWord32Sar(Node* node) {
  VisitRRO(this, kArm64Sar32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitRRO(this, kArm64Sar, node, kShift64Imm);
}


void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kArm64Ror32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, kArm64Ror, node, kShift64Imm);
}


void InstructionSelector::VisitInt32Add(Node* node) {
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm);
}


void InstructionSelector::VisitInt64Add(Node* node) {
  VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm);
}


void InstructionSelector::VisitInt32Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);
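  // 0 - x is -x, so select a NEG instruction.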
  if (m.left().Is(0)) {
    Emit(kArm64Neg32, g.DefineAsRegister(node),
         g.UseRegister(m.right().node()));
  } else {
    VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm);
  }
}


void InstructionSelector::VisitInt64Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kArm64Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
  } else {
    VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm);
  }
}


void InstructionSelector::VisitInt32Mul(Node* node) {
  VisitRRR(this, kArm64Mul32, node);
}


void InstructionSelector::VisitInt64Mul(Node* node) {
  VisitRRR(this, kArm64Mul, node);
}


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitRRR(this, kArm64Idiv32, node);
}


void InstructionSelector::VisitInt64Div(Node* node) {
  VisitRRR(this, kArm64Idiv, node);
}


void InstructionSelector::VisitInt32UDiv(Node* node) {
  VisitRRR(this, kArm64Udiv32, node);
}


void InstructionSelector::VisitInt64UDiv(Node* node) {
  VisitRRR(this, kArm64Udiv, node);
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitRRR(this, kArm64Imod32, node);
}


void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitRRR(this, kArm64Imod, node);
}


void InstructionSelector::VisitInt32UMod(Node* node) {
  VisitRRR(this, kArm64Umod32, node);
}


void InstructionSelector::VisitInt64UMod(Node* node) {
  VisitRRR(this, kArm64Umod, node);
}


void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Int32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Uint32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ToInt32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ToUint32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Sxtw, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  Arm64OperandGenerator g(this);
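  // On ARM64 a 32-bit register write zero-extends into the full X register,
  // so a 32-bit move implements the zero-extension.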
  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Add, node);
}


void InstructionSelector::VisitFloat64Sub(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Sub, node);
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Mul, node);
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Div, node);
}


void InstructionSelector::VisitFloat64Mod(Node* node) {
  Arm64OperandGenerator g(this);
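  // Float64Mod is lowered to a call, so the inputs and output must live in
  // the fixed registers of the calling convention (d0, d1 in, d0 out).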
  Emit(kArm64Float64Mod, g.DefineAsFixed(node, d0),
       g.UseFixed(node->InputAt(0), d0),
       g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
}


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64Sqrt, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, cont);
}


// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand* left, InstructionOperand* right,
                         FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
                   g.Label(cont->false_block()))->MarkAsControl();
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}


// Shared routine for multiple word compare operations.
static void VisitWordCompare(InstructionSelector* selector, Node* node,
                             InstructionCode opcode, FlagsContinuation* cont,
                             bool commutative) {
  Arm64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, kArithmeticImm)) {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                 cont);
  } else if (g.CanBeImmediate(left, kArithmeticImm)) {
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                 cont);
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}


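// Tries to combine the zero test with the operation that computes the value:
// an add becomes CMN, a sub becomes CMP, and a bitwise and becomes TST.
// Otherwise the value is tested against itself (TST x, x).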
void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kInt32Add:
      return VisitWordCompare(this, node, kArm64Cmn32, cont, true);
    case IrOpcode::kInt32Sub:
      return VisitWordCompare(this, node, kArm64Cmp32, cont, false);
    case IrOpcode::kWord32And:
      return VisitWordCompare(this, node, kArm64Tst32, cont, true);
    default:
      break;
  }

  Arm64OperandGenerator g(this);
  VisitCompare(this, kArm64Tst32, g.UseRegister(node), g.UseRegister(node),
               cont);
}


void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kWord64And:
      return VisitWordCompare(this, node, kArm64Tst, cont, true);
    default:
      break;
  }

  Arm64OperandGenerator g(this);
  VisitCompare(this, kArm64Tst, g.UseRegister(node), g.UseRegister(node), cont);
}


void InstructionSelector::VisitWord32Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kArm64Cmp32, cont, false);
}


void InstructionSelector::VisitWord64Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kArm64Cmp, cont, false);
}


void InstructionSelector::VisitFloat64Compare(Node* node,
                                              FlagsContinuation* cont) {
  Arm64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  VisitCompare(this, kArm64Float64Cmp, g.UseRegister(left),
               g.UseRegister(right), cont);
}


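// Lowers a call: computes the call buffer, claims stack space, pokes the
// stack arguments (in pairs where possible), and emits the call instruction.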
void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
                                    BasicBlock* deoptimization) {
  Arm64OperandGenerator g(this);
  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);

  FrameStateDescriptor* frame_state_descriptor = NULL;
  if (descriptor->NeedsFrameState()) {
    frame_state_descriptor =
        GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
  }

  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

  // Compute InstructionOperands for inputs and outputs.
  // TODO(turbofan): on ARM64 it's probably better to use the code object in a
  // register if there are multiple uses of it. Improve constant pool and the
  // heuristics in the register allocator for where to emit constants.
  InitializeCallBuffer(call, &buffer, true, false);

  // Push the arguments to the stack.
  bool pushed_count_uneven = (buffer.pushed_nodes.size() & 1) != 0;
  int aligned_push_count = static_cast<int>(buffer.pushed_nodes.size());
  // TODO(dcarney): claim and poke probably take small immediates,
  //                loop here or whatever.
  // Bump the stack pointer(s).
  if (aligned_push_count > 0) {
    // TODO(dcarney): it would be better to bump the csp here only
    //                and emit paired stores with increment for non c frames.
    Emit(kArm64Claim | MiscField::encode(aligned_push_count), NULL);
  }
  // Move arguments to the stack.
  {
    int slot = static_cast<int>(buffer.pushed_nodes.size()) - 1;
    // Emit the uneven pushes.
    if (pushed_count_uneven) {
      Node* input = buffer.pushed_nodes[slot];
      Emit(kArm64Poke | MiscField::encode(slot), NULL, g.UseRegister(input));
      slot--;
    }
    // Now all pushes can be done in pairs.
    for (; slot >= 0; slot -= 2) {
      Emit(kArm64PokePair | MiscField::encode(slot), NULL,
           g.UseRegister(buffer.pushed_nodes[slot]),
           g.UseRegister(buffer.pushed_nodes[slot - 1]));
    }
  }

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      opcode = kArchCallCodeObject;
      break;
    }
    case CallDescriptor::kCallJSFunction:
      opcode = kArchCallJSFunction;
      break;
    default:
      UNREACHABLE();
      return;
  }
  opcode |= MiscField::encode(descriptor->flags());

  // Emit the call instruction.
  Instruction* call_instr =
      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
           buffer.instruction_args.size(), &buffer.instruction_args.front());

  call_instr->MarkAsCall();
  if (deoptimization != NULL) {
    DCHECK(continuation != NULL);
    call_instr->MarkAsControl();
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8