// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"

namespace v8 {
namespace internal {
namespace compiler {

// Adds IA32-specific methods for generating operands.
class IA32OperandGenerator FINAL : public OperandGenerator {
 public:
  explicit IA32OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand* UseByteRegister(Node* node) {
    // TODO(dcarney): relax constraint.
    return UseFixed(node, edx);
  }

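  // Returns true if |node| can be used as an immediate operand on IA32.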
  bool CanBeImmediate(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kInt32Constant:
      case IrOpcode::kNumberConstant:
      case IrOpcode::kExternalConstant:
        return true;
      case IrOpcode::kHeapConstant: {
        // Constants in new space cannot be used as immediates in V8 because
        // the GC does not scan code objects when collecting the new generation.
        Unique<HeapObject> value = OpParameter<Unique<HeapObject> >(node);
        return !isolate()->heap()->InNewSpace(*value.handle());
      }
      default:
        return false;
    }
  }
};


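// Selects a load instruction based on the load representation and picks an
// addressing mode for the base and index inputs.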
void InstructionSelector::VisitLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
  IA32OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode;
  // TODO(titzer): signed/unsigned small loads
  switch (rep) {
    case kRepFloat32:
      opcode = kIA32Movss;
      break;
    case kRepFloat64:
      opcode = kIA32Movsd;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = typ == kTypeInt32 ? kIA32Movsxbl : kIA32Movzxbl;
      break;
    case kRepWord16:
      opcode = typ == kTypeInt32 ? kIA32Movsxwl : kIA32Movzxwl;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord32:
      opcode = kIA32Movl;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(base)) {
    if (Int32Matcher(index).Is(0)) {  // load [#base + #0]
      Emit(opcode | AddressingModeField::encode(kMode_MI),
           g.DefineAsRegister(node), g.UseImmediate(base));
    } else {  // load [#base + %index]
      Emit(opcode | AddressingModeField::encode(kMode_MRI),
           g.DefineAsRegister(node), g.UseRegister(index),
           g.UseImmediate(base));
    }
  } else if (g.CanBeImmediate(index)) {  // load [%base + #index]
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {  // load [%base + %index + K]
    Emit(opcode | AddressingModeField::encode(kMode_MR1I),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
  }
  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
}


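// Selects a store instruction. Tagged stores that require a full write barrier
// are emitted as kIA32StoreWriteBarrier with fixed input and temp registers.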
void InstructionSelector::VisitStore(Node* node) {
  IA32OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineType rep = RepresentationOf(store_rep.machine_type());
  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
    DCHECK_EQ(kRepTagged, rep);
    // TODO(dcarney): refactor RecordWrite function to take temp registers
    //                and pass them here instead of using fixed regs
    // TODO(dcarney): handle immediate indices.
    InstructionOperand* temps[] = {g.TempRegister(ecx), g.TempRegister(edx)};
    Emit(kIA32StoreWriteBarrier, NULL, g.UseFixed(base, ebx),
         g.UseFixed(index, ecx), g.UseFixed(value, edx), arraysize(temps),
         temps);
    return;
  }
  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
  InstructionOperand* val;
  if (g.CanBeImmediate(value)) {
    val = g.UseImmediate(value);
  } else if (rep == kRepWord8 || rep == kRepBit) {
    val = g.UseByteRegister(value);
  } else {
    val = g.UseRegister(value);
  }
  ArchOpcode opcode;
  switch (rep) {
    case kRepFloat32:
      opcode = kIA32Movss;
      break;
    case kRepFloat64:
      opcode = kIA32Movsd;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      opcode = kIA32Movb;
      break;
    case kRepWord16:
      opcode = kIA32Movw;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord32:
      opcode = kIA32Movl;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(base)) {
    if (Int32Matcher(index).Is(0)) {  // store [#base], %|#value
      Emit(opcode | AddressingModeField::encode(kMode_MI), NULL,
           g.UseImmediate(base), val);
    } else {  // store [#base + %index], %|#value
      Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
           g.UseRegister(index), g.UseImmediate(base), val);
    }
  } else if (g.CanBeImmediate(index)) {  // store [%base + #index], %|#value
    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
         g.UseRegister(base), g.UseImmediate(index), val);
  } else {  // store [%base + %index], %|#value
    Emit(opcode | AddressingModeField::encode(kMode_MR1I), NULL,
         g.UseRegister(base), g.UseRegister(index), val);
  }
  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
}


// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  IA32OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand* inputs[4];
  size_t input_count = 0;
  InstructionOperand* outputs[2];
  size_t output_count = 0;

  // TODO(turbofan): match complex addressing modes.
  // TODO(turbofan): if commutative, pick the non-live-in operand as the left
  // operand, since this might be its last use and therefore its register can
  // be reused.
  if (g.CanBeImmediate(m.right().node())) {
    inputs[input_count++] = g.Use(m.left().node());
    inputs[input_count++] = g.UseImmediate(m.right().node());
  } else {
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.Use(m.right().node());
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineSameAsFirst(node);
  if (cont->IsSet()) {
    // TODO(turbofan): Use byte register here.
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0, input_count);
  DCHECK_NE(0, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
                                      outputs, input_count, inputs);
  if (cont->IsBranch()) instr->MarkAsControl();
}


// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, &cont);
}


void InstructionSelector::VisitWord32And(Node* node) {
  VisitBinop(this, node, kIA32And);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kIA32Or);
}


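// 'x ^ -1' is emitted as a single-operand NOT.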
void InstructionSelector::VisitWord32Xor(Node* node) {
  IA32OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kIA32Not, g.DefineSameAsFirst(node), g.Use(m.left().node()));
  } else {
    VisitBinop(this, node, kIA32Xor);
  }
}


// Shared routine for multiple shift operations.
static inline void VisitShift(InstructionSelector* selector, Node* node,
                              ArchOpcode opcode) {
  IA32OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // TODO(turbofan): assembler only supports some addressing modes for shifts.
  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
    Int32BinopMatcher m(node);
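    // IA32 shift instructions use only the low five bits of the count, so an
    // explicit 'count & 0x1F' on the shift amount can be stripped.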
    if (m.right().IsWord32And()) {
      Int32BinopMatcher mright(right);
      if (mright.right().Is(0x1F)) {
        right = mright.left().node();
      }
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, ecx));
  }
}


void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitShift(this, node, kIA32Shl);
}


void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitShift(this, node, kIA32Shr);
}


void InstructionSelector::VisitWord32Sar(Node* node) {
  VisitShift(this, node, kIA32Sar);
}


void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitShift(this, node, kIA32Ror);
}


void InstructionSelector::VisitInt32Add(Node* node) {
  VisitBinop(this, node, kIA32Add);
}


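// '0 - x' is emitted as a single-operand NEG.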
void InstructionSelector::VisitInt32Sub(Node* node) {
  IA32OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kIA32Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
  } else {
    VisitBinop(this, node, kIA32Sub);
  }
}


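// imul has a three-operand immediate form (imul dst, src, imm), so either
// constant operand can be folded; otherwise the result reuses the register of
// the left operand.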
void InstructionSelector::VisitInt32Mul(Node* node) {
  IA32OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (g.CanBeImmediate(right)) {
    Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(left),
         g.UseImmediate(right));
  } else if (g.CanBeImmediate(left)) {
    Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(right),
         g.UseImmediate(left));
  } else {
    // TODO(turbofan): select better left operand.
    Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(left),
         g.Use(right));
  }
}


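// Shared routine for division. IA32 idiv/div takes the dividend in edx:eax and
// leaves the quotient in eax, so the result is fixed to eax and edx is
// reserved as a temporary.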
static inline void VisitDiv(InstructionSelector* selector, Node* node,
                            ArchOpcode opcode) {
  IA32OperandGenerator g(selector);
  InstructionOperand* temps[] = {g.TempRegister(edx)};
  size_t temp_count = arraysize(temps);
  selector->Emit(opcode, g.DefineAsFixed(node, eax),
                 g.UseFixed(node->InputAt(0), eax),
                 g.UseUnique(node->InputAt(1)), temp_count, temps);
}


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitDiv(this, node, kIA32Idiv);
}


void InstructionSelector::VisitInt32UDiv(Node* node) {
  VisitDiv(this, node, kIA32Udiv);
}


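// Shared routine for modulus. The remainder ends up in edx; eax is also
// reserved since it is clobbered by the quotient.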
static inline void VisitMod(InstructionSelector* selector, Node* node,
                            ArchOpcode opcode) {
  IA32OperandGenerator g(selector);
  InstructionOperand* temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
  size_t temp_count = arraysize(temps);
  selector->Emit(opcode, g.DefineAsFixed(node, edx),
                 g.UseFixed(node->InputAt(0), eax),
                 g.UseUnique(node->InputAt(1)), temp_count, temps);
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitMod(this, node, kIA32Idiv);
}


void InstructionSelector::VisitInt32UMod(Node* node) {
  VisitMod(this, node, kIA32Udiv);
}


void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  IA32OperandGenerator g(this);
  // TODO(turbofan): IA32 SSE LoadUint32() should take an operand.
  Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat64Sub(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}


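// eax is reserved as a scratch register for the code generator's
// implementation of Float64Mod.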
void InstructionSelector::VisitFloat64Mod(Node* node) {
  IA32OperandGenerator g(this);
  InstructionOperand* temps[] = {g.TempRegister(eax)};
  Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
       temps);
}


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  IA32OperandGenerator g(this);
  Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop(this, node, kIA32Add, cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop(this, node, kIA32Sub, cont);
}


// Shared routine for multiple compare operations.
static inline void VisitCompare(InstructionSelector* selector,
                                InstructionCode opcode,
                                InstructionOperand* left,
                                InstructionOperand* right,
                                FlagsContinuation* cont) {
  IA32OperandGenerator g(selector);
  if (cont->IsBranch()) {
    selector->Emit(cont->Encode(opcode), NULL, left, right,
                   g.Label(cont->true_block()),
                   g.Label(cont->false_block()))->MarkAsControl();
  } else {
    DCHECK(cont->IsSet());
    // TODO(titzer): Needs byte register.
    selector->Emit(cont->Encode(opcode), g.DefineAsRegister(cont->result()),
                   left, right);
  }
}


// Shared routine for multiple word compare operations.
static inline void VisitWordCompare(InstructionSelector* selector, Node* node,
                                    InstructionCode opcode,
                                    FlagsContinuation* cont, bool commutative) {
  IA32OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right)) {
    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
  } else if (g.CanBeImmediate(left)) {
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
  }
}


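// A test of 'a - b' is lowered to 'cmp a, b' and a test of 'a & b' to
// 'test a, b'; any other value is tested against the all-ones mask.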
void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kInt32Sub:
      return VisitWordCompare(this, node, kIA32Cmp, cont, false);
    case IrOpcode::kWord32And:
      return VisitWordCompare(this, node, kIA32Test, cont, true);
    default:
      break;
  }

  IA32OperandGenerator g(this);
  VisitCompare(this, kIA32Test, g.Use(node), g.TempImmediate(-1), cont);
}


void InstructionSelector::VisitWord32Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kIA32Cmp, cont, false);
}


void InstructionSelector::VisitFloat64Compare(Node* node,
                                              FlagsContinuation* cont) {
  IA32OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  VisitCompare(this, kSSEFloat64Cmp, g.UseRegister(left), g.Use(right), cont);
}


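// Selects a call instruction: computes the call buffer, pushes stack
// arguments, then emits kArchCallCodeObject or kArchCallJSFunction depending
// on the call descriptor kind.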
void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
                                    BasicBlock* deoptimization) {
  IA32OperandGenerator g(this);
  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);

  FrameStateDescriptor* frame_state_descriptor = NULL;

  if (descriptor->NeedsFrameState()) {
    frame_state_descriptor =
        GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
  }

  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

  // Compute InstructionOperands for inputs and outputs.
  InitializeCallBuffer(call, &buffer, true, true);

  // Push any stack arguments.
  for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
       input != buffer.pushed_nodes.rend(); input++) {
    // TODO(titzer): handle pushing double parameters.
    Emit(kIA32Push, NULL,
         g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
  }

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      opcode = kArchCallCodeObject;
      break;
    }
    case CallDescriptor::kCallJSFunction:
      opcode = kArchCallJSFunction;
      break;
    default:
      UNREACHABLE();
      return;
  }
  opcode |= MiscField::encode(descriptor->flags());

  // Emit the call instruction.
  Instruction* call_instr =
      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
           buffer.instruction_args.size(), &buffer.instruction_args.front());

  call_instr->MarkAsCall();
  if (deoptimization != NULL) {
    DCHECK(continuation != NULL);
    call_instr->MarkAsControl();
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8