// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-scheduler.h"

namespace v8 {
namespace internal {
namespace compiler {

bool InstructionScheduler::SchedulerSupported() { return true; }


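// Returns the target-specific scheduling flags for an x64 instruction:
// whether it is a load, has a side effect, or may need a deopt check.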
int InstructionScheduler::GetTargetInstructionFlags(
    const Instruction* instr) const {
  switch (instr->arch_opcode()) {
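    // Arithmetic, logical, shift, SSE/AVX and lea-style operations are pure
    // when they only touch registers; with a memory addressing mode they are
    // treated conservatively as both a load and a side-effecting operation.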
    case kX64Add:
    case kX64Add32:
    case kX64And:
    case kX64And32:
    case kX64Cmp:
    case kX64Cmp32:
    case kX64Cmp16:
    case kX64Cmp8:
    case kX64Test:
    case kX64Test32:
    case kX64Test16:
    case kX64Test8:
    case kX64Or:
    case kX64Or32:
    case kX64Xor:
    case kX64Xor32:
    case kX64Sub:
    case kX64Sub32:
    case kX64Imul:
    case kX64Imul32:
    case kX64ImulHigh32:
    case kX64UmulHigh32:
    case kX64Not:
    case kX64Not32:
    case kX64Neg:
    case kX64Neg32:
    case kX64Shl:
    case kX64Shl32:
    case kX64Shr:
    case kX64Shr32:
    case kX64Sar:
    case kX64Sar32:
    case kX64Ror:
    case kX64Ror32:
    case kX64Lzcnt:
    case kX64Lzcnt32:
    case kX64Tzcnt:
    case kX64Tzcnt32:
    case kX64Popcnt:
    case kX64Popcnt32:
    case kSSEFloat32Cmp:
    case kSSEFloat32Add:
    case kSSEFloat32Sub:
    case kSSEFloat32Mul:
    case kSSEFloat32Div:
    case kSSEFloat32Abs:
    case kSSEFloat32Neg:
    case kSSEFloat32Sqrt:
    case kSSEFloat32Round:
    case kSSEFloat32ToFloat64:
    case kSSEFloat64Cmp:
    case kSSEFloat64Add:
    case kSSEFloat64Sub:
    case kSSEFloat64Mul:
    case kSSEFloat64Div:
    case kSSEFloat64Mod:
    case kSSEFloat64Abs:
    case kSSEFloat64Neg:
    case kSSEFloat64Sqrt:
    case kSSEFloat64Round:
    case kSSEFloat32Max:
    case kSSEFloat64Max:
    case kSSEFloat32Min:
    case kSSEFloat64Min:
    case kSSEFloat64ToFloat32:
    case kSSEFloat32ToInt32:
    case kSSEFloat32ToUint32:
    case kSSEFloat64ToInt32:
    case kSSEFloat64ToUint32:
    case kSSEFloat64ToInt64:
    case kSSEFloat32ToInt64:
    case kSSEFloat64ToUint64:
    case kSSEFloat32ToUint64:
    case kSSEInt32ToFloat64:
    case kSSEInt32ToFloat32:
    case kSSEInt64ToFloat32:
    case kSSEInt64ToFloat64:
    case kSSEUint64ToFloat32:
    case kSSEUint64ToFloat64:
    case kSSEUint32ToFloat64:
    case kSSEUint32ToFloat32:
    case kSSEFloat64ExtractLowWord32:
    case kSSEFloat64ExtractHighWord32:
    case kSSEFloat64InsertLowWord32:
    case kSSEFloat64InsertHighWord32:
    case kSSEFloat64LoadLowWord32:
    case kSSEFloat64SilenceNaN:
    case kAVXFloat32Cmp:
    case kAVXFloat32Add:
    case kAVXFloat32Sub:
    case kAVXFloat32Mul:
    case kAVXFloat32Div:
    case kAVXFloat64Cmp:
    case kAVXFloat64Add:
    case kAVXFloat64Sub:
    case kAVXFloat64Mul:
    case kAVXFloat64Div:
    case kAVXFloat64Abs:
    case kAVXFloat64Neg:
    case kAVXFloat32Abs:
    case kAVXFloat32Neg:
    case kX64BitcastFI:
    case kX64BitcastDL:
    case kX64BitcastIF:
    case kX64BitcastLD:
    case kX64Lea32:
    case kX64Lea:
    case kX64Dec32:
    case kX64Inc32:
    case kX64Int32x4Create:
    case kX64Int32x4ExtractLane:
    case kX64Int32x4ReplaceLane:
    case kX64Int32x4Add:
    case kX64Int32x4Sub:
      return (instr->addressing_mode() == kMode_None)
          ? kNoOpcodeFlags
          : kIsLoadOperation | kHasSideEffect;

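    // Division instructions may need a deoptimization check (e.g. for a zero
    // divisor); with a memory operand they additionally count as a
    // side-effecting load.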
    case kX64Idiv:
    case kX64Idiv32:
    case kX64Udiv:
    case kX64Udiv32:
      return (instr->addressing_mode() == kMode_None)
                 ? kMayNeedDeoptCheck
                 : kMayNeedDeoptCheck | kIsLoadOperation | kHasSideEffect;

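    // Sign- and zero-extending moves are pure when the source is already in a
    // register; otherwise they read from memory.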
    case kX64Movsxbl:
    case kX64Movzxbl:
    case kX64Movsxbq:
    case kX64Movzxbq:
    case kX64Movsxwl:
    case kX64Movzxwl:
    case kX64Movsxwq:
    case kX64Movzxwq:
    case kX64Movsxlq:
      DCHECK(instr->InputCount() >= 1);
      return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
                                             : kIsLoadOperation;

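    // Byte and word moves are unconditionally flagged as side-effecting.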
    case kX64Movb:
    case kX64Movw:
      return kHasSideEffect;

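    // A 32-bit move with an output is a load (pure if the input is already a
    // register); without an output it is a side-effecting store.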
    case kX64Movl:
      if (instr->HasOutput()) {
        DCHECK(instr->InputCount() >= 1);
        return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
                                               : kIsLoadOperation;
      } else {
        return kHasSideEffect;
      }

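    // Quadword and floating-point moves: an output operand means a load,
    // otherwise the instruction stores and has a side effect.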
    case kX64Movq:
    case kX64Movsd:
    case kX64Movss:
      return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;

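    // The stack check reads the stack limit from memory.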
    case kX64StackCheck:
      return kIsLoadOperation;

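    // Pushes and pokes write to the stack.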
    case kX64Push:
    case kX64Poke:
      return kHasSideEffect;

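    // Exchanges both read and write their memory operand.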
    case kX64Xchgb:
    case kX64Xchgw:
    case kX64Xchgl:
      return kIsLoadOperation | kHasSideEffect;

#define CASE(Name) case k##Name:
    COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
      // Already covered in architecture independent code.
      UNREACHABLE();
  }

  UNREACHABLE();
  return kNoOpcodeFlags;
}


int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
  // Basic latency modeling for x64 instructions. The latencies have been
  // determined empirically.
  switch (instr->arch_opcode()) {
    case kCheckedLoadInt8:
    case kCheckedLoadUint8:
    case kCheckedLoadInt16:
    case kCheckedLoadUint16:
    case kCheckedLoadWord32:
    case kCheckedLoadWord64:
    case kCheckedLoadFloat32:
    case kCheckedLoadFloat64:
    case kCheckedStoreWord8:
    case kCheckedStoreWord16:
    case kCheckedStoreWord32:
    case kCheckedStoreWord64:
    case kCheckedStoreFloat32:
    case kCheckedStoreFloat64:
    case kSSEFloat64Mul:
      return 5;
    case kX64Imul:
    case kX64Imul32:
    case kX64ImulHigh32:
    case kX64UmulHigh32:
    case kSSEFloat32Cmp:
    case kSSEFloat32Add:
    case kSSEFloat32Sub:
    case kSSEFloat32Abs:
    case kSSEFloat32Neg:
    case kSSEFloat64Cmp:
    case kSSEFloat64Add:
    case kSSEFloat64Sub:
    case kSSEFloat64Max:
    case kSSEFloat64Min:
    case kSSEFloat64Abs:
    case kSSEFloat64Neg:
      return 3;
    case kSSEFloat32Mul:
    case kSSEFloat32ToFloat64:
    case kSSEFloat64ToFloat32:
    case kSSEFloat32Round:
    case kSSEFloat64Round:
    case kSSEFloat32ToInt32:
    case kSSEFloat32ToUint32:
    case kSSEFloat64ToInt32:
    case kSSEFloat64ToUint32:
      return 4;
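    // Integer division is by far the slowest operation modeled here.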
    case kX64Idiv:
      return 49;
    case kX64Idiv32:
      return 35;
    case kX64Udiv:
      return 38;
    case kX64Udiv32:
      return 26;
    case kSSEFloat32Div:
    case kSSEFloat64Div:
    case kSSEFloat32Sqrt:
    case kSSEFloat64Sqrt:
      return 13;
    case kSSEFloat32ToInt64:
    case kSSEFloat64ToInt64:
    case kSSEFloat32ToUint64:
    case kSSEFloat64ToUint64:
      return 10;
    case kSSEFloat64Mod:
      return 50;
    case kArchTruncateDoubleToI:
      return 6;
    default:
      return 1;
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8