instruction-selector.h revision 537ba893e2530051ec7f296e769fdd37bb4ae4a0
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_H_
#define V8_COMPILER_INSTRUCTION_SELECTOR_H_

#include <map>

#include "src/compiler/common-operator.h"
#include "src/compiler/instruction.h"
#include "src/compiler/instruction-scheduler.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {

// Forward declarations.
class BasicBlock;
struct CallBuffer;  // TODO(bmeurer): Remove this.
class FlagsContinuation;
class Linkage;
class OperandGenerator;
struct SwitchInfo;

// This class connects nodes of parameters which are going to be pushed on the
// call stack with their machine type as given by the call descriptor of the
// callee.
class PushParameter {
 public:
  PushParameter() : node_(nullptr), type_(MachineType::None()) {}
  PushParameter(Node* node, MachineType type) : node_(node), type_(type) {}

  Node* node() const { return node_; }
  MachineType type() const { return type_; }

 private:
  Node* node_;
  MachineType type_;
};
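// A minimal sketch of how a backend might fill in PushParameter entries when
// lowering a call; {zone} and {arg_node} are assumptions, not names declared
// in this header:
//
//   ZoneVector<PushParameter> arguments(zone);
//   arguments.push_back(PushParameter(arg_node, MachineType::AnyTagged()));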
// Instruction selection generates an InstructionSequence for a given Schedule.
class InstructionSelector final {
 public:
  // Forward declarations.
  class Features;

  enum SourcePositionMode { kCallSourcePositions, kAllSourcePositions };

  InstructionSelector(
      Zone* zone, size_t node_count, Linkage* linkage,
      InstructionSequence* sequence, Schedule* schedule,
      SourcePositionTable* source_positions, Frame* frame,
      SourcePositionMode source_position_mode = kCallSourcePositions,
      Features features = SupportedFeatures());

  // Visit code for the entire graph with the included schedule.
  void SelectInstructions();

  void StartBlock(RpoNumber rpo);
  void EndBlock(RpoNumber rpo);
  void AddInstruction(Instruction* instr);

  // ===========================================================================
  // ============= Architecture-independent code emission methods. =============
  // ===========================================================================

  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    InstructionOperand e, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    InstructionOperand e, InstructionOperand f,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, size_t output_count,
                    InstructionOperand* outputs, size_t input_count,
                    InstructionOperand* inputs, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(Instruction* instr);
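  // A minimal sketch of how an architecture backend typically calls the Emit
  // overloads above; X64OperandGenerator and kX64Add32 belong to the x64
  // backend and are not declared in this header:
  //
  //   X64OperandGenerator g(this);
  //   Emit(kX64Add32, g.DefineSameAsFirst(node),
  //        g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));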
  // ===========================================================================
  // ===== Architecture-independent deoptimization exit emission methods. =====
  // ===========================================================================

  Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
                              InstructionOperand a, InstructionOperand b,
                              Node* frame_state);
  Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
                              InstructionOperand* outputs, size_t input_count,
                              InstructionOperand* inputs, Node* frame_state);

  // ===========================================================================
  // ============== Architecture-independent CPU feature methods. ==============
  // ===========================================================================

  class Features final {
   public:
    Features() : bits_(0) {}
    explicit Features(unsigned bits) : bits_(bits) {}
    explicit Features(CpuFeature f) : bits_(1u << f) {}
    Features(CpuFeature f1, CpuFeature f2) : bits_((1u << f1) | (1u << f2)) {}

    bool Contains(CpuFeature f) const { return (bits_ & (1u << f)); }

   private:
    unsigned bits_;
  };

  bool IsSupported(CpuFeature feature) const {
    return features_.Contains(feature);
  }

  // Returns the features supported on the target platform.
  static Features SupportedFeatures() {
    return Features(CpuFeatures::SupportedFeatures());
  }

  // TODO(sigurds) This should take a CpuFeatures argument.
  static MachineOperatorBuilder::Flags SupportedMachineOperatorFlags();

  // ===========================================================================
  // ============ Architecture-independent graph covering methods. ============
  // ===========================================================================

  // Used in pattern matching during code generation.
  // Checks if {node} can be covered while generating code for the current
  // instruction. A node can be covered if {user} holds the only edge to
  // {node} and the two are in the same basic block.
  bool CanCover(Node* user, Node* node) const;
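  // A minimal sketch of the covering idiom as a backend visitor might use it
  // (the surrounding visitor and node names are illustrative):
  //
  //   Node* input = node->InputAt(0);
  //   if (input->opcode() == IrOpcode::kLoad && CanCover(node, input)) {
  //     // Fold the load into this instruction as a memory operand instead
  //     // of emitting a separate load.
  //   }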
  // Checks if {node} was already defined, and therefore code was already
  // generated for it.
  bool IsDefined(Node* node) const;

  // Checks if {node} has any uses, and therefore code has to be generated for
  // it.
  bool IsUsed(Node* node) const;

  // Checks if {node} is currently live.
  bool IsLive(Node* node) const { return !IsDefined(node) && IsUsed(node); }

  // Gets the effect level of {node}.
  int GetEffectLevel(Node* node) const;

  int GetVirtualRegister(const Node* node);
  const std::map<NodeId, int> GetVirtualRegistersForTesting() const;

  Isolate* isolate() const { return sequence()->isolate(); }

 private:
  friend class OperandGenerator;

  void EmitTableSwitch(const SwitchInfo& sw, InstructionOperand& index_operand);
  void EmitLookupSwitch(const SwitchInfo& sw,
                        InstructionOperand& value_operand);

  // Inform the instruction selection that {node} was just defined.
  void MarkAsDefined(Node* node);

  // Inform the instruction selection that {node} has at least one use and we
  // will need to generate code for it.
  void MarkAsUsed(Node* node);

  // Sets the effect level of {node}.
  void SetEffectLevel(Node* node, int effect_level);

  // Inform the register allocation of the representation of the value produced
  // by {node}.
  void MarkAsRepresentation(MachineRepresentation rep, Node* node);
  void MarkAsWord32(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kWord32, node);
  }
  void MarkAsWord64(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kWord64, node);
  }
  void MarkAsFloat32(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kFloat32, node);
  }
  void MarkAsFloat64(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kFloat64, node);
  }
  void MarkAsReference(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kTagged, node);
  }

  // Inform the register allocation of the representation of the unallocated
  // operand {op}.
  void MarkAsRepresentation(MachineRepresentation rep,
                            const InstructionOperand& op);

  enum CallBufferFlag {
    kCallCodeImmediate = 1u << 0,
    kCallAddressImmediate = 1u << 1,
    kCallTail = 1u << 2
  };
  typedef base::Flags<CallBufferFlag> CallBufferFlags;

  // Initializes the call buffer with the InstructionOperands, nodes, etc.
  // corresponding to the inputs and outputs of the call.
  // {kCallCodeImmediate} requests immediate operands for calls to code;
  // {kCallAddressImmediate} requests immediate operands for address calls.
  void InitializeCallBuffer(Node* call, CallBuffer* buffer,
                            CallBufferFlags flags, int stack_param_delta = 0);
  bool IsTailCallAddressImmediate();
  int GetTempsCountForTailCallFromJSFunction();

  FrameStateDescriptor* GetFrameStateDescriptor(Node* node);

  // ===========================================================================
  // ============= Architecture-specific graph covering methods. ==============
  // ===========================================================================

  // Visit nodes in the given block and generate code.
  void VisitBlock(BasicBlock* block);

  // Visit the node for the control flow at the end of the block, generating
  // code if necessary.
  void VisitControl(BasicBlock* block);

  // Visit the node and generate code, if any.
  void VisitNode(Node* node);

#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
  MACHINE_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR

  void VisitFinishRegion(Node* node);
  void VisitParameter(Node* node);
  void VisitIfException(Node* node);
  void VisitOsrValue(Node* node);
  void VisitPhi(Node* node);
  void VisitProjection(Node* node);
  void VisitConstant(Node* node);
  void VisitCall(Node* call, BasicBlock* handler = nullptr);
  void VisitDeoptimizeIf(Node* node);
  void VisitDeoptimizeUnless(Node* node);
  void VisitTailCall(Node* call);
  void VisitGoto(BasicBlock* target);
  void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
  void VisitSwitch(Node* node, const SwitchInfo& sw);
  void VisitDeoptimize(DeoptimizeKind kind, Node* value);
  void VisitReturn(Node* ret);
  void VisitThrow(Node* value);

  void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
                            const CallDescriptor* descriptor, Node* node);

  // ===========================================================================

  Schedule* schedule() const { return schedule_; }
  Linkage* linkage() const { return linkage_; }
  InstructionSequence* sequence() const { return sequence_; }
  Zone* instruction_zone() const { return sequence()->zone(); }
  Zone* zone() const { return zone_; }

  // ===========================================================================

  Zone* const zone_;
  Linkage* const linkage_;
  InstructionSequence* const sequence_;
  SourcePositionTable* const source_positions_;
  SourcePositionMode const source_position_mode_;
  Features features_;
  Schedule* const schedule_;
  BasicBlock* current_block_;
  ZoneVector<Instruction*> instructions_;
  BoolVector defined_;
  BoolVector used_;
  IntVector effect_level_;
  IntVector virtual_registers_;
  InstructionScheduler* scheduler_;
  Frame* frame_;
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_INSTRUCTION_SELECTOR_H_
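// A minimal sketch of how the compiler pipeline might drive selection (the
// argument values are assumptions about the caller's context):
//
//   InstructionSelector selector(zone, graph->NodeCount(), linkage, sequence,
//                                schedule, source_positions, frame);
//   selector.SelectInstructions();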