// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/arm64/delayed-masm-arm64-inl.h"
#include "src/arm64/lithium-codegen-arm64.h"
#include "src/arm64/lithium-gap-resolver-arm64.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM((&masm_))


void DelayedGapMasm::EndDelayedUse() {
  DelayedMasm::EndDelayedUse();
  if (scratch_register_used()) {
    DCHECK(ScratchRegister().Is(root));
    DCHECK(!pending());
    InitializeRootRegister();
    reset_scratch_register_used();
  }
}


LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner), masm_(owner, owner->masm()), moves_(32, owner->zone()),
      root_index_(0), in_cycle_(false), saved_destination_(NULL) {
}


void LGapResolver::Resolve(LParallelMove* parallel_move) {
  DCHECK(moves_.is_empty());
  DCHECK(!masm_.pending());

  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];

    // Skip constants to perform them last. They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      root_index_ = i;  // Any cycle is found when we reach this move again.
      PerformMove(i);
      if (in_cycle_) RestoreValue();
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];

    if (!move.IsEliminated()) {
      DCHECK(move.source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  __ EndDelayedUse();

  moves_.Rewind(0);
}


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.
  LMoveOperands& current_move = moves_[index];

  DCHECK(!current_move.IsPending());
  DCHECK(!current_move.IsRedundant());

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved in a stack allocated local. Multiple moves can
  // be pending because this function is recursive.
  DCHECK(current_move.source() != NULL);  // Otherwise it will look eliminated.
  LOperand* destination = current_move.destination();
  current_move.set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
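  // For example, given the moves {x0 -> x1, x1 -> x2}, emitting x0 -> x1
  // first would clobber the source of x1 -> x2; the loop below recurses
  // into x1 -> x2 first, so every source is read before it is overwritten.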
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
      // If there is a blocking, pending move it must be moves_[root_index_]
      // and all other moves with the same source as moves_[root_index_] are
      // successfully executed (because they are cycle-free) by this loop.
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  current_move.set_destination(destination);

  // The move may be blocked on a pending move, which must be the starting move.
  // In this case, we have a cycle, and we save the source of this move to
  // a scratch register to break it.
  LMoveOperands other_move = moves_[root_index_];
  if (other_move.Blocks(destination)) {
    DCHECK(other_move.IsPending());
    BreakCycle(index);
    return;
  }

  // This move is no longer blocked.
  EmitMove(index);
}


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}


void LGapResolver::BreakCycle(int index) {
  DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
  DCHECK(!in_cycle_);

  // We save the source of that move in a register and remember its
  // destination. Then we mark this move as resolved so the cycle is
  // broken and we can perform the other moves.
  in_cycle_ = true;
  LOperand* source = moves_[index].source();
  saved_destination_ = moves_[index].destination();

  if (source->IsRegister()) {
    AcquireSavedValueRegister();
    __ Mov(SavedValueRegister(), cgen_->ToRegister(source));
  } else if (source->IsStackSlot()) {
    AcquireSavedValueRegister();
    __ Load(SavedValueRegister(), cgen_->ToMemOperand(source));
  } else if (source->IsDoubleRegister()) {
    __ Fmov(SavedFPValueRegister(), cgen_->ToDoubleRegister(source));
  } else if (source->IsDoubleStackSlot()) {
    __ Load(SavedFPValueRegister(), cgen_->ToMemOperand(source));
  } else {
    UNREACHABLE();
  }

  // Mark this move as resolved.
  // This move will actually be performed by moving the saved value to this
  // move's destination in LGapResolver::RestoreValue().
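  // For the register swap {x0 -> x1, x1 -> x0}, for example, x1 has been
  // copied into the saved value register above; x0 -> x1 is then emitted as
  // an ordinary move, and RestoreValue() completes the cycle by moving the
  // saved value into x0.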
  moves_[index].Eliminate();
}


void LGapResolver::RestoreValue() {
  DCHECK(in_cycle_);
  DCHECK(saved_destination_ != NULL);

  if (saved_destination_->IsRegister()) {
    __ Mov(cgen_->ToRegister(saved_destination_), SavedValueRegister());
    ReleaseSavedValueRegister();
  } else if (saved_destination_->IsStackSlot()) {
    __ Store(SavedValueRegister(), cgen_->ToMemOperand(saved_destination_));
    ReleaseSavedValueRegister();
  } else if (saved_destination_->IsDoubleRegister()) {
    __ Fmov(cgen_->ToDoubleRegister(saved_destination_),
            SavedFPValueRegister());
  } else if (saved_destination_->IsDoubleStackSlot()) {
    __ Store(SavedFPValueRegister(), cgen_->ToMemOperand(saved_destination_));
  } else {
    UNREACHABLE();
  }

  in_cycle_ = false;
  saved_destination_ = NULL;
}


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.

  if (source->IsRegister()) {
    Register source_register = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      __ Mov(cgen_->ToRegister(destination), source_register);
    } else {
      DCHECK(destination->IsStackSlot());
      __ Store(source_register, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsRegister()) {
      __ Load(cgen_->ToRegister(destination), source_operand);
    } else {
      DCHECK(destination->IsStackSlot());
      EmitStackSlotMove(index);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      if (cgen_->IsSmi(constant_source)) {
        __ Mov(dst, cgen_->ToSmi(constant_source));
      } else if (cgen_->IsInteger32Constant(constant_source)) {
        __ Mov(dst, cgen_->ToInteger32(constant_source));
      } else {
        __ LoadObject(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      DoubleRegister result = cgen_->ToDoubleRegister(destination);
      __ Fmov(result, cgen_->ToDouble(constant_source));
    } else {
      DCHECK(destination->IsStackSlot());
      DCHECK(!in_cycle_);  // Constant moves happen after all cycles are gone.
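      // Smi and int32 constants can be stored as immediates; other heap
      // objects must first be materialized in the saved value register and
      // then stored from there.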
      if (cgen_->IsSmi(constant_source)) {
        Smi* smi = cgen_->ToSmi(constant_source);
        __ StoreConstant(reinterpret_cast<intptr_t>(smi),
                         cgen_->ToMemOperand(destination));
      } else if (cgen_->IsInteger32Constant(constant_source)) {
        __ StoreConstant(cgen_->ToInteger32(constant_source),
                         cgen_->ToMemOperand(destination));
      } else {
        Handle<Object> handle = cgen_->ToHandle(constant_source);
        AllowDeferredHandleDereference smi_object_check;
        if (handle->IsSmi()) {
          Object* obj = *handle;
          DCHECK(!obj->IsHeapObject());
          __ StoreConstant(reinterpret_cast<intptr_t>(obj),
                           cgen_->ToMemOperand(destination));
        } else {
          AcquireSavedValueRegister();
          __ LoadObject(SavedValueRegister(), handle);
          __ Store(SavedValueRegister(), cgen_->ToMemOperand(destination));
          ReleaseSavedValueRegister();
        }
      }
    }

  } else if (source->IsDoubleRegister()) {
    DoubleRegister src = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ Fmov(cgen_->ToDoubleRegister(destination), src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ Store(src, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsDoubleStackSlot()) {
    MemOperand src = cgen_->ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ Load(cgen_->ToDoubleRegister(destination), src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      EmitStackSlotMove(index);
    }

  } else {
    UNREACHABLE();
  }

  // The move has been emitted; we can eliminate it.
  moves_[index].Eliminate();
}

} }  // namespace v8::internal