register_allocator.cc revision 46fbaab1bf2981f2768b046abf43e368663daacd
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "register_allocator.h"

#include <sstream>

#include "base/bit_vector-inl.h"
#include "code_generator.h"
#include "ssa_liveness_analysis.h"

namespace art {

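// Converted to size_t, -1 wraps to the maximum representable value, so this
// constant acts as an "unbounded" sentinel for lifetime positions
// (e.g. "free forever", "never used again").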
static constexpr size_t kMaxLifetimePosition = -1;
static constexpr size_t kDefaultNumberOfSpillSlots = 4;

RegisterAllocator::RegisterAllocator(ArenaAllocator* allocator,
                                     CodeGenerator* codegen,
                                     const SsaLivenessAnalysis& liveness)
      : allocator_(allocator),
        codegen_(codegen),
        liveness_(liveness),
        unhandled_core_intervals_(allocator, 0),
        unhandled_fp_intervals_(allocator, 0),
        unhandled_(nullptr),
        handled_(allocator, 0),
        active_(allocator, 0),
        inactive_(allocator, 0),
        physical_core_register_intervals_(allocator, codegen->GetNumberOfCoreRegisters()),
        physical_fp_register_intervals_(allocator, codegen->GetNumberOfFloatingPointRegisters()),
        temp_intervals_(allocator, 4),
        spill_slots_(allocator, kDefaultNumberOfSpillSlots),
        safepoints_(allocator, 0),
        processing_core_registers_(false),
        number_of_registers_(-1),
        registers_array_(nullptr),
        blocked_core_registers_(codegen->GetBlockedCoreRegisters()),
        blocked_fp_registers_(codegen->GetBlockedFloatingPointRegisters()),
        reserved_out_slots_(0),
        maximum_number_of_live_registers_(0) {
  codegen->SetupBlockedRegisters();
  physical_core_register_intervals_.SetSize(codegen->GetNumberOfCoreRegisters());
  physical_fp_register_intervals_.SetSize(codegen->GetNumberOfFloatingPointRegisters());
  // Always reserve for the current method and the graph's max out vregs.
  // TODO: compute it instead.
  reserved_out_slots_ = 1 + codegen->GetGraph()->GetMaximumNumberOfOutVRegs();
}

bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph,
                                                InstructionSet instruction_set) {
  if (!Supports(instruction_set)) {
    return false;
  }
  for (size_t i = 0, e = graph.GetBlocks().Size(); i < e; ++i) {
    for (HInstructionIterator it(graph.GetBlocks().Get(i)->GetInstructions());
         !it.Done();
         it.Advance()) {
      HInstruction* current = it.Current();
      if (current->GetType() == Primitive::kPrimLong && instruction_set != kX86_64) return false;
      if ((current->GetType() == Primitive::kPrimFloat || current->GetType() == Primitive::kPrimDouble)
          && instruction_set != kX86_64) {
        return false;
      }
    }
  }
  return true;
}

static bool ShouldProcess(bool processing_core_registers, LiveInterval* interval) {
  if (interval == nullptr) return false;
  bool is_core_register = (interval->GetType() != Primitive::kPrimDouble)
      && (interval->GetType() != Primitive::kPrimFloat);
  return processing_core_registers == is_core_register;
}

void RegisterAllocator::AllocateRegisters() {
  AllocateRegistersInternal();
  Resolve();

  if (kIsDebugBuild) {
    processing_core_registers_ = true;
    ValidateInternal(true);
    processing_core_registers_ = false;
    ValidateInternal(true);
  }
}

void RegisterAllocator::BlockRegister(Location location,
                                      size_t start,
                                      size_t end) {
  int reg = location.reg();
  DCHECK(location.IsRegister() || location.IsFpuRegister());
  LiveInterval* interval = location.IsRegister()
      ? physical_core_register_intervals_.Get(reg)
      : physical_fp_register_intervals_.Get(reg);
  Primitive::Type type = location.IsRegister()
      ? Primitive::kPrimInt
      : Primitive::kPrimDouble;
  if (interval == nullptr) {
    interval = LiveInterval::MakeFixedInterval(allocator_, reg, type);
    if (location.IsRegister()) {
      physical_core_register_intervals_.Put(reg, interval);
    } else {
      physical_fp_register_intervals_.Put(reg, interval);
    }
  }
  DCHECK(interval->GetRegister() == reg);
  interval->AddRange(start, end);
}

void RegisterAllocator::AllocateRegistersInternal() {
  // Iterate post-order, to ensure the list is sorted, and the last added interval
  // is the one with the lowest start position.
  for (HLinearPostOrderIterator it(liveness_); !it.Done(); it.Advance()) {
    HBasicBlock* block = it.Current();
    for (HBackwardInstructionIterator back_it(block->GetInstructions()); !back_it.Done();
         back_it.Advance()) {
      ProcessInstruction(back_it.Current());
    }
    for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
      ProcessInstruction(inst_it.Current());
    }
  }

  number_of_registers_ = codegen_->GetNumberOfCoreRegisters();
  registers_array_ = allocator_->AllocArray<size_t>(number_of_registers_);
  processing_core_registers_ = true;
  unhandled_ = &unhandled_core_intervals_;
  for (size_t i = 0, e = physical_core_register_intervals_.Size(); i < e; ++i) {
    LiveInterval* fixed = physical_core_register_intervals_.Get(i);
    if (fixed != nullptr) {
      // Fixed intervals are added to inactive_ instead of unhandled_.
      // They are also the only type of inactive interval whose start position
      // can be after the start of the current interval during linear scan.
      // Fixed intervals are never split and never move to unhandled_.
      inactive_.Add(fixed);
    }
  }
  LinearScan();

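  // The core and floating point passes each compute their own maximum; the two
  // are summed below because both register files may need to be saved at the
  // same safepoint.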
  size_t saved_maximum_number_of_live_registers = maximum_number_of_live_registers_;
  maximum_number_of_live_registers_ = 0;

  inactive_.Reset();
  active_.Reset();
  handled_.Reset();

  number_of_registers_ = codegen_->GetNumberOfFloatingPointRegisters();
  registers_array_ = allocator_->AllocArray<size_t>(number_of_registers_);
  processing_core_registers_ = false;
  unhandled_ = &unhandled_fp_intervals_;
  for (size_t i = 0, e = physical_fp_register_intervals_.Size(); i < e; ++i) {
    LiveInterval* fixed = physical_fp_register_intervals_.Get(i);
    if (fixed != nullptr) {
      // Fixed intervals are added to inactive_ instead of unhandled_.
      // They are also the only type of inactive interval whose start position
      // can be after the start of the current interval during linear scan.
      // Fixed intervals are never split and never move to unhandled_.
      inactive_.Add(fixed);
    }
  }
  LinearScan();
  maximum_number_of_live_registers_ += saved_maximum_number_of_live_registers;
}

void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  size_t position = instruction->GetLifetimePosition();

  if (locations == nullptr) return;

  // Create synthesized intervals for temporaries.
  for (size_t i = 0; i < locations->GetTempCount(); ++i) {
    Location temp = locations->GetTemp(i);
    if (temp.IsRegister() || temp.IsFpuRegister()) {
      BlockRegister(temp, position, position + 1);
    } else {
      DCHECK(temp.IsUnallocated());
      DCHECK(temp.GetPolicy() == Location::kRequiresRegister);
      LiveInterval* interval = LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimInt);
      temp_intervals_.Add(interval);
      interval->AddRange(position, position + 1);
      unhandled_core_intervals_.Add(interval);
    }
  }

  bool core_register = (instruction->GetType() != Primitive::kPrimDouble)
      && (instruction->GetType() != Primitive::kPrimFloat);

  if (locations->CanCall()) {
    if (!instruction->IsSuspendCheck()) {
      codegen_->MarkNotLeaf();
    }
    safepoints_.Add(instruction);
    if (locations->OnlyCallsOnSlowPath()) {
      // We add a synthesized range at this position to record the live registers
      // at this position. Ideally, we could just update the safepoints when locations
      // are updated, but we currently need to know the full stack size before updating
      // locations (because of parameters and the fact that we don't have a frame pointer).
      // And knowing the full stack size requires knowing the maximum number of live
      // registers at calls in slow paths.
      // By adding the following interval in the algorithm, we can compute this
      // maximum before updating locations.
      LiveInterval* interval = LiveInterval::MakeSlowPathInterval(allocator_, instruction);
      // The start of the interval must be after the position of the safepoint, so that
      // we can just check the number of active registers at that position. Note that this
      // will include the current interval in the computation of
      // `maximum_number_of_live_registers`, so we need a better strategy if this becomes
      // a problem.
      // TODO: We could put the logic in AddSorted, to ensure the safepoint range is
      // after all other intervals starting at that same position.
      interval->AddRange(position + 1, position + 2);
      AddSorted(&unhandled_core_intervals_, interval);
      AddSorted(&unhandled_fp_intervals_, interval);
    }
  }

  if (locations->WillCall()) {
    // Block all registers.
    for (size_t i = 0; i < codegen_->GetNumberOfCoreRegisters(); ++i) {
      BlockRegister(Location::RegisterLocation(i),
                    position,
                    position + 1);
    }
    for (size_t i = 0; i < codegen_->GetNumberOfFloatingPointRegisters(); ++i) {
      BlockRegister(Location::FpuRegisterLocation(i),
                    position,
                    position + 1);
    }
  }

  for (size_t i = 0; i < instruction->InputCount(); ++i) {
    Location input = locations->InAt(i);
    if (input.IsRegister() || input.IsFpuRegister()) {
      BlockRegister(input, position, position + 1);
    }
  }

  LiveInterval* current = instruction->GetLiveInterval();
  if (current == nullptr) return;

  GrowableArray<LiveInterval*>& unhandled = core_register
      ? unhandled_core_intervals_
      : unhandled_fp_intervals_;

  DCHECK(unhandled.IsEmpty() || current->StartsBeforeOrAt(unhandled.Peek()));

  // Some instructions define their output in a fixed register or stack slot. We need
  // to ensure we know these locations before doing register allocation. For a
  // given register, we create an interval that covers these positions. The register
  // will then be unavailable at these positions when trying to allocate one for an
  // interval.
  //
  // The backward walk over instructions ensures the ranges are ordered on increasing
  // start positions.
  Location output = locations->Out();
  if (output.IsUnallocated() && output.GetPolicy() == Location::kSameAsFirstInput) {
    Location first = locations->InAt(0);
    if (first.IsRegister() || first.IsFpuRegister()) {
      current->SetFrom(position + 1);
      current->SetRegister(first.reg());
    }
  } else if (output.IsRegister() || output.IsFpuRegister()) {
    // Shift the interval's start by one to account for the blocked register.
    current->SetFrom(position + 1);
    current->SetRegister(output.reg());
    BlockRegister(output, position, position + 1);
  } else if (output.IsStackSlot() || output.IsDoubleStackSlot()) {
    current->SetSpillSlot(output.GetStackIndex());
  }

  // If needed, add interval to the list of unhandled intervals.
  if (current->HasSpillSlot() || instruction->IsConstant()) {
    // Split just before the first register use.
    size_t first_register_use = current->FirstRegisterUse();
    if (first_register_use != kNoLifetime) {
      LiveInterval* split = Split(current, first_register_use - 1);
      // Don't add directly to `unhandled`: it needs to stay sorted, and the start
      // of this new interval might be after intervals already in the list.
      AddSorted(&unhandled, split);
    } else {
      // Nothing to do, we won't allocate a register for this value.
    }
  } else {
    // Don't add directly to `unhandled`: temp or safepoint intervals
    // for this instruction may have been added, and those need to be
    // processed first.
    AddSorted(&unhandled, current);
  }
}

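// Iterates over all the live ranges of an interval and of its split siblings,
// in increasing position order.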
class AllRangesIterator : public ValueObject {
 public:
  explicit AllRangesIterator(LiveInterval* interval)
      : current_interval_(interval),
        current_range_(interval->GetFirstRange()) {}

  bool Done() const { return current_interval_ == nullptr; }
  LiveRange* CurrentRange() const { return current_range_; }
  LiveInterval* CurrentInterval() const { return current_interval_; }

  void Advance() {
    current_range_ = current_range_->GetNext();
    if (current_range_ == nullptr) {
      current_interval_ = current_interval_->GetNextSibling();
      if (current_interval_ != nullptr) {
        current_range_ = current_interval_->GetFirstRange();
      }
    }
  }

 private:
  LiveInterval* current_interval_;
  LiveRange* current_range_;

  DISALLOW_COPY_AND_ASSIGN(AllRangesIterator);
};

bool RegisterAllocator::ValidateInternal(bool log_fatal_on_failure) const {
  // To simplify unit testing, we eagerly create the array of intervals, and
  // call the helper method.
  GrowableArray<LiveInterval*> intervals(allocator_, 0);
  for (size_t i = 0; i < liveness_.GetNumberOfSsaValues(); ++i) {
    HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
    if (ShouldProcess(processing_core_registers_, instruction->GetLiveInterval())) {
      intervals.Add(instruction->GetLiveInterval());
    }
  }

  if (processing_core_registers_) {
    for (size_t i = 0, e = physical_core_register_intervals_.Size(); i < e; ++i) {
      LiveInterval* fixed = physical_core_register_intervals_.Get(i);
      if (fixed != nullptr) {
        intervals.Add(fixed);
      }
    }
  } else {
    for (size_t i = 0, e = physical_fp_register_intervals_.Size(); i < e; ++i) {
      LiveInterval* fixed = physical_fp_register_intervals_.Get(i);
      if (fixed != nullptr) {
        intervals.Add(fixed);
      }
    }
  }

  for (size_t i = 0, e = temp_intervals_.Size(); i < e; ++i) {
    LiveInterval* temp = temp_intervals_.Get(i);
    if (ShouldProcess(processing_core_registers_, temp)) {
      intervals.Add(temp);
    }
  }

  return ValidateIntervals(intervals, spill_slots_.Size(), reserved_out_slots_, *codegen_,
                           allocator_, processing_core_registers_, log_fatal_on_failure);
}

bool RegisterAllocator::ValidateIntervals(const GrowableArray<LiveInterval*>& intervals,
                                          size_t number_of_spill_slots,
                                          size_t number_of_out_slots,
                                          const CodeGenerator& codegen,
                                          ArenaAllocator* allocator,
                                          bool processing_core_registers,
                                          bool log_fatal_on_failure) {
  size_t number_of_registers = processing_core_registers
      ? codegen.GetNumberOfCoreRegisters()
      : codegen.GetNumberOfFloatingPointRegisters();
  GrowableArray<ArenaBitVector*> liveness_of_values(
      allocator, number_of_registers + number_of_spill_slots);

  // Allocate a bit vector per register. A live interval that has a register
  // allocated will populate the associated bit vector based on its live ranges.
  for (size_t i = 0; i < number_of_registers + number_of_spill_slots; ++i) {
    liveness_of_values.Add(new (allocator) ArenaBitVector(allocator, 0, true));
  }

  for (size_t i = 0, e = intervals.Size(); i < e; ++i) {
    for (AllRangesIterator it(intervals.Get(i)); !it.Done(); it.Advance()) {
      LiveInterval* current = it.CurrentInterval();
      HInstruction* defined_by = current->GetParent()->GetDefinedBy();
      if (current->GetParent()->HasSpillSlot()
           // Parameters have their own stack slot.
           && !(defined_by != nullptr && defined_by->IsParameterValue())) {
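        // Spill slots are indexed after the registers in `liveness_of_values`.
        // The recorded spill slot is a byte offset that already includes the
        // reserved out slots (see AllocateSpillSlotFor), so those are subtracted
        // back out to recover the slot index: e.g. with 16 registers and 3 out
        // slots, byte offset 5 * kVRegSize maps to bit vector index 16 + 5 - 3.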
        BitVector* liveness_of_spill_slot = liveness_of_values.Get(number_of_registers
            + current->GetParent()->GetSpillSlot() / kVRegSize
            - number_of_out_slots);
        for (size_t j = it.CurrentRange()->GetStart(); j < it.CurrentRange()->GetEnd(); ++j) {
          if (liveness_of_spill_slot->IsBitSet(j)) {
            if (log_fatal_on_failure) {
              std::ostringstream message;
              message << "Spill slot conflict at " << j;
              LOG(FATAL) << message.str();
            } else {
              return false;
            }
          } else {
            liveness_of_spill_slot->SetBit(j);
          }
        }
      }

      if (current->HasRegister()) {
        BitVector* liveness_of_register = liveness_of_values.Get(current->GetRegister());
        for (size_t j = it.CurrentRange()->GetStart(); j < it.CurrentRange()->GetEnd(); ++j) {
          if (liveness_of_register->IsBitSet(j)) {
            if (log_fatal_on_failure) {
              std::ostringstream message;
              message << "Register conflict at " << j << " ";
              if (defined_by != nullptr) {
                message << "(" << defined_by->DebugName() << ") ";
              }
              message << "for ";
              if (processing_core_registers) {
                codegen.DumpCoreRegister(message, current->GetRegister());
              } else {
                codegen.DumpFloatingPointRegister(message, current->GetRegister());
              }
              LOG(FATAL) << message.str();
            } else {
              return false;
            }
          } else {
            liveness_of_register->SetBit(j);
          }
        }
      }
    }
  }
  return true;
}

void RegisterAllocator::DumpInterval(std::ostream& stream, LiveInterval* interval) const {
  interval->Dump(stream);
  stream << ": ";
  if (interval->HasRegister()) {
    if (interval->IsFloatingPoint()) {
      codegen_->DumpFloatingPointRegister(stream, interval->GetRegister());
    } else {
      codegen_->DumpCoreRegister(stream, interval->GetRegister());
    }
  } else {
    stream << "spilled";
  }
  stream << std::endl;
}

void RegisterAllocator::DumpAllIntervals(std::ostream& stream) const {
  stream << "inactive: " << std::endl;
  for (size_t i = 0; i < inactive_.Size(); ++i) {
    DumpInterval(stream, inactive_.Get(i));
  }
  stream << "active: " << std::endl;
  for (size_t i = 0; i < active_.Size(); ++i) {
    DumpInterval(stream, active_.Get(i));
  }
  stream << "unhandled: " << std::endl;
  auto unhandled = (unhandled_ != nullptr) ?
      unhandled_ : &unhandled_core_intervals_;
  for (size_t i = 0; i < unhandled->Size(); ++i) {
    DumpInterval(stream, unhandled->Get(i));
  }
  stream << "handled: " << std::endl;
  for (size_t i = 0; i < handled_.Size(); ++i) {
    DumpInterval(stream, handled_.Get(i));
  }
}

// By the book implementation of a linear scan register allocator.
void RegisterAllocator::LinearScan() {
  while (!unhandled_->IsEmpty()) {
    // (1) Remove interval with the lowest start position from unhandled.
    LiveInterval* current = unhandled_->Pop();
    DCHECK(!current->IsFixed() && !current->HasSpillSlot());
    DCHECK(unhandled_->IsEmpty() || unhandled_->Peek()->GetStart() >= current->GetStart());

    if (current->IsSlowPathSafepoint()) {
      // Synthesized interval to record the maximum number of live registers
      // at safepoints. No need to allocate a register for it.
      // We know that the current actives are all live at the safepoint (modulo
      // the one created by the safepoint).
      maximum_number_of_live_registers_ =
          std::max(maximum_number_of_live_registers_, active_.Size());
      continue;
    }

    size_t position = current->GetStart();

    // Remember the current size of inactive_: intervals moved below from
    // active_ to inactive_ do not need to be re-checked at this position.
    size_t inactive_intervals_to_handle = inactive_.Size();

    // (2) Remove currently active intervals that are dead at this position.
    //     Move active intervals that have a lifetime hole at this position
    //     to inactive.
    for (size_t i = 0; i < active_.Size(); ++i) {
      LiveInterval* interval = active_.Get(i);
      if (interval->IsDeadAt(position)) {
        active_.Delete(interval);
        --i;
        handled_.Add(interval);
      } else if (!interval->Covers(position)) {
        active_.Delete(interval);
        --i;
        inactive_.Add(interval);
      }
    }

    // (3) Remove currently inactive intervals that are dead at this position.
    //     Move inactive intervals that cover this position to active.
    for (size_t i = 0; i < inactive_intervals_to_handle; ++i) {
      LiveInterval* interval = inactive_.Get(i);
      DCHECK(interval->GetStart() < position || interval->IsFixed());
      if (interval->IsDeadAt(position)) {
        inactive_.Delete(interval);
        --i;
        --inactive_intervals_to_handle;
        handled_.Add(interval);
      } else if (interval->Covers(position)) {
        inactive_.Delete(interval);
        --i;
        --inactive_intervals_to_handle;
        active_.Add(interval);
      }
    }

    // (4) Try to find an available register.
    bool success = TryAllocateFreeReg(current);

    // (5) If no register could be found, we need to spill.
    if (!success) {
      success = AllocateBlockedReg(current);
    }

    // (6) If the interval had a register allocated, add it to the list of active
    //     intervals.
    if (success) {
      active_.Add(current);
    }
  }
}

// Find a free register. If multiple are found, pick the register that
// is free the longest.
bool RegisterAllocator::TryAllocateFreeReg(LiveInterval* current) {
  size_t* free_until = registers_array_;
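  // free_until[reg] is the first position at which `reg` stops being free:
  // 0 means the register is taken by an active interval, and
  // kMaxLifetimePosition means it is free for the rest of the method.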

  // First set all registers to be free.
  for (size_t i = 0; i < number_of_registers_; ++i) {
    free_until[i] = kMaxLifetimePosition;
  }

  // For each active interval, set its register to not free.
  for (size_t i = 0, e = active_.Size(); i < e; ++i) {
    LiveInterval* interval = active_.Get(i);
    DCHECK(interval->HasRegister());
    free_until[interval->GetRegister()] = 0;
  }

  // For each inactive interval, set its register to be free until
  // the next intersection with `current`.
  for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
    LiveInterval* inactive = inactive_.Get(i);
    // Temp and slow-path-safepoint intervals have no holes, so they
    // are never inactive.
    DCHECK(!inactive->IsTemp() && !inactive->IsSlowPathSafepoint());
    if (!current->IsSplit() && !inactive->IsFixed()) {
      // Neither current nor inactive is fixed.
      // Thanks to SSA, a non-split interval starting in a hole of an
      // inactive interval should never intersect with that inactive interval.
      // This only holds when inactive is not fixed, because fixed intervals
      // don't come from SSA.
      DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
      continue;
    }

    DCHECK(inactive->HasRegister());
    if (free_until[inactive->GetRegister()] == 0) {
      // Already used by some active interval. No need to intersect.
      continue;
    }
    size_t next_intersection = inactive->FirstIntersectionWith(current);
    if (next_intersection != kNoLifetime) {
      free_until[inactive->GetRegister()] =
          std::min(free_until[inactive->GetRegister()], next_intersection);
    }
  }

  int reg = -1;
  if (current->HasRegister()) {
    // Some instructions have a fixed register output.
    reg = current->GetRegister();
    DCHECK_NE(free_until[reg], 0u);
  } else {
    int hint = current->FindFirstRegisterHint(free_until);
    if (hint != kNoRegister) {
      DCHECK(!IsBlocked(hint));
      reg = hint;
    } else {
      // Pick the register that is free the longest.
      for (size_t i = 0; i < number_of_registers_; ++i) {
        if (IsBlocked(i)) continue;
        if (reg == -1 || free_until[i] > free_until[reg]) {
          reg = i;
          if (free_until[i] == kMaxLifetimePosition) break;
        }
      }
    }
  }

  // If we could not find a register, we need to spill.
  if (reg == -1 || free_until[reg] == 0) {
    return false;
  }

  current->SetRegister(reg);
  if (!current->IsDeadAt(free_until[reg])) {
    // If the register is only available for a subset of live ranges
    // covered by `current`, split `current` at the position where
    // the register is not available anymore.
    LiveInterval* split = Split(current, free_until[reg]);
    DCHECK(split != nullptr);
    AddSorted(unhandled_, split);
  }
  return true;
}

bool RegisterAllocator::IsBlocked(int reg) const {
  return processing_core_registers_
      ? blocked_core_registers_[reg]
      : blocked_fp_registers_[reg];
}

// Find the register that is used the last, and spill the interval
// that holds it. If the first register use of `current` is after the next
// use of that register, we spill `current` instead.
bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
  size_t first_register_use = current->FirstRegisterUse();
  if (first_register_use == kNoLifetime) {
    AllocateSpillSlotFor(current);
    return false;
  }

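  // next_use[reg] tracks the next position, from the start of `current`, at
  // which `reg` is needed by an active or inactive interval; registers never
  // needed again keep kMaxLifetimePosition.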
  // First set all registers as not being used.
  size_t* next_use = registers_array_;
  for (size_t i = 0; i < number_of_registers_; ++i) {
    next_use[i] = kMaxLifetimePosition;
  }

  // For each active interval, find the next use of its register after the
  // start of current.
  for (size_t i = 0, e = active_.Size(); i < e; ++i) {
    LiveInterval* active = active_.Get(i);
    DCHECK(active->HasRegister());
    if (active->IsFixed()) {
      next_use[active->GetRegister()] = current->GetStart();
    } else {
      size_t use = active->FirstRegisterUseAfter(current->GetStart());
      if (use != kNoLifetime) {
        next_use[active->GetRegister()] = use;
      }
    }
  }

  // For each inactive interval, find the next use of its register after the
  // start of current.
  for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
    LiveInterval* inactive = inactive_.Get(i);
    // Temp and slow-path-safepoint intervals have no holes, so they
    // are never inactive.
    DCHECK(!inactive->IsTemp() && !inactive->IsSlowPathSafepoint());
    if (!current->IsSplit() && !inactive->IsFixed()) {
      // Neither current nor inactive is fixed.
      // Thanks to SSA, a non-split interval starting in a hole of an
      // inactive interval should never intersect with that inactive interval.
      // This only holds when inactive is not fixed, because fixed intervals
      // don't come from SSA.
      DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
      continue;
    }
    DCHECK(inactive->HasRegister());
    size_t next_intersection = inactive->FirstIntersectionWith(current);
    if (next_intersection != kNoLifetime) {
      if (inactive->IsFixed()) {
        next_use[inactive->GetRegister()] =
            std::min(next_intersection, next_use[inactive->GetRegister()]);
      } else {
        size_t use = inactive->FirstRegisterUseAfter(current->GetStart());
        if (use != kNoLifetime) {
          next_use[inactive->GetRegister()] = std::min(use, next_use[inactive->GetRegister()]);
        }
      }
    }
  }

  // Pick the register that is used the last.
  int reg = -1;
  for (size_t i = 0; i < number_of_registers_; ++i) {
    if (IsBlocked(i)) continue;
    if (reg == -1 || next_use[i] > next_use[reg]) {
      reg = i;
      if (next_use[i] == kMaxLifetimePosition) break;
    }
  }

  if (first_register_use >= next_use[reg]) {
    // If the first register use of `current` is after the next use of the found
    // register, we split this interval just before its first register use.
    AllocateSpillSlotFor(current);
    LiveInterval* split = Split(current, first_register_use - 1);
    DCHECK_NE(current, split) << "There are not enough registers available for "
      << split->GetParent()->GetDefinedBy()->DebugName();
    AddSorted(unhandled_, split);
    return false;
  } else {
    // Use this register and spill the active and inactive intervals that
    // hold that register.
    current->SetRegister(reg);

    for (size_t i = 0, e = active_.Size(); i < e; ++i) {
      LiveInterval* active = active_.Get(i);
      if (active->GetRegister() == reg) {
        DCHECK(!active->IsFixed());
        LiveInterval* split = Split(active, current->GetStart());
        active_.DeleteAt(i);
        handled_.Add(active);
        AddSorted(unhandled_, split);
        break;
      }
    }

    for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
      LiveInterval* inactive = inactive_.Get(i);
      if (inactive->GetRegister() == reg) {
        if (!current->IsSplit() && !inactive->IsFixed()) {
          // Neither current nor inactive is fixed.
          // Thanks to SSA, a non-split interval starting in a hole of an
          // inactive interval should never intersect with that inactive interval.
          // This only holds when inactive is not fixed, because fixed intervals
          // don't come from SSA.
          DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
          continue;
        }
        size_t next_intersection = inactive->FirstIntersectionWith(current);
        if (next_intersection != kNoLifetime) {
          if (inactive->IsFixed()) {
            LiveInterval* split = Split(current, next_intersection);
            AddSorted(unhandled_, split);
          } else {
            LiveInterval* split = Split(inactive, next_intersection);
            inactive_.DeleteAt(i);
            --i;
            --e;
            handled_.Add(inactive);
            AddSorted(unhandled_, split);
          }
        }
      }
    }

    return true;
  }
}

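// `array` is sorted by decreasing start position, so that Pop() returns the
// interval with the lowest start. Scan backwards from the end (lowest starts)
// and insert `interval` just below the last entry that starts after it.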
void RegisterAllocator::AddSorted(GrowableArray<LiveInterval*>* array, LiveInterval* interval) {
  DCHECK(!interval->IsFixed() && !interval->HasSpillSlot());
  size_t insert_at = 0;
  for (size_t i = array->Size(); i > 0; --i) {
    LiveInterval* current = array->Get(i - 1);
    if (current->StartsAfter(interval)) {
      insert_at = i;
      break;
    }
  }
  array->InsertAt(insert_at, interval);
}

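// Splits `interval` at `position` and returns the new sibling covering
// [position, end). If `position` is the interval's start, no sibling is
// created: the interval itself is returned, with its register cleared, to be
// handled again.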
LiveInterval* RegisterAllocator::Split(LiveInterval* interval, size_t position) {
  DCHECK(position >= interval->GetStart());
  DCHECK(!interval->IsDeadAt(position));
  if (position == interval->GetStart()) {
    // Spill slot will be allocated when handling `interval` again.
    interval->ClearRegister();
    return interval;
  } else {
    LiveInterval* new_interval = interval->SplitAt(position);
    return new_interval;
  }
}

void RegisterAllocator::AllocateSpillSlotFor(LiveInterval* interval) {
  LiveInterval* parent = interval->GetParent();

  // An instruction gets a spill slot for its entire lifetime. If the parent
  // of this interval already has a spill slot, there is nothing to do.
  if (parent->HasSpillSlot()) {
    return;
  }

  HInstruction* defined_by = parent->GetDefinedBy();
  if (defined_by->IsParameterValue()) {
    // Parameters have their own stack slot.
    parent->SetSpillSlot(codegen_->GetStackSlotOfParameter(defined_by->AsParameterValue()));
    return;
  }

  if (defined_by->IsConstant()) {
    // Constants don't need a spill slot.
    return;
  }

  LiveInterval* last_sibling = interval;
  while (last_sibling->GetNextSibling() != nullptr) {
    last_sibling = last_sibling->GetNextSibling();
  }
  size_t end = last_sibling->GetEnd();

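  // spill_slots_ records, for each slot, the lifetime end of the last interval
  // assigned to it; a slot can be reused once that end is before the start of
  // `parent`.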
  // Find an available spill slot.
  size_t slot = 0;
  for (size_t e = spill_slots_.Size(); slot < e; ++slot) {
    // We check if it is less rather than less or equal because the parallel move
    // resolver does not work when a single spill slot needs to be exchanged with
    // a double spill slot. The strict comparison avoids needing to exchange these
    // locations at the same lifetime position.
    if (spill_slots_.Get(slot) < parent->GetStart()
        && (slot == (e - 1) || spill_slots_.Get(slot + 1) < parent->GetStart())) {
      break;
    }
  }

  if (parent->NeedsTwoSpillSlots()) {
    if (slot == spill_slots_.Size()) {
      // We need a new spill slot.
      spill_slots_.Add(end);
      spill_slots_.Add(end);
    } else if (slot == spill_slots_.Size() - 1) {
      spill_slots_.Put(slot, end);
      spill_slots_.Add(end);
    } else {
      spill_slots_.Put(slot, end);
      spill_slots_.Put(slot + 1, end);
    }
  } else {
    if (slot == spill_slots_.Size()) {
      // We need a new spill slot.
      spill_slots_.Add(end);
    } else {
      spill_slots_.Put(slot, end);
    }
  }

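  // Skip the slots reserved for the current method and the outgoing arguments
  // (see the constructor), and convert the slot index to a byte offset.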
  parent->SetSpillSlot((slot + reserved_out_slots_) * kVRegSize);
}

static bool IsValidDestination(Location destination) {
  return destination.IsRegister()
      || destination.IsFpuRegister()
      || destination.IsStackSlot()
      || destination.IsDoubleStackSlot();
}

void RegisterAllocator::AddInputMoveFor(HInstruction* user,
                                        Location source,
                                        Location destination) const {
  DCHECK(IsValidDestination(destination));
  if (source.Equals(destination)) return;

  DCHECK(!user->IsPhi());

  HInstruction* previous = user->GetPrevious();
  HParallelMove* move = nullptr;
  if (previous == nullptr
      || !previous->IsParallelMove()
      || previous->GetLifetimePosition() < user->GetLifetimePosition()) {
    move = new (allocator_) HParallelMove(allocator_);
    move->SetLifetimePosition(user->GetLifetimePosition());
    user->GetBlock()->InsertInstructionBefore(move, user);
  } else {
    move = previous->AsParallelMove();
  }
  DCHECK_EQ(move->GetLifetimePosition(), user->GetLifetimePosition());
  move->AddMove(new (allocator_) MoveOperands(source, destination, nullptr));
}

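// Lifetime positions are allocated two per instruction (see the numbering in
// SsaLivenessAnalysis): an even position is the start of an instruction, and
// the following odd position is its end, i.e. the gap just after it. For
// example, the instruction at position 4 is retrieved with
// GetInstructionFromPosition(4 / 2), and position 5 is the gap after it.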
static bool IsInstructionStart(size_t position) {
  return (position & 1) == 0;
}

static bool IsInstructionEnd(size_t position) {
  return (position & 1) == 1;
}

void RegisterAllocator::InsertParallelMoveAt(size_t position,
                                             HInstruction* instruction,
                                             Location source,
                                             Location destination) const {
  DCHECK(IsValidDestination(destination));
  if (source.Equals(destination)) return;

  HInstruction* at = liveness_.GetInstructionFromPosition(position / 2);
  HParallelMove* move;
  if (at == nullptr) {
    if (IsInstructionStart(position)) {
      // Block boundary; don't do anything, the connection of split siblings
      // will handle it.
      return;
    } else {
      // Move must happen before the first instruction of the block.
      at = liveness_.GetInstructionFromPosition((position + 1) / 2);
      move = at->AsParallelMove();
      if (move == nullptr || move->GetLifetimePosition() < position) {
        move = new (allocator_) HParallelMove(allocator_);
        move->SetLifetimePosition(position);
        at->GetBlock()->InsertInstructionBefore(move, at);
      }
    }
  } else if (IsInstructionEnd(position)) {
    // Move must happen after the instruction.
    DCHECK(!at->IsControlFlow());
    move = at->GetNext()->AsParallelMove();
    // This is a parallel move for connecting siblings in the same block. We need
    // to differentiate it from moves for connecting blocks, and from input moves.
    if (move == nullptr || move->GetLifetimePosition() > position) {
      move = new (allocator_) HParallelMove(allocator_);
      move->SetLifetimePosition(position);
      at->GetBlock()->InsertInstructionBefore(move, at->GetNext());
    }
  } else {
    // Move must happen before the instruction.
    HInstruction* previous = at->GetPrevious();
    if (previous == nullptr
        || !previous->IsParallelMove()
        || previous->GetLifetimePosition() != position) {
      // If the previous is a parallel move, then its position must be lower
      // than the given `position`: it was added just after the non-parallel
      // move instruction that precedes `instruction`.
      DCHECK(previous == nullptr
             || !previous->IsParallelMove()
             || previous->GetLifetimePosition() < position);
      move = new (allocator_) HParallelMove(allocator_);
      move->SetLifetimePosition(position);
      at->GetBlock()->InsertInstructionBefore(move, at);
    } else {
      move = previous->AsParallelMove();
    }
  }
  DCHECK_EQ(move->GetLifetimePosition(), position);
  move->AddMove(new (allocator_) MoveOperands(source, destination, instruction));
}

void RegisterAllocator::InsertParallelMoveAtExitOf(HBasicBlock* block,
                                                   HInstruction* instruction,
                                                   Location source,
                                                   Location destination) const {
  DCHECK(IsValidDestination(destination));
  if (source.Equals(destination)) return;

  DCHECK_EQ(block->GetSuccessors().Size(), 1u);
  HInstruction* last = block->GetLastInstruction();
  // We insert moves at exit for phi predecessors and connecting blocks.
  // A block ending with an if cannot branch to a block with phis because
  // we do not allow critical edges. Nor can it be used to connect
  // a split interval between two blocks: the move has to happen in the successor.
  DCHECK(!last->IsIf());
  HInstruction* previous = last->GetPrevious();
  HParallelMove* move;
  // This is a parallel move for connecting blocks. We need to differentiate
  // it from moves for connecting siblings in the same block, and from output moves.
  if (previous == nullptr || !previous->IsParallelMove()
      || previous->AsParallelMove()->GetLifetimePosition() != block->GetLifetimeEnd()) {
    move = new (allocator_) HParallelMove(allocator_);
    move->SetLifetimePosition(block->GetLifetimeEnd());
    block->InsertInstructionBefore(move, last);
  } else {
    move = previous->AsParallelMove();
  }
  move->AddMove(new (allocator_) MoveOperands(source, destination, instruction));
}

void RegisterAllocator::InsertParallelMoveAtEntryOf(HBasicBlock* block,
                                                    HInstruction* instruction,
                                                    Location source,
                                                    Location destination) const {
  DCHECK(IsValidDestination(destination));
  if (source.Equals(destination)) return;

  HInstruction* first = block->GetFirstInstruction();
  HParallelMove* move = first->AsParallelMove();
  // This is a parallel move for connecting blocks. We need to differentiate
  // it from moves for connecting siblings in the same block, and from input moves.
  if (move == nullptr || move->GetLifetimePosition() != block->GetLifetimeStart()) {
    move = new (allocator_) HParallelMove(allocator_);
    move->SetLifetimePosition(block->GetLifetimeStart());
    block->InsertInstructionBefore(move, first);
  }
  move->AddMove(new (allocator_) MoveOperands(source, destination, instruction));
}

void RegisterAllocator::InsertMoveAfter(HInstruction* instruction,
                                        Location source,
                                        Location destination) const {
  DCHECK(IsValidDestination(destination));
  if (source.Equals(destination)) return;

  if (instruction->IsPhi()) {
    InsertParallelMoveAtEntryOf(instruction->GetBlock(), instruction, source, destination);
    return;
  }

  size_t position = instruction->GetLifetimePosition() + 1;
  HParallelMove* move = instruction->GetNext()->AsParallelMove();
  // This is a parallel move for moving the output of an instruction. We need
  // to differentiate it from input moves, moves for connecting siblings in
  // the same block, and moves for connecting blocks.
  if (move == nullptr || move->GetLifetimePosition() != position) {
    move = new (allocator_) HParallelMove(allocator_);
    move->SetLifetimePosition(position);
    instruction->GetBlock()->InsertInstructionBefore(move, instruction->GetNext());
  }
  move->AddMove(new (allocator_) MoveOperands(source, destination, instruction));
}

void RegisterAllocator::ConnectSiblings(LiveInterval* interval) {
  LiveInterval* current = interval;
  if (current->HasSpillSlot() && current->HasRegister()) {
    // We spill eagerly, so the move must be at the definition.
    InsertMoveAfter(interval->GetDefinedBy(),
                    interval->IsFloatingPoint()
                        ? Location::FpuRegisterLocation(interval->GetRegister())
                        : Location::RegisterLocation(interval->GetRegister()),
                    interval->NeedsTwoSpillSlots()
                        ? Location::DoubleStackSlot(interval->GetParent()->GetSpillSlot())
                        : Location::StackSlot(interval->GetParent()->GetSpillSlot()));
  }
  UsePosition* use = current->GetFirstUse();

  // Walk over all siblings, updating locations of use positions, and
  // connecting them when they are adjacent.
  do {
    Location source = current->ToLocation();

    // Walk over all uses covered by this interval, and update the location
    // information.
    while (use != nullptr && use->GetPosition() <= current->GetEnd()) {
      LocationSummary* locations = use->GetUser()->GetLocations();
      if (use->GetIsEnvironment()) {
        locations->SetEnvironmentAt(use->GetInputIndex(), source);
      } else {
        Location expected_location = locations->InAt(use->GetInputIndex());
        if (expected_location.IsUnallocated()) {
          locations->SetInAt(use->GetInputIndex(), source);
        } else if (!expected_location.IsConstant()) {
          AddInputMoveFor(use->GetUser(), source, expected_location);
        }
      }
      use = use->GetNext();
    }

    // If the next interval starts just after this one, and has a register,
    // insert a move.
    LiveInterval* next_sibling = current->GetNextSibling();
    if (next_sibling != nullptr
        && next_sibling->HasRegister()
        && current->GetEnd() == next_sibling->GetStart()) {
      Location destination = next_sibling->ToLocation();
      InsertParallelMoveAt(current->GetEnd(), interval->GetDefinedBy(), source, destination);
    }

    // At each safepoint, we record stack and register information.
    for (size_t i = 0, e = safepoints_.Size(); i < e; ++i) {
      HInstruction* safepoint = safepoints_.Get(i);
      size_t position = safepoint->GetLifetimePosition();
      LocationSummary* locations = safepoint->GetLocations();
      if (!current->Covers(position)) {
        continue;
      }
      if (interval->GetStart() == position) {
        // The safepoint is for this instruction, so the location of the instruction
        // does not need to be saved.
        continue;
      }

      if ((current->GetType() == Primitive::kPrimNot) && current->GetParent()->HasSpillSlot()) {
        locations->SetStackBit(current->GetParent()->GetSpillSlot() / kVRegSize);
      }

      switch (source.GetKind()) {
        case Location::kRegister: {
          locations->AddLiveRegister(source);
          DCHECK_LE(locations->GetNumberOfLiveRegisters(), maximum_number_of_live_registers_);
          if (current->GetType() == Primitive::kPrimNot) {
            locations->SetRegisterBit(source.reg());
          }
          break;
        }
        case Location::kFpuRegister: {
          locations->AddLiveRegister(source);
          break;
        }
        case Location::kStackSlot:  // Fall-through
        case Location::kDoubleStackSlot:  // Fall-through
        case Location::kConstant: {
          // Nothing to do.
          break;
        }
        default: {
          LOG(FATAL) << "Unexpected location for object";
        }
      }
    }
    current = next_sibling;
  } while (current != nullptr);
  DCHECK(use == nullptr);
}

void RegisterAllocator::ConnectSplitSiblings(LiveInterval* interval,
                                             HBasicBlock* from,
                                             HBasicBlock* to) const {
  if (interval->GetNextSibling() == nullptr) {
    // Nothing to connect. The whole range was allocated to the same location.
    return;
  }

  // Intervals end at the lifetime end of a block. The decrement by one
  // ensures the `Covers(from_position)` call below will return true.
  size_t from_position = from->GetLifetimeEnd() - 1;
  size_t to_position = to->GetLifetimeStart();

  LiveInterval* destination = nullptr;
  LiveInterval* source = nullptr;

  LiveInterval* current = interval;

  // Check the intervals that cover `from` and `to`.
  while ((current != nullptr) && (source == nullptr || destination == nullptr)) {
    if (current->Covers(from_position)) {
      DCHECK(source == nullptr);
      source = current;
    }
    if (current->Covers(to_position)) {
      DCHECK(destination == nullptr);
      destination = current;
    }

    current = current->GetNextSibling();
  }

  if (destination == source) {
    // Interval was not split.
    return;
  }

  DCHECK(destination != nullptr && source != nullptr);

  if (!destination->HasRegister()) {
    // Values are eagerly spilled. The spill slot already contains the
    // appropriate value.
    return;
  }

  // If `from` has only one successor, we can put the moves at the exit of it. Otherwise
  // we need to put the moves at the entry of `to`.
  if (from->GetSuccessors().Size() == 1) {
    InsertParallelMoveAtExitOf(from,
                               interval->GetParent()->GetDefinedBy(),
                               source->ToLocation(),
                               destination->ToLocation());
  } else {
    DCHECK_EQ(to->GetPredecessors().Size(), 1u);
    InsertParallelMoveAtEntryOf(to,
                                interval->GetParent()->GetDefinedBy(),
                                source->ToLocation(),
                                destination->ToLocation());
  }
}

void RegisterAllocator::Resolve() {
  codegen_->ComputeFrameSize(
      spill_slots_.Size(), maximum_number_of_live_registers_, reserved_out_slots_);

  // Adjust the Out Location of instructions.
  // TODO: Use pointers of Location inside LiveInterval to avoid doing another iteration.
  for (size_t i = 0, e = liveness_.GetNumberOfSsaValues(); i < e; ++i) {
    HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
    LiveInterval* current = instruction->GetLiveInterval();
    LocationSummary* locations = instruction->GetLocations();
    Location location = locations->Out();
    if (instruction->IsParameterValue()) {
      // Now that we know the frame size, adjust the parameter's location.
      if (location.IsStackSlot()) {
        location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
        current->SetSpillSlot(location.GetStackIndex());
        locations->UpdateOut(location);
      } else if (location.IsDoubleStackSlot()) {
        location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
        current->SetSpillSlot(location.GetStackIndex());
        locations->UpdateOut(location);
      } else if (current->HasSpillSlot()) {
        current->SetSpillSlot(current->GetSpillSlot() + codegen_->GetFrameSize());
      }
    }

    Location source = current->ToLocation();

    if (location.IsUnallocated()) {
      if (location.GetPolicy() == Location::kSameAsFirstInput) {
        if (locations->InAt(0).IsUnallocated()) {
          locations->SetInAt(0, source);
        } else {
          DCHECK(locations->InAt(0).Equals(source));
        }
      }
      locations->SetOut(source);
    } else {
      DCHECK(source.Equals(location));
    }
  }

  // Connect siblings.
  for (size_t i = 0, e = liveness_.GetNumberOfSsaValues(); i < e; ++i) {
    HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
    ConnectSiblings(instruction->GetLiveInterval());
  }

  // Resolve non-linear control flow across branches. Order does not matter.
  for (HLinearOrderIterator it(liveness_); !it.Done(); it.Advance()) {
    HBasicBlock* block = it.Current();
    BitVector* live = liveness_.GetLiveInSet(*block);
    for (uint32_t idx : live->Indexes()) {
      HInstruction* current = liveness_.GetInstructionFromSsaIndex(idx);
      LiveInterval* interval = current->GetLiveInterval();
      for (size_t i = 0, e = block->GetPredecessors().Size(); i < e; ++i) {
        ConnectSplitSiblings(interval, block->GetPredecessors().Get(i), block);
      }
    }
  }

  // Resolve phi inputs. Order does not matter.
  for (HLinearOrderIterator it(liveness_); !it.Done(); it.Advance()) {
    HBasicBlock* current = it.Current();
    for (HInstructionIterator inst_it(current->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
      HInstruction* phi = inst_it.Current();
      for (size_t i = 0, e = current->GetPredecessors().Size(); i < e; ++i) {
        HBasicBlock* predecessor = current->GetPredecessors().Get(i);
        DCHECK_EQ(predecessor->GetSuccessors().Size(), 1u);
        HInstruction* input = phi->InputAt(i);
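        // As in ConnectSplitSiblings, query the location at the last position
        // covered in the predecessor: a block's lifetime end is exclusive.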
        Location source = input->GetLiveInterval()->GetLocationAt(
            predecessor->GetLifetimeEnd() - 1);
        Location destination = phi->GetLiveInterval()->ToLocation();
        InsertParallelMoveAtExitOf(predecessor, nullptr, source, destination);
      }
    }
  }

  // Assign temp locations.
  HInstruction* current = nullptr;
  size_t temp_index = 0;
  for (size_t i = 0; i < temp_intervals_.Size(); ++i) {
    LiveInterval* temp = temp_intervals_.Get(i);
    HInstruction* at = liveness_.GetTempUser(temp);
    if (at != current) {
      temp_index = 0;
      current = at;
    }
    LocationSummary* locations = at->GetLocations();
    DCHECK(temp->GetType() == Primitive::kPrimInt);
    locations->SetTempAt(
        temp_index++, Location::RegisterLocation(temp->GetRegister()));
  }
}

}  // namespace art
