// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "mips/lithium-gap-resolver-mips.h"
#include "mips/lithium-codegen-mips.h"

namespace v8 {
namespace internal {

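// moves_ is the worklist of moves still to perform (initial capacity 32).
// root_index_ marks the move at which the current depth-first traversal
// started; reaching it again means a cycle.  in_cycle_ and
// saved_destination_ track the value spilled in BreakCycle until
// RestoreValue writes it back.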
LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32),
      root_index_(0),
      in_cycle_(false),
      saved_destination_(NULL) {}


void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(moves_.is_empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last.  They don't block other moves
    // (a constant is never the destination of another move), and skipping
    // such moves with register destinations keeps those registers free
    // for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      root_index_ = i;  // Any cycle is found by reaching this move again.
      PerformMove(i);
      if (in_cycle_) {
        RestoreValue();
      }
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  moves_.Rewind(0);
}


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move);
  }
  Verify();
}


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.

  // We can only find a cycle, when doing a depth-first traversal of moves,
  // by encountering the starting move again.  So by spilling the source of
  // the starting move, we break the cycle.  All moves are then unblocked,
  // and the starting move is completed by writing the spilled value to
  // its destination.  All other moves from the spilled source have been
  // completed prior to breaking the cycle.
  // An additional complication is that moves to MemOperands with large
  // offsets (more than 1K or 4K) require us to spill the spilled value to
  // the stack, to free up the register.
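  //
  // Illustrative trace (an example, not from the original comments): for
  // the parallel move {a0 -> a1, a1 -> a0}, Resolve starts at index 0, so
  // PerformMove(0) marks move 0 pending and recurses into PerformMove(1).
  // Move 1's destination a0 is blocked by the pending root, so
  // BreakCycle(1) copies a1 into kLithiumScratchReg and eliminates move 1.
  // Move 0 (a0 -> a1) is then emitted normally, and RestoreValue writes
  // kLithiumScratchReg back to a0, completing the swap.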
  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved in a stack allocated local.  Multiple moves can
  // be pending because this function is recursive.
  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies.  Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
      // If there is a blocking, pending move it must be moves_[root_index_]
      // and all other moves with the same source as moves_[root_index_] are
      // successfully executed (because they are cycle-free) by this loop.
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // The move may be blocked on a pending move, which must be the starting move.
  // In this case, we have a cycle, and we save the source of this move to
  // a scratch register to break it.
  LMoveOperands other_move = moves_[root_index_];
  if (other_move.Blocks(destination)) {
    ASSERT(other_move.IsPending());
    BreakCycle(index);
    return;
  }

  // This move is no longer blocked.
  EmitMove(index);
}


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}

#define __ ACCESS_MASM(cgen_->masm())

void LGapResolver::BreakCycle(int index) {
  // We save in a register the value that should end up in the source of
  // moves_[root_index_].  After performing all moves in the tree rooted
  // in that move, we restore the value to that source (see RestoreValue).
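  // The spill register depends on the operand kind: general-purpose values
  // go to kLithiumScratchReg, doubles to kLithiumScratchDouble.  Only the
  // register holding the spilled value must be preserved until
  // RestoreValue runs; EmitMove may still use the other one as scratch.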
  ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
  ASSERT(!in_cycle_);
  in_cycle_ = true;
  LOperand* source = moves_[index].source();
  saved_destination_ = moves_[index].destination();
  if (source->IsRegister()) {
    __ mov(kLithiumScratchReg, cgen_->ToRegister(source));
  } else if (source->IsStackSlot()) {
    __ lw(kLithiumScratchReg, cgen_->ToMemOperand(source));
  } else if (source->IsDoubleRegister()) {
    __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
  } else if (source->IsDoubleStackSlot()) {
    __ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
  } else {
    UNREACHABLE();
  }
  // This move will be done by restoring the saved value to the destination.
  moves_[index].Eliminate();
}


void LGapResolver::RestoreValue() {
  ASSERT(in_cycle_);
  ASSERT(saved_destination_ != NULL);

  // Spilled value is in kLithiumScratchReg or kLithiumScratchDouble.
  if (saved_destination_->IsRegister()) {
    __ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg);
  } else if (saved_destination_->IsStackSlot()) {
    __ sw(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
  } else if (saved_destination_->IsDoubleRegister()) {
    __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
             kLithiumScratchDouble);
  } else if (saved_destination_->IsDoubleStackSlot()) {
    __ sdc1(kLithiumScratchDouble,
            cgen_->ToMemOperand(saved_destination_));
  } else {
    UNREACHABLE();
  }

  in_cycle_ = false;
  saved_destination_ = NULL;
}


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
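  // The handled combinations are: register and stack-slot sources move to
  // registers or stack slots, constants move to registers or stack slots,
  // and double registers and double stack slots move to double registers
  // or double stack slots.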

  if (source->IsRegister()) {
    Register source_register = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(cgen_->ToRegister(destination), source_register);
    } else {
      ASSERT(destination->IsStackSlot());
      __ sw(source_register, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsRegister()) {
      __ lw(cgen_->ToRegister(destination), source_operand);
    } else {
      ASSERT(destination->IsStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        // kLithiumScratchReg may hold the value spilled in BreakCycle, so
        // it is not available here.
        if (!destination_operand.OffsetIsInt16Encodable()) {
          // 'at' is overwritten while saving the value to the destination:
          // the macro assembler needs it to materialize a stack-slot
          // address whose offset does not fit in a 16-bit immediate.
          // It is OK if the read from the source destroys 'at', since that
          // happens before the value is written, so the copy goes through
          // a single reg of the double scratch reg-pair instead.
          __ lwc1(kLithiumScratchDouble, source_operand);
          __ swc1(kLithiumScratchDouble, destination_operand);
        } else {
          __ lw(at, source_operand);
          __ sw(at, destination_operand);
        }
      } else {
        __ lw(kLithiumScratchReg, source_operand);
        __ sw(kLithiumScratchReg, destination_operand);
      }
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      if (cgen_->IsInteger32(constant_source)) {
        __ li(dst, Operand(cgen_->ToInteger32(constant_source)));
      } else {
        __ LoadObject(dst, cgen_->ToHandle(constant_source));
      }
    } else {
      ASSERT(destination->IsStackSlot());
      ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
      if (cgen_->IsInteger32(constant_source)) {
        __ li(kLithiumScratchReg,
              Operand(cgen_->ToInteger32(constant_source)));
      } else {
        __ LoadObject(kLithiumScratchReg,
                      cgen_->ToHandle(constant_source));
      }
      __ sw(kLithiumScratchReg, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsDoubleRegister()) {
    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ mov_d(cgen_->ToDoubleRegister(destination), source_register);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      __ sdc1(source_register, destination_operand);
    }

  } else if (source->IsDoubleStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        // kLithiumScratchDouble was used to break the cycle,
        // but kLithiumScratchReg is free.
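        // Copy the double word-by-word through the GP scratch register;
        // ToHighMemOperand addresses the upper word of the stack slot.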
        MemOperand source_high_operand =
            cgen_->ToHighMemOperand(source);
        MemOperand destination_high_operand =
            cgen_->ToHighMemOperand(destination);
        __ lw(kLithiumScratchReg, source_operand);
        __ sw(kLithiumScratchReg, destination_operand);
        __ lw(kLithiumScratchReg, source_high_operand);
        __ sw(kLithiumScratchReg, destination_high_operand);
      } else {
        __ ldc1(kLithiumScratchDouble, source_operand);
        __ sdc1(kLithiumScratchDouble, destination_operand);
      }
    }
  } else {
    UNREACHABLE();
  }

  moves_[index].Eliminate();
}


#undef __

} }  // namespace v8::internal