// codegen_util.cc revision d69835d841cb7663faaa2f1996e73e8c0b3f6d76
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "dex/compiler_internals.h"
18#include "dex_file-inl.h"
19#include "gc_map.h"
20#include "mapping_table.h"
21#include "mir_to_lir-inl.h"
22#include "dex/quick/dex_file_method_inliner.h"
23#include "dex/quick/dex_file_to_method_inliner_map.h"
24#include "dex/verification_results.h"
25#include "dex/verified_method.h"
26#include "verifier/dex_gc_map.h"
27#include "verifier/method_verifier.h"
28
29namespace art {
30
31namespace {
32
33/* Dump a mapping table */
34template <typename It>
35void DumpMappingTable(const char* table_name, const char* descriptor, const char* name,
36                      const Signature& signature, uint32_t size, It first) {
37  if (size != 0) {
38    std::string line(StringPrintf("\n  %s %s%s_%s_table[%u] = {", table_name,
39                     descriptor, name, signature.ToString().c_str(), size));
40    std::replace(line.begin(), line.end(), ';', '_');
41    LOG(INFO) << line;
42    for (uint32_t i = 0; i != size; ++i) {
43      line = StringPrintf("    {0x%05x, 0x%04x},", first.NativePcOffset(), first.DexPc());
44      ++first;
45      LOG(INFO) << line;
46    }
47    LOG(INFO) <<"  };\n\n";
48  }
49}
50
51}  // anonymous namespace
52
53bool Mir2Lir::IsInexpensiveConstant(RegLocation rl_src) {
54  bool res = false;
55  if (rl_src.is_const) {
56    if (rl_src.wide) {
57      if (rl_src.fp) {
58         res = InexpensiveConstantDouble(mir_graph_->ConstantValueWide(rl_src));
59      } else {
60         res = InexpensiveConstantLong(mir_graph_->ConstantValueWide(rl_src));
61      }
62    } else {
63      if (rl_src.fp) {
64         res = InexpensiveConstantFloat(mir_graph_->ConstantValue(rl_src));
65      } else {
66         res = InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src));
67      }
68    }
69  }
70  return res;
71}
72
// Mark `inst` as a GC safepoint: give it a full def mask (so nothing can be
// scheduled across it) and append a kPseudoSafepointPC marker after it.
void Mir2Lir::MarkSafepointPC(LIR* inst) {
  // An instruction with invalid use/def masks cannot carry safepoint info.
  DCHECK(!inst->flags.use_def_invalid);
  // Defining "everything" acts as a scheduling barrier at the safepoint.
  inst->u.m.def_mask = ENCODE_ALL;
  LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC);
  DCHECK_EQ(safepoint_pc->u.m.def_mask, ENCODE_ALL);
}
79
// Thin wrapper: asks the compiler driver whether this instance-field access
// can use the fast path.  On success, *field_offset and *is_volatile are
// filled in by ComputeInstanceFieldInfo.
bool Mir2Lir::FastInstance(uint32_t field_idx, bool is_put, int* field_offset, bool* is_volatile) {
  return cu_->compiler_driver->ComputeInstanceFieldInfo(
      field_idx, mir_graph_->GetCurrentDexCompilationUnit(), is_put, field_offset, is_volatile);
}
84
/* Remove a LIR from the list. */
void Mir2Lir::UnlinkLIR(LIR* lir) {
  if (UNLIKELY(lir == first_lir_insn_)) {
    // Removing the head: advance first_lir_insn_ past it.
    first_lir_insn_ = lir->next;
    if (lir->next != NULL) {
      lir->next->prev = NULL;
    } else {
      // Sole element: the list becomes empty.
      DCHECK(lir->next == NULL);
      DCHECK(lir == last_lir_insn_);
      last_lir_insn_ = NULL;
    }
  } else if (lir == last_lir_insn_) {
    // Removing the tail (list has at least two elements here).
    last_lir_insn_ = lir->prev;
    lir->prev->next = NULL;
  } else if ((lir->prev != NULL) && (lir->next != NULL)) {
    // Interior node: splice it out.  Note a node with a NULL link that is
    // neither head nor tail is deliberately left untouched.
    lir->prev->next = lir->next;
    lir->next->prev = lir->prev;
  }
}
104
105/* Convert an instruction to a NOP */
106void Mir2Lir::NopLIR(LIR* lir) {
107  lir->flags.is_nop = true;
108  if (!cu_->verbose) {
109    UnlinkLIR(lir);
110  }
111}
112
113void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type) {
114  uint64_t *mask_ptr;
115  uint64_t mask = ENCODE_MEM;
116  DCHECK(GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE));
117  DCHECK(!lir->flags.use_def_invalid);
118  if (is_load) {
119    mask_ptr = &lir->u.m.use_mask;
120  } else {
121    mask_ptr = &lir->u.m.def_mask;
122  }
123  /* Clear out the memref flags */
124  *mask_ptr &= ~mask;
125  /* ..and then add back the one we need */
126  switch (mem_type) {
127    case kLiteral:
128      DCHECK(is_load);
129      *mask_ptr |= ENCODE_LITERAL;
130      break;
131    case kDalvikReg:
132      *mask_ptr |= ENCODE_DALVIK_REG;
133      break;
134    case kHeapRef:
135      *mask_ptr |= ENCODE_HEAP_REF;
136      break;
137    case kMustNotAlias:
138      /* Currently only loads can be marked as kMustNotAlias */
139      DCHECK(!(GetTargetInstFlags(lir->opcode) & IS_STORE));
140      *mask_ptr |= ENCODE_MUST_NOT_ALIAS;
141      break;
142    default:
143      LOG(FATAL) << "Oat: invalid memref kind - " << mem_type;
144  }
145}
146
/*
 * Mark load/store instructions that access Dalvik registers through the stack.
 * reg_id is the Dalvik register being touched; is64bit marks a wide access.
 */
void Mir2Lir::AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load,
                                      bool is64bit) {
  // Tag the instruction's use/def mask as a Dalvik-register memory access.
  SetMemRefType(lir, is_load, kDalvikReg);

  /*
   * Store the Dalvik register id in alias_info. Mark the MSB if it is a 64-bit
   * access.
   */
  lir->flags.alias_info = ENCODE_ALIAS_INFO(reg_id, is64bit);
}
160
161/*
162 * Debugging macros
163 */
164#define DUMP_RESOURCE_MASK(X)
165
/* Pretty-print a LIR instruction */
void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) {
  int offset = lir->offset;
  int dest = lir->operands[0];
  // Nops are normally hidden; the kDebugShowNops flag forces them into the
  // listing.
  const bool dump_nop = (cu_->enable_debug & (1 << kDebugShowNops));

  /* Handle pseudo-ops individually, and all regular insns as a group */
  switch (lir->opcode) {
    case kPseudoMethodEntry:
      LOG(INFO) << "-------- method entry "
                << PrettyMethod(cu_->method_idx, *cu_->dex_file);
      break;
    case kPseudoMethodExit:
      LOG(INFO) << "-------- Method_Exit";
      break;
    case kPseudoBarrier:
      LOG(INFO) << "-------- BARRIER";
      break;
    case kPseudoEntryBlock:
      LOG(INFO) << "-------- entry offset: 0x" << std::hex << dest;
      break;
    case kPseudoDalvikByteCodeBoundary:
      if (lir->operands[0] == 0) {
         // NOTE: only used for debug listings.
         lir->operands[0] = WrapPointer(ArenaStrdup("No instruction string"));
      }
      LOG(INFO) << "-------- dalvik offset: 0x" << std::hex
                << lir->dalvik_offset << " @ "
                << reinterpret_cast<char*>(UnwrapPointer(lir->operands[0]));
      break;
    case kPseudoExitBlock:
      LOG(INFO) << "-------- exit offset: 0x" << std::hex << dest;
      break;
    case kPseudoPseudoAlign4:
      LOG(INFO) << reinterpret_cast<uintptr_t>(base_addr) + offset << " (0x" << std::hex
                << offset << "): .align4";
      break;
    case kPseudoEHBlockLabel:
      LOG(INFO) << "Exception_Handling:";
      break;
    case kPseudoTargetLabel:
    case kPseudoNormalBlockLabel:
      // Labels are printed with the LIR's own address as a unique name.
      LOG(INFO) << "L" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoThrowTarget:
      LOG(INFO) << "LT" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoIntrinsicRetry:
      LOG(INFO) << "IR" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoSuspendTarget:
      LOG(INFO) << "LS" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoSafepointPC:
      LOG(INFO) << "LsafepointPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
      break;
    case kPseudoExportedPC:
      LOG(INFO) << "LexportedPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
      break;
    case kPseudoCaseLabel:
      LOG(INFO) << "LC" << reinterpret_cast<void*>(lir) << ": Case target 0x"
                << std::hex << lir->operands[0] << "|" << std::dec <<
        lir->operands[0];
      break;
    default:
      // Regular (non-pseudo) instruction: have the target back-end render
      // the mnemonic and operand string.
      if (lir->flags.is_nop && !dump_nop) {
        break;
      } else {
        std::string op_name(BuildInsnString(GetTargetInstName(lir->opcode),
                                               lir, base_addr));
        std::string op_operands(BuildInsnString(GetTargetInstFmt(lir->opcode),
                                                    lir, base_addr));
        LOG(INFO) << StringPrintf("%5p: %-9s%s%s",
                                  base_addr + offset,
                                  op_name.c_str(), op_operands.c_str(),
                                  lir->flags.is_nop ? "(nop)" : "");
      }
      break;
  }

  // Resource-mask dumps are compiled out by default: DUMP_RESOURCE_MASK is
  // defined empty above, so these expand to nothing.
  if (lir->u.m.use_mask && (!lir->flags.is_nop || dump_nop)) {
    DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->u.m.use_mask, "use"));
  }
  if (lir->u.m.def_mask && (!lir->flags.is_nop || dump_nop)) {
    DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->u.m.def_mask, "def"));
  }
}
253
// Log, for every Dalvik vreg and compiler temp, where it lives: its core
// home (promoted register "r<n>" or stack slot "SP+<offset>") plus an
// optional promoted FP register (" : s<n>").
void Mir2Lir::DumpPromotionMap() {
  int num_regs = cu_->num_dalvik_registers + mir_graph_->GetNumUsedCompilerTemps();
  for (int i = 0; i < num_regs; i++) {
    PromotionMap v_reg_map = promotion_map_[i];
    // FP promotion suffix, if any.
    std::string buf;
    if (v_reg_map.fp_location == kLocPhysReg) {
      StringAppendF(&buf, " : s%d", v_reg_map.FpReg & FpRegMask());
    }

    // buf3 is the label: a vreg number, "Method*", or a compiler temp "ct<n>".
    std::string buf3;
    if (i < cu_->num_dalvik_registers) {
      StringAppendF(&buf3, "%02d", i);
    } else if (i == mir_graph_->GetMethodSReg()) {
      buf3 = "Method*";
    } else {
      StringAppendF(&buf3, "ct%d", i - cu_->num_dalvik_registers);
    }

    LOG(INFO) << StringPrintf("V[%s] -> %s%d%s", buf3.c_str(),
                              v_reg_map.core_location == kLocPhysReg ?
                              "r" : "SP+", v_reg_map.core_location == kLocPhysReg ?
                              v_reg_map.core_reg : SRegOffset(i),
                              buf.c_str());
  }
}
279
/* Dump instructions and constant pool contents */
void Mir2Lir::CodegenDump() {
  LOG(INFO) << "Dumping LIR insns for "
            << PrettyMethod(cu_->method_idx, *cu_->dex_file);
  LIR* lir_insn;
  // insns_size is in 16-bit Dalvik code units, hence the "* 2" for bytes below.
  int insns_size = cu_->code_item->insns_size_in_code_units_;

  // Frame/register summary.
  LOG(INFO) << "Regs (excluding ins) : " << cu_->num_regs;
  LOG(INFO) << "Ins          : " << cu_->num_ins;
  LOG(INFO) << "Outs         : " << cu_->num_outs;
  LOG(INFO) << "CoreSpills       : " << num_core_spills_;
  LOG(INFO) << "FPSpills       : " << num_fp_spills_;
  LOG(INFO) << "CompilerTemps    : " << mir_graph_->GetNumUsedCompilerTemps();
  LOG(INFO) << "Frame size       : " << frame_size_;
  LOG(INFO) << "code size is " << total_size_ <<
    " bytes, Dalvik size is " << insns_size * 2;
  LOG(INFO) << "expansion factor: "
            << static_cast<float>(total_size_) / static_cast<float>(insns_size * 2);
  DumpPromotionMap();
  // Dump every LIR instruction, then the literal pool.
  for (lir_insn = first_lir_insn_; lir_insn != NULL; lir_insn = lir_insn->next) {
    DumpLIRInsn(lir_insn, 0);
  }
  for (lir_insn = literal_list_; lir_insn != NULL; lir_insn = lir_insn->next) {
    LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lir_insn->offset, lir_insn->offset,
                              lir_insn->operands[0]);
  }

  const DexFile::MethodId& method_id =
      cu_->dex_file->GetMethodId(cu_->method_idx);
  const Signature signature = cu_->dex_file->GetMethodSignature(method_id);
  const char* name = cu_->dex_file->GetMethodName(method_id);
  const char* descriptor(cu_->dex_file->GetMethodDeclaringClassDescriptor(method_id));

  // Dump mapping tables
  if (!encoded_mapping_table_.empty()) {
    MappingTable table(&encoded_mapping_table_[0]);
    DumpMappingTable("PC2Dex_MappingTable", descriptor, name, signature,
                     table.PcToDexSize(), table.PcToDexBegin());
    DumpMappingTable("Dex2PC_MappingTable", descriptor, name, signature,
                     table.DexToPcSize(), table.DexToPcBegin());
  }
}
322
323/*
324 * Search the existing constants in the literal pool for an exact or close match
325 * within specified delta (greater or equal to 0).
326 */
327LIR* Mir2Lir::ScanLiteralPool(LIR* data_target, int value, unsigned int delta) {
328  while (data_target) {
329    if ((static_cast<unsigned>(value - data_target->operands[0])) <= delta)
330      return data_target;
331    data_target = data_target->next;
332  }
333  return NULL;
334}
335
/* Search the existing constants in the literal pool for an exact wide match */
LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) {
  // A wide constant occupies two adjacent list nodes; because AddWideData
  // head-inserts the high word first, traversal order is (lo, hi).  Scan for
  // a node matching val_lo immediately followed by one matching val_hi.
  bool lo_match = false;
  LIR* lo_target = NULL;
  while (data_target) {
    if (lo_match && (data_target->operands[0] == val_hi)) {
      // Record high word in case we need to expand this later.
      lo_target->operands[1] = val_hi;
      return lo_target;
    }
    lo_match = false;
    if (data_target->operands[0] == val_lo) {
      lo_match = true;
      lo_target = data_target;
    }
    data_target = data_target->next;
  }
  return NULL;
}
355
356/*
357 * The following are building blocks to insert constants into the pool or
358 * instruction streams.
359 */
360
361/* Add a 32-bit constant to the constant pool */
362LIR* Mir2Lir::AddWordData(LIR* *constant_list_p, int value) {
363  /* Add the constant to the literal pool */
364  if (constant_list_p) {
365    LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocData));
366    new_value->operands[0] = value;
367    new_value->next = *constant_list_p;
368    *constant_list_p = new_value;
369    estimated_native_code_size_ += sizeof(value);
370    return new_value;
371  }
372  return NULL;
373}
374
/* Add a 64-bit constant to the constant pool or mixed with code */
LIR* Mir2Lir::AddWideData(LIR* *constant_list_p, int val_lo, int val_hi) {
  // AddWordData head-inserts, so pushing the high word first leaves the pair
  // in list order (lo, hi); return the node holding the low word.
  AddWordData(constant_list_p, val_hi);
  return AddWordData(constant_list_p, val_lo);
}
380
// Append a 32-bit value to the buffer in little-endian byte order.
static void PushWord(std::vector<uint8_t>& buf, int data) {
  const uint32_t word = static_cast<uint32_t>(data);
  for (int shift = 0; shift < 32; shift += 8) {
    buf.push_back((word >> shift) & 0xff);
  }
}
387
// Push 8 bytes on 64-bit systems; 4 on 32-bit systems.
static void PushPointer(std::vector<uint8_t>&buf, void const* pointer) {
  uintptr_t data = reinterpret_cast<uintptr_t>(pointer);
  if (sizeof(void*) == sizeof(uint64_t)) {
    // 64-bit: high word first, then low word.  sizeof(void*) * 4 == 32, i.e.
    // half the pointer width in bits.
    PushWord(buf, (data >> (sizeof(void*) * 4)) & 0xFFFFFFFF);
    PushWord(buf, data & 0xFFFFFFFF);
  } else {
    PushWord(buf, data);
  }
}
398
// Pad the buffer with zero bytes until it is at least `offset` bytes long.
// A buffer already at or past `offset` is left unchanged.
static void AlignBuffer(std::vector<uint8_t>& buf, size_t offset) {
  if (buf.size() < offset) {
    buf.resize(offset, 0);
  }
}
404
/* Write the literal pool to the output stream */
void Mir2Lir::InstallLiteralPools() {
  // Data literals start at data_offset_; pad the code buffer up to it.
  AlignBuffer(code_buffer_, data_offset_);
  LIR* data_lir = literal_list_;
  while (data_lir != NULL) {
    PushWord(code_buffer_, data_lir->operands[0]);
    data_lir = NEXT_LIR(data_lir);
  }
  // Push code and method literals, record offsets for the compiler to patch.
  data_lir = code_literal_list_;
  while (data_lir != NULL) {
    // operands[0] = target method index, operands[1] = invoke type.
    uint32_t target = data_lir->operands[0];
    cu_->compiler_driver->AddCodePatch(cu_->dex_file,
                                       cu_->class_def_idx,
                                       cu_->method_idx,
                                       cu_->invoke_type,
                                       target,
                                       static_cast<InvokeType>(data_lir->operands[1]),
                                       code_buffer_.size());
    const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
    // unique value based on target to ensure code deduplication works
    PushPointer(code_buffer_, &id);
    data_lir = NEXT_LIR(data_lir);
  }
  // Method literals: same layout as code literals, patched as method pointers.
  data_lir = method_literal_list_;
  while (data_lir != NULL) {
    uint32_t target = data_lir->operands[0];
    cu_->compiler_driver->AddMethodPatch(cu_->dex_file,
                                         cu_->class_def_idx,
                                         cu_->method_idx,
                                         cu_->invoke_type,
                                         target,
                                         static_cast<InvokeType>(data_lir->operands[1]),
                                         code_buffer_.size());
    const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
    // unique value based on target to ensure code deduplication works
    PushPointer(code_buffer_, &id);
    data_lir = NEXT_LIR(data_lir);
  }
  // Push class literals.
  data_lir = class_literal_list_;
  while (data_lir != NULL) {
    uint32_t target = data_lir->operands[0];
    cu_->compiler_driver->AddClassPatch(cu_->dex_file,
                                        cu_->class_def_idx,
                                        cu_->method_idx,
                                        target,
                                        code_buffer_.size());
    const DexFile::TypeId& id = cu_->dex_file->GetTypeId(target);
    // unique value based on target to ensure code deduplication works
    PushPointer(code_buffer_, &id);
    data_lir = NEXT_LIR(data_lir);
  }
}
459
/* Write the switch tables to the output stream */
void Mir2Lir::InstallSwitchTables() {
  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
  while (true) {
    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    AlignBuffer(code_buffer_, tab_rec->offset);
    /*
     * For Arm, our reference point is the address of the bx
     * instruction that does the launch, so we have to subtract
     * the auto pc-advance.  For other targets the reference point
     * is a label, so we can use the offset as-is.
     */
    int bx_offset = INVALID_OFFSET;
    switch (cu_->instruction_set) {
      case kThumb2:
        DCHECK(tab_rec->anchor->flags.fixup != kFixupNone);
        // +4 compensates for the Thumb2 PC read-ahead.
        bx_offset = tab_rec->anchor->offset + 4;
        break;
      case kX86:
        bx_offset = 0;
        break;
      case kMips:
        bx_offset = tab_rec->anchor->offset;
        break;
      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
    }
    if (cu_->verbose) {
      LOG(INFO) << "Switch table for offset 0x" << std::hex << bx_offset;
    }
    // tab_rec->table layout: [0] signature, [1] entry count; for sparse
    // switches the keys follow starting at table[2] (as 32-bit values).
    if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
      const int32_t* keys = reinterpret_cast<const int32_t*>(&(tab_rec->table[2]));
      // Sparse: emit (key, displacement) word pairs.
      for (int elems = 0; elems < tab_rec->table[1]; elems++) {
        int disp = tab_rec->targets[elems]->offset - bx_offset;
        if (cu_->verbose) {
          LOG(INFO) << "  Case[" << elems << "] key: 0x"
                    << std::hex << keys[elems] << ", disp: 0x"
                    << std::hex << disp;
        }
        PushWord(code_buffer_, keys[elems]);
        PushWord(code_buffer_,
          tab_rec->targets[elems]->offset - bx_offset);
      }
    } else {
      DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
                static_cast<int>(Instruction::kPackedSwitchSignature));
      // Packed: emit one displacement word per entry.
      for (int elems = 0; elems < tab_rec->table[1]; elems++) {
        int disp = tab_rec->targets[elems]->offset - bx_offset;
        if (cu_->verbose) {
          LOG(INFO) << "  Case[" << elems << "] disp: 0x"
                    << std::hex << disp;
        }
        PushWord(code_buffer_, tab_rec->targets[elems]->offset - bx_offset);
      }
    }
  }
}
517
/* Write the fill array data to the output stream */
void Mir2Lir::InstallFillArrayData() {
  GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
  while (true) {
    Mir2Lir::FillArrayData *tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    AlignBuffer(code_buffer_, tab_rec->offset);
    // Emit the table as little-endian 16-bit units; (size + 1) / 2 rounds up
    // so an odd count still flushes the final unit.  (size is presumably in
    // bytes here — confirm against the FillArrayData definition.)
    for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
      code_buffer_.push_back(tab_rec->table[i] & 0xFF);
      code_buffer_.push_back((tab_rec->table[i] >> 8) & 0xFF);
    }
  }
}
531
532static int AssignLiteralOffsetCommon(LIR* lir, CodeOffset offset) {
533  for (; lir != NULL; lir = lir->next) {
534    lir->offset = offset;
535    offset += 4;
536  }
537  return offset;
538}
539
540static int AssignLiteralPointerOffsetCommon(LIR* lir, CodeOffset offset) {
541  unsigned int element_size = sizeof(void*);
542  // Align to natural pointer size.
543  offset = (offset + (element_size - 1)) & ~(element_size - 1);
544  for (; lir != NULL; lir = lir->next) {
545    lir->offset = offset;
546    offset += element_size;
547  }
548  return offset;
549}
550
// Make sure we have a code address for every declared catch entry
bool Mir2Lir::VerifyCatchEntries() {
  MappingTable table(&encoded_mapping_table_[0]);
  // Collect every dex pc present in the dex2pc table.
  std::vector<uint32_t> dex_pcs;
  dex_pcs.reserve(table.DexToPcSize());
  for (auto it = table.DexToPcBegin(), end = table.DexToPcEnd(); it != end; ++it) {
    dex_pcs.push_back(it.DexPc());
  }
  // Sort dex_pcs, so that we can quickly check it against the ordered mir_graph_->catches_.
  std::sort(dex_pcs.begin(), dex_pcs.end());

  bool success = true;
  auto it = dex_pcs.begin(), end = dex_pcs.end();
  for (uint32_t dex_pc : mir_graph_->catches_) {
    // Table entries below the current catch pc have no matching catch: spurious.
    while (it != end && *it < dex_pc) {
      LOG(INFO) << "Unexpected catch entry @ dex pc 0x" << std::hex << *it;
      ++it;
      success = false;
    }
    // The current catch must have a table entry at exactly its dex pc.
    if (it == end || *it > dex_pc) {
      LOG(INFO) << "Missing native PC for catch entry @ 0x" << std::hex << dex_pc;
      success = false;
    } else {
      ++it;
    }
  }
  // NOTE(review): table entries past the last catch dex pc are never visited
  // by the loop above and therefore not flagged as unexpected.
  if (!success) {
    LOG(INFO) << "Bad dex2pcMapping table in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
    LOG(INFO) << "Entries @ decode: " << mir_graph_->catches_.size() << ", Entries in table: "
              << table.DexToPcSize();
  }
  return success;
}
584
585
// Build the encoded pc2dex (safepoints) and dex2pc (exported PCs) mapping
// tables.  Entries are delta-encoded: ULEB128 for the monotonically
// increasing native offset, SLEB128 for the dex-pc delta.  Layout:
// [ULEB128 total entries][ULEB128 pc2dex entries][pc2dex data][dex2pc data].
void Mir2Lir::CreateMappingTables() {
  // Pass 1: walk the LIR once to count entries and measure encoded sizes.
  uint32_t pc2dex_data_size = 0u;
  uint32_t pc2dex_entries = 0u;
  uint32_t pc2dex_offset = 0u;
  uint32_t pc2dex_dalvik_offset = 0u;
  uint32_t dex2pc_data_size = 0u;
  uint32_t dex2pc_entries = 0u;
  uint32_t dex2pc_offset = 0u;
  uint32_t dex2pc_dalvik_offset = 0u;
  for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
      pc2dex_entries += 1;
      DCHECK(pc2dex_offset <= tgt_lir->offset);
      pc2dex_data_size += UnsignedLeb128Size(tgt_lir->offset - pc2dex_offset);
      pc2dex_data_size += SignedLeb128Size(static_cast<int32_t>(tgt_lir->dalvik_offset) -
                                           static_cast<int32_t>(pc2dex_dalvik_offset));
      pc2dex_offset = tgt_lir->offset;
      pc2dex_dalvik_offset = tgt_lir->dalvik_offset;
    }
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
      dex2pc_entries += 1;
      DCHECK(dex2pc_offset <= tgt_lir->offset);
      dex2pc_data_size += UnsignedLeb128Size(tgt_lir->offset - dex2pc_offset);
      dex2pc_data_size += SignedLeb128Size(static_cast<int32_t>(tgt_lir->dalvik_offset) -
                                           static_cast<int32_t>(dex2pc_dalvik_offset));
      dex2pc_offset = tgt_lir->offset;
      dex2pc_dalvik_offset = tgt_lir->dalvik_offset;
    }
  }

  // Size the buffer exactly and write the two-field header.
  uint32_t total_entries = pc2dex_entries + dex2pc_entries;
  uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries);
  uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size;
  encoded_mapping_table_.resize(data_size);
  uint8_t* write_pos = &encoded_mapping_table_[0];
  write_pos = EncodeUnsignedLeb128(write_pos, total_entries);
  write_pos = EncodeUnsignedLeb128(write_pos, pc2dex_entries);
  DCHECK_EQ(static_cast<size_t>(write_pos - &encoded_mapping_table_[0]), hdr_data_size);
  // write_pos2 writes the dex2pc section, which starts after the pc2dex data.
  uint8_t* write_pos2 = write_pos + pc2dex_data_size;

  // Pass 2: re-walk the LIR and emit both sections with the same deltas.
  pc2dex_offset = 0u;
  pc2dex_dalvik_offset = 0u;
  dex2pc_offset = 0u;
  dex2pc_dalvik_offset = 0u;
  for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
      DCHECK(pc2dex_offset <= tgt_lir->offset);
      write_pos = EncodeUnsignedLeb128(write_pos, tgt_lir->offset - pc2dex_offset);
      write_pos = EncodeSignedLeb128(write_pos, static_cast<int32_t>(tgt_lir->dalvik_offset) -
                                     static_cast<int32_t>(pc2dex_dalvik_offset));
      pc2dex_offset = tgt_lir->offset;
      pc2dex_dalvik_offset = tgt_lir->dalvik_offset;
    }
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
      DCHECK(dex2pc_offset <= tgt_lir->offset);
      write_pos2 = EncodeUnsignedLeb128(write_pos2, tgt_lir->offset - dex2pc_offset);
      write_pos2 = EncodeSignedLeb128(write_pos2, static_cast<int32_t>(tgt_lir->dalvik_offset) -
                                      static_cast<int32_t>(dex2pc_dalvik_offset));
      dex2pc_offset = tgt_lir->offset;
      dex2pc_dalvik_offset = tgt_lir->dalvik_offset;
    }
  }
  // Both write cursors must land exactly on their section boundaries.
  DCHECK_EQ(static_cast<size_t>(write_pos - &encoded_mapping_table_[0]),
            hdr_data_size + pc2dex_data_size);
  DCHECK_EQ(static_cast<size_t>(write_pos2 - &encoded_mapping_table_[0]), data_size);

  if (kIsDebugBuild) {
    CHECK(VerifyCatchEntries());

    // Verify the encoded table holds the expected data.
    MappingTable table(&encoded_mapping_table_[0]);
    CHECK_EQ(table.TotalSize(), total_entries);
    CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
    auto it = table.PcToDexBegin();
    auto it2 = table.DexToPcBegin();
    for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
      if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
        CHECK_EQ(tgt_lir->offset, it.NativePcOffset());
        CHECK_EQ(tgt_lir->dalvik_offset, it.DexPc());
        ++it;
      }
      if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
        CHECK_EQ(tgt_lir->offset, it2.NativePcOffset());
        CHECK_EQ(tgt_lir->dalvik_offset, it2.DexPc());
        ++it2;
      }
    }
    CHECK(it == table.PcToDexEnd());
    CHECK(it2 == table.DexToPcEnd());
  }
}
677
// Builds the native-PC -> reference-bitmap table consumed by the GC.  The
// table is an open-addressed hash table (linear probing, see AddEntry) keyed
// on native offset, preceded by a 4-byte header packed as:
//   bits 0-2  : bytes per native offset (native_offset_width_)
//   bits 3-15 : bytes per reference bitmap (references_width_)
//   bits 16-31: number of entries
// Each entry is native_offset_width_ offset bytes (little-endian) followed by
// references_width_ bitmap bytes.
class NativePcToReferenceMapBuilder {
 public:
  NativePcToReferenceMapBuilder(std::vector<uint8_t>* table,
                                size_t entries, uint32_t max_native_offset,
                                size_t references_width) : entries_(entries),
                                references_width_(references_width), in_use_(entries),
                                table_(table) {
    // Compute width in bytes needed to hold max_native_offset.
    native_offset_width_ = 0;
    while (max_native_offset != 0) {
      native_offset_width_++;
      max_native_offset >>= 8;
    }
    // Resize table and set up header.
    table->resize((EntryWidth() * entries) + sizeof(uint32_t));
    CHECK_LT(native_offset_width_, 1U << 3);
    (*table)[0] = native_offset_width_ & 7;
    CHECK_LT(references_width_, 1U << 13);
    (*table)[0] |= (references_width_ << 3) & 0xFF;
    (*table)[1] = (references_width_ >> 5) & 0xFF;
    CHECK_LT(entries, 1U << 16);
    (*table)[2] = entries & 0xFF;
    (*table)[3] = (entries >> 8) & 0xFF;
  }

  // Insert one (native offset, reference bitmap) pair: start at the hashed
  // slot and probe linearly (with wraparound) until a free slot is found.
  void AddEntry(uint32_t native_offset, const uint8_t* references) {
    size_t table_index = TableIndex(native_offset);
    while (in_use_[table_index]) {
      table_index = (table_index + 1) % entries_;
    }
    in_use_[table_index] = true;
    SetCodeOffset(table_index, native_offset);
    DCHECK_EQ(native_offset, GetCodeOffset(table_index));
    SetReferences(table_index, references);
  }

 private:
  // Home slot for a native offset, using the same hash as the runtime reader.
  size_t TableIndex(uint32_t native_offset) {
    return NativePcOffsetToReferenceMap::Hash(native_offset) % entries_;
  }

  // Read back the little-endian native offset stored in a slot.
  uint32_t GetCodeOffset(size_t table_index) {
    uint32_t native_offset = 0;
    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
    for (size_t i = 0; i < native_offset_width_; i++) {
      native_offset |= (*table_)[table_offset + i] << (i * 8);
    }
    return native_offset;
  }

  // Store a native offset into a slot, little-endian, native_offset_width_ bytes.
  void SetCodeOffset(size_t table_index, uint32_t native_offset) {
    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
    for (size_t i = 0; i < native_offset_width_; i++) {
      (*table_)[table_offset + i] = (native_offset >> (i * 8)) & 0xFF;
    }
  }

  // Copy the reference bitmap into the slot, after the offset bytes.
  void SetReferences(size_t table_index, const uint8_t* references) {
    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
    memcpy(&(*table_)[table_offset + native_offset_width_], references, references_width_);
  }

  // Bytes per table entry: offset field plus reference bitmap.
  size_t EntryWidth() const {
    return native_offset_width_ + references_width_;
  }

  // Number of entries in the table.
  const size_t entries_;
  // Number of bytes used to encode the reference bitmap.
  const size_t references_width_;
  // Number of bytes used to encode a native offset.
  size_t native_offset_width_;
  // Entries that are in use.
  std::vector<bool> in_use_;
  // The table we're building.
  std::vector<uint8_t>* const table_;
};
755
756void Mir2Lir::CreateNativeGcMap() {
757  DCHECK(!encoded_mapping_table_.empty());
758  MappingTable mapping_table(&encoded_mapping_table_[0]);
759  uint32_t max_native_offset = 0;
760  for (auto it = mapping_table.PcToDexBegin(), end = mapping_table.PcToDexEnd(); it != end; ++it) {
761    uint32_t native_offset = it.NativePcOffset();
762    if (native_offset > max_native_offset) {
763      max_native_offset = native_offset;
764    }
765  }
766  MethodReference method_ref(cu_->dex_file, cu_->method_idx);
767  const std::vector<uint8_t>& gc_map_raw =
768      mir_graph_->GetCurrentDexCompilationUnit()->GetVerifiedMethod()->GetDexGcMap();
769  verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]);
770  DCHECK_EQ(gc_map_raw.size(), dex_gc_map.RawSize());
771  // Compute native offset to references size.
772  NativePcToReferenceMapBuilder native_gc_map_builder(&native_gc_map_,
773                                                      mapping_table.PcToDexSize(),
774                                                      max_native_offset, dex_gc_map.RegWidth());
775
776  for (auto it = mapping_table.PcToDexBegin(), end = mapping_table.PcToDexEnd(); it != end; ++it) {
777    uint32_t native_offset = it.NativePcOffset();
778    uint32_t dex_pc = it.DexPc();
779    const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
780    CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc;
781    native_gc_map_builder.AddEntry(native_offset, references);
782  }
783}
784
/* Determine the offset of each literal field */
int Mir2Lir::AssignLiteralOffset(CodeOffset offset) {
  // 32-bit data literals first, then the pointer-sized code/method/class
  // literal lists (each rounded up to the natural pointer alignment).
  offset = AssignLiteralOffsetCommon(literal_list_, offset);
  offset = AssignLiteralPointerOffsetCommon(code_literal_list_, offset);
  offset = AssignLiteralPointerOffsetCommon(method_literal_list_, offset);
  offset = AssignLiteralPointerOffsetCommon(class_literal_list_, offset);
  return offset;
}
793
794int Mir2Lir::AssignSwitchTablesOffset(CodeOffset offset) {
795  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
796  while (true) {
797    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
798    if (tab_rec == NULL) break;
799    tab_rec->offset = offset;
800    if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
801      offset += tab_rec->table[1] * (sizeof(int) * 2);
802    } else {
803      DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
804                static_cast<int>(Instruction::kPackedSwitchSignature));
805      offset += tab_rec->table[1] * sizeof(int);
806    }
807  }
808  return offset;
809}
810
811int Mir2Lir::AssignFillArrayDataOffset(CodeOffset offset) {
812  GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
813  while (true) {
814    Mir2Lir::FillArrayData *tab_rec = iterator.Next();
815    if (tab_rec == NULL) break;
816    tab_rec->offset = offset;
817    offset += tab_rec->size;
818    // word align
819    offset = (offset + 3) & ~3;
820    }
821  return offset;
822}
823
824/*
825 * Insert a kPseudoCaseLabel at the beginning of the Dalvik
826 * offset vaddr if pretty-printing, otherise use the standard block
827 * label.  The selected label will be used to fix up the case
828 * branch table during the assembly phase.  All resource flags
829 * are set to prevent code motion.  KeyVal is just there for debugging.
830 */
831LIR* Mir2Lir::InsertCaseLabel(DexOffset vaddr, int keyVal) {
832  LIR* boundary_lir = &block_label_list_[mir_graph_->FindBlock(vaddr)->id];
833  LIR* res = boundary_lir;
834  if (cu_->verbose) {
835    // Only pay the expense if we're pretty-printing.
836    LIR* new_label = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocLIR));
837    new_label->dalvik_offset = vaddr;
838    new_label->opcode = kPseudoCaseLabel;
839    new_label->operands[0] = keyVal;
840    new_label->flags.fixup = kFixupLabel;
841    DCHECK(!new_label->flags.use_def_invalid);
842    new_label->u.m.def_mask = ENCODE_ALL;
843    InsertLIRAfter(boundary_lir, new_label);
844    res = new_label;
845  }
846  return res;
847}
848
849void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
850  const uint16_t* table = tab_rec->table;
851  DexOffset base_vaddr = tab_rec->vaddr;
852  const int32_t *targets = reinterpret_cast<const int32_t*>(&table[4]);
853  int entries = table[1];
854  int low_key = s4FromSwitchData(&table[2]);
855  for (int i = 0; i < entries; i++) {
856    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], i + low_key);
857  }
858}
859
860void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
861  const uint16_t* table = tab_rec->table;
862  DexOffset base_vaddr = tab_rec->vaddr;
863  int entries = table[1];
864  const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
865  const int32_t* targets = &keys[entries];
866  for (int i = 0; i < entries; i++) {
867    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], keys[i]);
868  }
869}
870
871void Mir2Lir::ProcessSwitchTables() {
872  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
873  while (true) {
874    Mir2Lir::SwitchTable *tab_rec = iterator.Next();
875    if (tab_rec == NULL) break;
876    if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) {
877      MarkPackedCaseLabels(tab_rec);
878    } else if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
879      MarkSparseCaseLabels(tab_rec);
880    } else {
881      LOG(FATAL) << "Invalid switch table";
882    }
883  }
884}
885
886void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table) {
887  /*
888   * Sparse switch data format:
889   *  ushort ident = 0x0200   magic value
890   *  ushort size       number of entries in the table; > 0
891   *  int keys[size]      keys, sorted low-to-high; 32-bit aligned
892   *  int targets[size]     branch targets, relative to switch opcode
893   *
894   * Total size is (2+size*4) 16-bit code units.
895   */
896  uint16_t ident = table[0];
897  int entries = table[1];
898  const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
899  const int32_t* targets = &keys[entries];
900  LOG(INFO) <<  "Sparse switch table - ident:0x" << std::hex << ident
901            << ", entries: " << std::dec << entries;
902  for (int i = 0; i < entries; i++) {
903    LOG(INFO) << "  Key[" << keys[i] << "] -> 0x" << std::hex << targets[i];
904  }
905}
906
907void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table) {
908  /*
909   * Packed switch data format:
910   *  ushort ident = 0x0100   magic value
911   *  ushort size       number of entries in the table
912   *  int first_key       first (and lowest) switch case value
913   *  int targets[size]     branch targets, relative to switch opcode
914   *
915   * Total size is (4+size*2) 16-bit code units.
916   */
917  uint16_t ident = table[0];
918  const int32_t* targets = reinterpret_cast<const int32_t*>(&table[4]);
919  int entries = table[1];
920  int low_key = s4FromSwitchData(&table[2]);
921  LOG(INFO) << "Packed switch table - ident:0x" << std::hex << ident
922            << ", entries: " << std::dec << entries << ", low_key: " << low_key;
923  for (int i = 0; i < entries; i++) {
924    LOG(INFO) << "  Key[" << (i + low_key) << "] -> 0x" << std::hex
925              << targets[i];
926  }
927}
928
/* Set up special LIR to mark a Dalvik byte-code instruction start for pretty printing */
void Mir2Lir::MarkBoundary(DexOffset offset, const char* inst_str) {
  // NOTE: only used for debug listings.
  // 'offset' is currently unused here; the pseudo-op records only the
  // instruction text. The string is arena-duplicated so it outlives the caller.
  NewLIR1(kPseudoDalvikByteCodeBoundary, WrapPointer(ArenaStrdup(inst_str)));
}
934
935bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2) {
936  bool is_taken;
937  switch (opcode) {
938    case Instruction::IF_EQ: is_taken = (src1 == src2); break;
939    case Instruction::IF_NE: is_taken = (src1 != src2); break;
940    case Instruction::IF_LT: is_taken = (src1 < src2); break;
941    case Instruction::IF_GE: is_taken = (src1 >= src2); break;
942    case Instruction::IF_GT: is_taken = (src1 > src2); break;
943    case Instruction::IF_LE: is_taken = (src1 <= src2); break;
944    case Instruction::IF_EQZ: is_taken = (src1 == 0); break;
945    case Instruction::IF_NEZ: is_taken = (src1 != 0); break;
946    case Instruction::IF_LTZ: is_taken = (src1 < 0); break;
947    case Instruction::IF_GEZ: is_taken = (src1 >= 0); break;
948    case Instruction::IF_GTZ: is_taken = (src1 > 0); break;
949    case Instruction::IF_LEZ: is_taken = (src1 <= 0); break;
950    default:
951      LOG(FATAL) << "Unexpected opcode " << opcode;
952      is_taken = false;
953  }
954  return is_taken;
955}
956
957// Convert relation of src1/src2 to src2/src1
958ConditionCode Mir2Lir::FlipComparisonOrder(ConditionCode before) {
959  ConditionCode res;
960  switch (before) {
961    case kCondEq: res = kCondEq; break;
962    case kCondNe: res = kCondNe; break;
963    case kCondLt: res = kCondGt; break;
964    case kCondGt: res = kCondLt; break;
965    case kCondLe: res = kCondGe; break;
966    case kCondGe: res = kCondLe; break;
967    default:
968      res = static_cast<ConditionCode>(0);
969      LOG(FATAL) << "Unexpected ccode " << before;
970  }
971  return res;
972}
973
// TODO: move to mir_to_lir.cc
// Constructor: binds the code generator to its compilation unit and MIR graph,
// and zero/NULL-initializes all codegen state. The growable arrays are sized
// with rough per-method estimates and grow on demand.
Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
    : Backend(arena),
      literal_list_(NULL),
      method_literal_list_(NULL),
      class_literal_list_(NULL),
      code_literal_list_(NULL),
      first_fixup_(NULL),
      cu_(cu),
      mir_graph_(mir_graph),
      switch_tables_(arena, 4, kGrowableArraySwitchTables),
      fill_array_data_(arena, 4, kGrowableArrayFillArrayData),
      throw_launchpads_(arena, 2048, kGrowableArrayThrowLaunchPads),
      suspend_launchpads_(arena, 4, kGrowableArraySuspendLaunchPads),
      intrinsic_launchpads_(arena, 2048, kGrowableArrayMisc),
      tempreg_info_(arena, 20, kGrowableArrayMisc),
      reginfo_map_(arena, 64, kGrowableArrayMisc),
      pointer_storage_(arena, 128, kGrowableArrayMisc),
      data_offset_(0),
      total_size_(0),
      block_label_list_(NULL),
      promotion_map_(NULL),
      current_dalvik_offset_(0),
      estimated_native_code_size_(0),
      reg_pool_(NULL),
      live_sreg_(0),
      num_core_spills_(0),
      num_fp_spills_(0),
      frame_size_(0),
      core_spill_mask_(0),
      fp_spill_mask_(0),
      first_lir_insn_(NULL),
      last_lir_insn_(NULL) {
  // Reserve pointer id 0 for NULL.
  size_t null_idx = WrapPointer(NULL);
  DCHECK_EQ(null_idx, 0U);
}
1011
1012void Mir2Lir::Materialize() {
1013  cu_->NewTimingSplit("RegisterAllocation");
1014  CompilerInitializeRegAlloc();  // Needs to happen after SSA naming
1015
1016  /* Allocate Registers using simple local allocation scheme */
1017  SimpleRegAlloc();
1018
1019  /*
1020   * Custom codegen for special cases.  If for any reason the
1021   * special codegen doesn't succeed, first_lir_insn_ will be
1022   * set to NULL;
1023   */
1024  // TODO: Clean up GenSpecial() and return true only if special implementation is emitted.
1025  // Currently, GenSpecial() returns IsSpecial() but doesn't check after SpecialMIR2LIR().
1026  DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
1027  cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
1028      ->GenSpecial(this, cu_->method_idx);
1029
1030  /* Convert MIR to LIR, etc. */
1031  if (first_lir_insn_ == NULL) {
1032    MethodMIR2LIR();
1033  }
1034
1035  /* Method is not empty */
1036  if (first_lir_insn_) {
1037    // mark the targets of switch statement case labels
1038    ProcessSwitchTables();
1039
1040    /* Convert LIR into machine code. */
1041    AssembleLIR();
1042
1043    if (cu_->verbose) {
1044      CodegenDump();
1045    }
1046  }
1047}
1048
1049CompiledMethod* Mir2Lir::GetCompiledMethod() {
1050  // Combine vmap tables - core regs, then fp regs - into vmap_table
1051  std::vector<uint16_t> raw_vmap_table;
1052  // Core regs may have been inserted out of order - sort first
1053  std::sort(core_vmap_table_.begin(), core_vmap_table_.end());
1054  for (size_t i = 0 ; i < core_vmap_table_.size(); ++i) {
1055    // Copy, stripping out the phys register sort key
1056    raw_vmap_table.push_back(~(-1 << VREG_NUM_WIDTH) & core_vmap_table_[i]);
1057  }
1058  // If we have a frame, push a marker to take place of lr
1059  if (frame_size_ > 0) {
1060    raw_vmap_table.push_back(INVALID_VREG);
1061  } else {
1062    DCHECK_EQ(__builtin_popcount(core_spill_mask_), 0);
1063    DCHECK_EQ(__builtin_popcount(fp_spill_mask_), 0);
1064  }
1065  // Combine vmap tables - core regs, then fp regs. fp regs already sorted
1066  for (uint32_t i = 0; i < fp_vmap_table_.size(); i++) {
1067    raw_vmap_table.push_back(fp_vmap_table_[i]);
1068  }
1069  Leb128EncodingVector vmap_encoder;
1070  // Prefix the encoded data with its size.
1071  vmap_encoder.PushBackUnsigned(raw_vmap_table.size());
1072  for (uint16_t cur : raw_vmap_table) {
1073    vmap_encoder.PushBackUnsigned(cur);
1074  }
1075  CompiledMethod* result =
1076      new CompiledMethod(*cu_->compiler_driver, cu_->instruction_set, code_buffer_, frame_size_,
1077                         core_spill_mask_, fp_spill_mask_, encoded_mapping_table_,
1078                         vmap_encoder.GetData(), native_gc_map_);
1079  return result;
1080}
1081
1082size_t Mir2Lir::GetMaxPossibleCompilerTemps() const {
1083  // Chose a reasonably small value in order to contain stack growth.
1084  // Backends that are smarter about spill region can return larger values.
1085  const size_t max_compiler_temps = 10;
1086  return max_compiler_temps;
1087}
1088
1089size_t Mir2Lir::GetNumBytesForCompilerTempSpillRegion() {
1090  // By default assume that the Mir2Lir will need one slot for each temporary.
1091  // If the backend can better determine temps that have non-overlapping ranges and
1092  // temps that do not need spilled, it can actually provide a small region.
1093  return (mir_graph_->GetNumUsedCompilerTemps() * sizeof(uint32_t));
1094}
1095
1096int Mir2Lir::ComputeFrameSize() {
1097  /* Figure out the frame size */
1098  static const uint32_t kAlignMask = kStackAlignment - 1;
1099  uint32_t size = ((num_core_spills_ + num_fp_spills_ +
1100                   1 /* filler word */ + cu_->num_regs + cu_->num_outs)
1101                   * sizeof(uint32_t)) +
1102                   GetNumBytesForCompilerTempSpillRegion();
1103  /* Align and set */
1104  return (size + kAlignMask) & ~(kAlignMask);
1105}
1106
1107/*
1108 * Append an LIR instruction to the LIR list maintained by a compilation
1109 * unit
1110 */
1111void Mir2Lir::AppendLIR(LIR* lir) {
1112  if (first_lir_insn_ == NULL) {
1113    DCHECK(last_lir_insn_ == NULL);
1114    last_lir_insn_ = first_lir_insn_ = lir;
1115    lir->prev = lir->next = NULL;
1116  } else {
1117    last_lir_insn_->next = lir;
1118    lir->prev = last_lir_insn_;
1119    lir->next = NULL;
1120    last_lir_insn_ = lir;
1121  }
1122}
1123
1124/*
1125 * Insert an LIR instruction before the current instruction, which cannot be the
1126 * first instruction.
1127 *
1128 * prev_lir <-> new_lir <-> current_lir
1129 */
1130void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir) {
1131  DCHECK(current_lir->prev != NULL);
1132  LIR *prev_lir = current_lir->prev;
1133
1134  prev_lir->next = new_lir;
1135  new_lir->prev = prev_lir;
1136  new_lir->next = current_lir;
1137  current_lir->prev = new_lir;
1138}
1139
1140/*
1141 * Insert an LIR instruction after the current instruction, which cannot be the
1142 * first instruction.
1143 *
1144 * current_lir -> new_lir -> old_next
1145 */
1146void Mir2Lir::InsertLIRAfter(LIR* current_lir, LIR* new_lir) {
1147  new_lir->prev = current_lir;
1148  new_lir->next = current_lir->next;
1149  current_lir->next = new_lir;
1150  new_lir->next->prev = new_lir;
1151}
1152
1153bool Mir2Lir::IsPowerOfTwo(uint64_t x) {
1154  return (x & (x - 1)) == 0;
1155}
1156
1157// Returns the index of the lowest set bit in 'x'.
1158int32_t Mir2Lir::LowestSetBit(uint64_t x) {
1159  int bit_posn = 0;
1160  while ((x & 0xf) == 0) {
1161    bit_posn += 4;
1162    x >>= 4;
1163  }
1164  while ((x & 1) == 0) {
1165    bit_posn++;
1166    x >>= 1;
1167  }
1168  return bit_posn;
1169}
1170
1171bool Mir2Lir::BadOverlap(RegLocation rl_src, RegLocation rl_dest) {
1172  DCHECK(rl_src.wide);
1173  DCHECK(rl_dest.wide);
1174  return (abs(mir_graph_->SRegToVReg(rl_src.s_reg_low) - mir_graph_->SRegToVReg(rl_dest.s_reg_low)) == 1);
1175}
1176
1177LIR *Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, int temp_reg, int base_reg,
1178                                int offset, int check_value, LIR* target) {
1179  // Handle this for architectures that can't compare to memory.
1180  LoadWordDisp(base_reg, offset, temp_reg);
1181  LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target);
1182  return branch;
1183}
1184
1185}  // namespace art
1186