codegen_util.cc revision 28c2300d9a85f4e7288fb5d94280332f923b4df3
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_internals.h"
#include "dex_file-inl.h"
#include "gc_map.h"
#include "mapping_table.h"
#include "mir_to_lir-inl.h"
#include "verifier/dex_gc_map.h"
#include "verifier/method_verifier.h"

namespace art {

bool Mir2Lir::IsInexpensiveConstant(RegLocation rl_src) {
  bool res = false;
  if (rl_src.is_const) {
    if (rl_src.wide) {
      if (rl_src.fp) {
        res = InexpensiveConstantDouble(mir_graph_->ConstantValueWide(rl_src));
      } else {
        res = InexpensiveConstantLong(mir_graph_->ConstantValueWide(rl_src));
      }
    } else {
      if (rl_src.fp) {
        res = InexpensiveConstantFloat(mir_graph_->ConstantValue(rl_src));
      } else {
        res = InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src));
      }
    }
  }
  return res;
}

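/*
 * Mark a GC safepoint: flag the instruction as defining all resources and
 * append a kPseudoSafepointPC label that records the native PC and blocks
 * code motion across the safepoint.
 */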
void Mir2Lir::MarkSafepointPC(LIR* inst) {
  inst->def_mask = ENCODE_ALL;
  LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC);
  DCHECK_EQ(safepoint_pc->def_mask, ENCODE_ALL);
}

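/*
 * Ask the compiler driver whether a fast path exists for an instance field
 * access; on success the field offset and volatility are returned through
 * the out-parameters.
 */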
bool Mir2Lir::FastInstance(uint32_t field_idx, bool is_put, int* field_offset, bool* is_volatile) {
  return cu_->compiler_driver->ComputeInstanceFieldInfo(
      field_idx, mir_graph_->GetCurrentDexCompilationUnit(), is_put, field_offset, is_volatile);
}

/* Convert an instruction to a NOP */
void Mir2Lir::NopLIR(LIR* lir) {
  lir->flags.is_nop = true;
}

void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type) {
  uint64_t* mask_ptr;
  uint64_t mask = ENCODE_MEM;
  DCHECK(GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE));
  if (is_load) {
    mask_ptr = &lir->use_mask;
  } else {
    mask_ptr = &lir->def_mask;
  }
  /* Clear out the memref flags */
  *mask_ptr &= ~mask;
  /* ..and then add back the one we need */
  switch (mem_type) {
    case kLiteral:
      DCHECK(is_load);
      *mask_ptr |= ENCODE_LITERAL;
      break;
    case kDalvikReg:
      *mask_ptr |= ENCODE_DALVIK_REG;
      break;
    case kHeapRef:
      *mask_ptr |= ENCODE_HEAP_REF;
      break;
    case kMustNotAlias:
      /* Currently only loads can be marked as kMustNotAlias */
      DCHECK(!(GetTargetInstFlags(lir->opcode) & IS_STORE));
      *mask_ptr |= ENCODE_MUST_NOT_ALIAS;
      break;
    default:
      LOG(FATAL) << "Oat: invalid memref kind - " << mem_type;
  }
}

/*
 * Mark load/store instructions that access Dalvik registers through the stack.
 */
void Mir2Lir::AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load,
                                      bool is64bit) {
  SetMemRefType(lir, is_load, kDalvikReg);

  /*
   * Store the Dalvik register id in alias_info. Mark the MSB if it is a 64-bit
   * access.
   */
  lir->alias_info = ENCODE_ALIAS_INFO(reg_id, is64bit);
}

/*
 * Debugging macros
 */
#define DUMP_RESOURCE_MASK(X)

/* Pretty-print a LIR instruction */
void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) {
  int offset = lir->offset;
  int dest = lir->operands[0];
  const bool dump_nop = (cu_->enable_debug & (1 << kDebugShowNops));

  /* Handle pseudo-ops individually, and all regular insns as a group */
  switch (lir->opcode) {
    case kPseudoMethodEntry:
      LOG(INFO) << "-------- method entry "
                << PrettyMethod(cu_->method_idx, *cu_->dex_file);
      break;
    case kPseudoMethodExit:
      LOG(INFO) << "-------- Method_Exit";
      break;
    case kPseudoBarrier:
      LOG(INFO) << "-------- BARRIER";
      break;
    case kPseudoEntryBlock:
      LOG(INFO) << "-------- entry offset: 0x" << std::hex << dest;
      break;
    case kPseudoDalvikByteCodeBoundary:
      if (lir->operands[0] == 0) {
        lir->operands[0] = reinterpret_cast<uintptr_t>("No instruction string");
      }
      LOG(INFO) << "-------- dalvik offset: 0x" << std::hex
                << lir->dalvik_offset << " @ " << reinterpret_cast<char*>(lir->operands[0]);
      break;
    case kPseudoExitBlock:
      LOG(INFO) << "-------- exit offset: 0x" << std::hex << dest;
      break;
    case kPseudoPseudoAlign4:
      LOG(INFO) << reinterpret_cast<uintptr_t>(base_addr) + offset << " (0x" << std::hex
                << offset << "): .align4";
      break;
    case kPseudoEHBlockLabel:
      LOG(INFO) << "Exception_Handling:";
      break;
    case kPseudoTargetLabel:
    case kPseudoNormalBlockLabel:
      LOG(INFO) << "L" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoThrowTarget:
      LOG(INFO) << "LT" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoIntrinsicRetry:
      LOG(INFO) << "IR" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoSuspendTarget:
      LOG(INFO) << "LS" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoSafepointPC:
      LOG(INFO) << "LsafepointPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
      break;
    case kPseudoExportedPC:
      LOG(INFO) << "LexportedPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
      break;
    case kPseudoCaseLabel:
      LOG(INFO) << "LC" << reinterpret_cast<void*>(lir) << ": Case target 0x"
                << std::hex << lir->operands[0] << "|" << std::dec << lir->operands[0];
      break;
    default:
      if (lir->flags.is_nop && !dump_nop) {
        break;
      } else {
        std::string op_name(BuildInsnString(GetTargetInstName(lir->opcode),
                                            lir, base_addr));
        std::string op_operands(BuildInsnString(GetTargetInstFmt(lir->opcode),
                                                lir, base_addr));
        LOG(INFO) << StringPrintf("%05x: %-9s%s%s",
                                  reinterpret_cast<unsigned int>(base_addr + offset),
                                  op_name.c_str(), op_operands.c_str(),
                                  lir->flags.is_nop ? "(nop)" : "");
      }
      break;
  }

  if (lir->use_mask && (!lir->flags.is_nop || dump_nop)) {
    DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->use_mask, "use"));
  }
  if (lir->def_mask && (!lir->flags.is_nop || dump_nop)) {
    DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->def_mask, "def"));
  }
}

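/* Dump the promotion map: where each Dalvik vreg lives (core reg, FP reg or stack) */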
void Mir2Lir::DumpPromotionMap() {
  int num_regs = cu_->num_dalvik_registers + cu_->num_compiler_temps + 1;
  for (int i = 0; i < num_regs; i++) {
    PromotionMap v_reg_map = promotion_map_[i];
    std::string buf;
    if (v_reg_map.fp_location == kLocPhysReg) {
      StringAppendF(&buf, " : s%d", v_reg_map.FpReg & FpRegMask());
    }

    std::string buf3;
    if (i < cu_->num_dalvik_registers) {
      StringAppendF(&buf3, "%02d", i);
    } else if (i == mir_graph_->GetMethodSReg()) {
      buf3 = "Method*";
    } else {
      StringAppendF(&buf3, "ct%d", i - cu_->num_dalvik_registers);
    }

    LOG(INFO) << StringPrintf("V[%s] -> %s%d%s", buf3.c_str(),
                              v_reg_map.core_location == kLocPhysReg ?
                              "r" : "SP+", v_reg_map.core_location == kLocPhysReg ?
                              v_reg_map.core_reg : SRegOffset(i),
                              buf.c_str());
  }
}

/* Dump a mapping table */
void Mir2Lir::DumpMappingTable(const char* table_name, const std::string& descriptor,
                               const std::string& name, const std::string& signature,
                               const std::vector<uint32_t>& v) {
  if (v.size() > 0) {
    std::string line(StringPrintf("\n  %s %s%s_%s_table[%zu] = {", table_name,
                     descriptor.c_str(), name.c_str(), signature.c_str(), v.size()));
    std::replace(line.begin(), line.end(), ';', '_');
    LOG(INFO) << line;
    for (uint32_t i = 0; i < v.size(); i += 2) {
      line = StringPrintf("    {0x%05x, 0x%04x},", v[i], v[i+1]);
      LOG(INFO) << line;
    }
    LOG(INFO) << "  };\n\n";
  }
}

/* Dump instructions and constant pool contents */
void Mir2Lir::CodegenDump() {
  LOG(INFO) << "Dumping LIR insns for "
            << PrettyMethod(cu_->method_idx, *cu_->dex_file);
  LIR* lir_insn;
  int insns_size = cu_->code_item->insns_size_in_code_units_;

  LOG(INFO) << "Regs (excluding ins) : " << cu_->num_regs;
  LOG(INFO) << "Ins              : " << cu_->num_ins;
  LOG(INFO) << "Outs             : " << cu_->num_outs;
  LOG(INFO) << "CoreSpills       : " << num_core_spills_;
  LOG(INFO) << "FPSpills         : " << num_fp_spills_;
  LOG(INFO) << "CompilerTemps    : " << cu_->num_compiler_temps;
  LOG(INFO) << "Frame size       : " << frame_size_;
  LOG(INFO) << "code size is " << total_size_ <<
    " bytes, Dalvik size is " << insns_size * 2;
  LOG(INFO) << "expansion factor: "
            << static_cast<float>(total_size_) / static_cast<float>(insns_size * 2);
  DumpPromotionMap();
  for (lir_insn = first_lir_insn_; lir_insn != NULL; lir_insn = lir_insn->next) {
    DumpLIRInsn(lir_insn, 0);
  }
  for (lir_insn = literal_list_; lir_insn != NULL; lir_insn = lir_insn->next) {
    LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lir_insn->offset, lir_insn->offset,
                              lir_insn->operands[0]);
  }

  const DexFile::MethodId& method_id =
      cu_->dex_file->GetMethodId(cu_->method_idx);
  std::string signature(cu_->dex_file->GetMethodSignature(method_id));
  std::string name(cu_->dex_file->GetMethodName(method_id));
  std::string descriptor(cu_->dex_file->GetMethodDeclaringClassDescriptor(method_id));

  // Dump mapping tables
  DumpMappingTable("PC2Dex_MappingTable", descriptor, name, signature, pc2dex_mapping_table_);
  DumpMappingTable("Dex2PC_MappingTable", descriptor, name, signature, dex2pc_mapping_table_);
}

/*
 * Search the existing constants in the literal pool for an exact or close match
 * within the specified delta (greater than or equal to 0).
 */
LIR* Mir2Lir::ScanLiteralPool(LIR* data_target, int value, unsigned int delta) {
  while (data_target) {
    if ((static_cast<unsigned>(value - data_target->operands[0])) <= delta)
      return data_target;
    data_target = data_target->next;
  }
  return NULL;
}

/* Search the existing constants in the literal pool for an exact wide match */
LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) {
  bool lo_match = false;
  LIR* lo_target = NULL;
  while (data_target) {
    if (lo_match && (data_target->operands[0] == val_hi)) {
      // Record high word in case we need to expand this later.
      lo_target->operands[1] = val_hi;
      return lo_target;
    }
    lo_match = false;
    if (data_target->operands[0] == val_lo) {
      lo_match = true;
      lo_target = data_target;
    }
    data_target = data_target->next;
  }
  return NULL;
}

/*
 * The following are building blocks to insert constants into the pool or
 * instruction streams.
 */

/* Add a 32-bit constant to the constant pool */
LIR* Mir2Lir::AddWordData(LIR** constant_list_p, int value) {
  /* Add the constant to the literal pool */
  if (constant_list_p) {
    LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocData));
    new_value->operands[0] = value;
    new_value->next = *constant_list_p;
    *constant_list_p = new_value;
    return new_value;
  }
  return NULL;
}

/* Add a 64-bit constant to the constant pool or mixed with code */
LIR* Mir2Lir::AddWideData(LIR** constant_list_p, int val_lo, int val_hi) {
  AddWordData(constant_list_p, val_hi);
  return AddWordData(constant_list_p, val_lo);
}

static void PushWord(std::vector<uint8_t>& buf, int data) {
  buf.push_back(data & 0xff);
  buf.push_back((data >> 8) & 0xff);
  buf.push_back((data >> 16) & 0xff);
  buf.push_back((data >> 24) & 0xff);
}

static void AlignBuffer(std::vector<uint8_t>& buf, size_t offset) {
  while (buf.size() < offset) {
    buf.push_back(0);
  }
}

/* Write the literal pool to the output stream */
void Mir2Lir::InstallLiteralPools() {
  AlignBuffer(code_buffer_, data_offset_);
  LIR* data_lir = literal_list_;
  while (data_lir != NULL) {
    PushWord(code_buffer_, data_lir->operands[0]);
    data_lir = NEXT_LIR(data_lir);
  }
  // Push code and method literals, record offsets for the compiler to patch.
  data_lir = code_literal_list_;
  while (data_lir != NULL) {
    uint32_t target = data_lir->operands[0];
    cu_->compiler_driver->AddCodePatch(cu_->dex_file,
                                       cu_->method_idx,
                                       cu_->invoke_type,
                                       target,
                                       static_cast<InvokeType>(data_lir->operands[1]),
                                       code_buffer_.size());
    const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
    // Unique based on target to ensure code deduplication works.
    uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
    PushWord(code_buffer_, unique_patch_value);
    data_lir = NEXT_LIR(data_lir);
  }
  data_lir = method_literal_list_;
  while (data_lir != NULL) {
    uint32_t target = data_lir->operands[0];
    cu_->compiler_driver->AddMethodPatch(cu_->dex_file,
                                         cu_->method_idx,
                                         cu_->invoke_type,
                                         target,
                                         static_cast<InvokeType>(data_lir->operands[1]),
                                         code_buffer_.size());
    const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
    // Unique based on target to ensure code deduplication works.
    uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
    PushWord(code_buffer_, unique_patch_value);
    data_lir = NEXT_LIR(data_lir);
  }
}

/* Write the switch tables to the output stream */
void Mir2Lir::InstallSwitchTables() {
  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
  while (true) {
    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    AlignBuffer(code_buffer_, tab_rec->offset);
    /*
     * For Arm, our reference point is the address of the bx
     * instruction that does the launch, so we have to subtract
     * the auto pc-advance.  For other targets the reference point
     * is a label, so we can use the offset as-is.
     */
    int bx_offset = INVALID_OFFSET;
    switch (cu_->instruction_set) {
      case kThumb2:
        bx_offset = tab_rec->anchor->offset + 4;
        break;
      case kX86:
        bx_offset = 0;
        break;
      case kMips:
        bx_offset = tab_rec->anchor->offset;
        break;
      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
    }
    if (cu_->verbose) {
      LOG(INFO) << "Switch table for offset 0x" << std::hex << bx_offset;
    }
    if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
      const int* keys = reinterpret_cast<const int*>(&(tab_rec->table[2]));
      for (int elems = 0; elems < tab_rec->table[1]; elems++) {
        int disp = tab_rec->targets[elems]->offset - bx_offset;
        if (cu_->verbose) {
          LOG(INFO) << "  Case[" << elems << "] key: 0x"
                    << std::hex << keys[elems] << ", disp: 0x"
                    << std::hex << disp;
        }
        PushWord(code_buffer_, keys[elems]);
        PushWord(code_buffer_, disp);
      }
    } else {
      DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
                static_cast<int>(Instruction::kPackedSwitchSignature));
      for (int elems = 0; elems < tab_rec->table[1]; elems++) {
        int disp = tab_rec->targets[elems]->offset - bx_offset;
        if (cu_->verbose) {
          LOG(INFO) << "  Case[" << elems << "] disp: 0x"
                    << std::hex << disp;
        }
        PushWord(code_buffer_, disp);
      }
    }
  }
}

/* Write the fill array data to the output stream */
void Mir2Lir::InstallFillArrayData() {
  GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
  while (true) {
    Mir2Lir::FillArrayData* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    AlignBuffer(code_buffer_, tab_rec->offset);
    for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
      code_buffer_.push_back(tab_rec->table[i] & 0xFF);
      code_buffer_.push_back((tab_rec->table[i] >> 8) & 0xFF);
    }
  }
}

static int AssignLiteralOffsetCommon(LIR* lir, int offset) {
  for (; lir != NULL; lir = lir->next) {
    lir->offset = offset;
    offset += 4;
  }
  return offset;
}

// Make sure we have a code address for every declared catch entry
bool Mir2Lir::VerifyCatchEntries() {
  bool success = true;
  for (std::set<uint32_t>::const_iterator it = mir_graph_->catches_.begin();
       it != mir_graph_->catches_.end(); ++it) {
    uint32_t dex_pc = *it;
    bool found = false;
    for (size_t i = 0; i < dex2pc_mapping_table_.size(); i += 2) {
      if (dex_pc == dex2pc_mapping_table_[i+1]) {
        found = true;
        break;
      }
    }
    if (!found) {
      LOG(INFO) << "Missing native PC for catch entry @ 0x" << std::hex << dex_pc;
      success = false;
    }
  }
  // Now, try in the other direction
  for (size_t i = 0; i < dex2pc_mapping_table_.size(); i += 2) {
    uint32_t dex_pc = dex2pc_mapping_table_[i+1];
    if (mir_graph_->catches_.find(dex_pc) == mir_graph_->catches_.end()) {
      LOG(INFO) << "Unexpected catch entry @ dex pc 0x" << std::hex << dex_pc;
      success = false;
    }
  }
  if (!success) {
    LOG(INFO) << "Bad dex2pcMapping table in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
    LOG(INFO) << "Entries @ decode: " << mir_graph_->catches_.size() << ", Entries in table: "
              << dex2pc_mapping_table_.size()/2;
  }
  return success;
}

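/*
 * Walk the LIR list and build the pc2dex (safepoint) and dex2pc (exported PC)
 * mapping tables, then emit them in encoded form, verifying the result on
 * debug builds.
 */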
void Mir2Lir::CreateMappingTables() {
  for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
      pc2dex_mapping_table_.push_back(tgt_lir->offset);
      pc2dex_mapping_table_.push_back(tgt_lir->dalvik_offset);
    }
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
      dex2pc_mapping_table_.push_back(tgt_lir->offset);
      dex2pc_mapping_table_.push_back(tgt_lir->dalvik_offset);
    }
  }
  if (kIsDebugBuild) {
    CHECK(VerifyCatchEntries());
  }
  CHECK_EQ(pc2dex_mapping_table_.size() & 1, 0U);
  CHECK_EQ(dex2pc_mapping_table_.size() & 1, 0U);
  uint32_t total_entries = (pc2dex_mapping_table_.size() + dex2pc_mapping_table_.size()) / 2;
  uint32_t pc2dex_entries = pc2dex_mapping_table_.size() / 2;
  encoded_mapping_table_.PushBack(total_entries);
  encoded_mapping_table_.PushBack(pc2dex_entries);
  encoded_mapping_table_.InsertBack(pc2dex_mapping_table_.begin(), pc2dex_mapping_table_.end());
  encoded_mapping_table_.InsertBack(dex2pc_mapping_table_.begin(), dex2pc_mapping_table_.end());
  if (kIsDebugBuild) {
    // Verify the encoded table holds the expected data.
    MappingTable table(&encoded_mapping_table_.GetData()[0]);
    CHECK_EQ(table.TotalSize(), total_entries);
    CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
    CHECK_EQ(table.DexToPcSize(), dex2pc_mapping_table_.size() / 2);
    MappingTable::PcToDexIterator it = table.PcToDexBegin();
    for (uint32_t i = 0; i < pc2dex_mapping_table_.size(); ++i, ++it) {
      CHECK_EQ(pc2dex_mapping_table_.at(i), it.NativePcOffset());
      ++i;
      CHECK_EQ(pc2dex_mapping_table_.at(i), it.DexPc());
    }
    MappingTable::DexToPcIterator it2 = table.DexToPcBegin();
    for (uint32_t i = 0; i < dex2pc_mapping_table_.size(); ++i, ++it2) {
      CHECK_EQ(dex2pc_mapping_table_.at(i), it2.NativePcOffset());
      ++i;
      CHECK_EQ(dex2pc_mapping_table_.at(i), it2.DexPc());
    }
  }
}

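/*
 * Helper that builds the native GC map as an open-addressed hash table
 * (linear probing) keyed by native PC offset.  The 4-byte header packs the
 * native offset width (3 bits), the reference bitmap width (13 bits) and
 * the entry count (16 bits).
 */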
class NativePcToReferenceMapBuilder {
 public:
  NativePcToReferenceMapBuilder(std::vector<uint8_t>* table,
                                size_t entries, uint32_t max_native_offset,
                                size_t references_width) : entries_(entries),
                                references_width_(references_width), in_use_(entries),
                                table_(table) {
    // Compute width in bytes needed to hold max_native_offset.
    native_offset_width_ = 0;
    while (max_native_offset != 0) {
      native_offset_width_++;
      max_native_offset >>= 8;
    }
    // Resize table and set up header.
    table->resize((EntryWidth() * entries) + sizeof(uint32_t));
    CHECK_LT(native_offset_width_, 1U << 3);
    (*table)[0] = native_offset_width_ & 7;
    CHECK_LT(references_width_, 1U << 13);
    (*table)[0] |= (references_width_ << 3) & 0xFF;
    (*table)[1] = (references_width_ >> 5) & 0xFF;
    CHECK_LT(entries, 1U << 16);
    (*table)[2] = entries & 0xFF;
    (*table)[3] = (entries >> 8) & 0xFF;
  }

  void AddEntry(uint32_t native_offset, const uint8_t* references) {
    size_t table_index = TableIndex(native_offset);
    while (in_use_[table_index]) {
      table_index = (table_index + 1) % entries_;
    }
    in_use_[table_index] = true;
    SetNativeOffset(table_index, native_offset);
    DCHECK_EQ(native_offset, GetNativeOffset(table_index));
    SetReferences(table_index, references);
  }

 private:
  size_t TableIndex(uint32_t native_offset) {
    return NativePcOffsetToReferenceMap::Hash(native_offset) % entries_;
  }

  uint32_t GetNativeOffset(size_t table_index) {
    uint32_t native_offset = 0;
    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
    for (size_t i = 0; i < native_offset_width_; i++) {
      native_offset |= (*table_)[table_offset + i] << (i * 8);
    }
    return native_offset;
  }

  void SetNativeOffset(size_t table_index, uint32_t native_offset) {
    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
    for (size_t i = 0; i < native_offset_width_; i++) {
      (*table_)[table_offset + i] = (native_offset >> (i * 8)) & 0xFF;
    }
  }

  void SetReferences(size_t table_index, const uint8_t* references) {
    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
    memcpy(&(*table_)[table_offset + native_offset_width_], references, references_width_);
  }

  size_t EntryWidth() const {
    return native_offset_width_ + references_width_;
  }

  // Number of entries in the table.
  const size_t entries_;
  // Number of bytes used to encode the reference bitmap.
  const size_t references_width_;
  // Number of bytes used to encode a native offset.
  size_t native_offset_width_;
  // Entries that are in use.
  std::vector<bool> in_use_;
  // The table we're building.
  std::vector<uint8_t>* const table_;
};

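/*
 * Combine the pc2dex mapping table with the verifier's dex-PC-to-reference
 * bitmaps to produce the native-PC-to-reference GC map.
 */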
void Mir2Lir::CreateNativeGcMap() {
  const std::vector<uint32_t>& mapping_table = pc2dex_mapping_table_;
  uint32_t max_native_offset = 0;
  for (size_t i = 0; i < mapping_table.size(); i += 2) {
    uint32_t native_offset = mapping_table[i + 0];
    if (native_offset > max_native_offset) {
      max_native_offset = native_offset;
    }
  }
  MethodReference method_ref(cu_->dex_file, cu_->method_idx);
  const std::vector<uint8_t>* gc_map_raw = verifier::MethodVerifier::GetDexGcMap(method_ref);
  verifier::DexPcToReferenceMap dex_gc_map(&(*gc_map_raw)[4], gc_map_raw->size() - 4);
  // Compute native offset to references size.
  NativePcToReferenceMapBuilder native_gc_map_builder(&native_gc_map_,
                                                      mapping_table.size() / 2, max_native_offset,
                                                      dex_gc_map.RegWidth());

  for (size_t i = 0; i < mapping_table.size(); i += 2) {
    uint32_t native_offset = mapping_table[i + 0];
    uint32_t dex_pc = mapping_table[i + 1];
    const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
    CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc;
    native_gc_map_builder.AddEntry(native_offset, references);
  }
}

/* Determine the offset of each literal field */
int Mir2Lir::AssignLiteralOffset(int offset) {
  offset = AssignLiteralOffsetCommon(literal_list_, offset);
  offset = AssignLiteralOffsetCommon(code_literal_list_, offset);
  offset = AssignLiteralOffsetCommon(method_literal_list_, offset);
  return offset;
}

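/* Assign data-section offsets to each switch table payload */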
int Mir2Lir::AssignSwitchTablesOffset(int offset) {
  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
  while (true) {
    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    tab_rec->offset = offset;
    if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
      offset += tab_rec->table[1] * (sizeof(int) * 2);
    } else {
      DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
                static_cast<int>(Instruction::kPackedSwitchSignature));
      offset += tab_rec->table[1] * sizeof(int);
    }
  }
  return offset;
}

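/* Assign word-aligned data-section offsets to each fill-array-data payload */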
int Mir2Lir::AssignFillArrayDataOffset(int offset) {
  GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
  while (true) {
    Mir2Lir::FillArrayData* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    tab_rec->offset = offset;
    offset += tab_rec->size;
    // word align
    offset = (offset + 3) & ~3;
  }
  return offset;
}

// LIR offset assignment.
int Mir2Lir::AssignInsnOffsets() {
  LIR* lir;
  int offset = 0;

  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
    lir->offset = offset;
    if (LIKELY(lir->opcode >= 0)) {
      if (!lir->flags.is_nop) {
        offset += lir->flags.size;
      }
    } else if (UNLIKELY(lir->opcode == kPseudoPseudoAlign4)) {
      if (offset & 0x2) {
        offset += 2;
        lir->operands[0] = 1;
      } else {
        lir->operands[0] = 0;
      }
    }
    /* Pseudo opcodes don't consume space */
  }
  return offset;
}

/*
 * Walk the compilation unit and assign offsets to instructions
 * and literals and compute the total size of the compiled unit.
 */
void Mir2Lir::AssignOffsets() {
  int offset = AssignInsnOffsets();

  /* Const values have to be word aligned */
  offset = (offset + 3) & ~3;

  /* Set up offsets for literals */
  data_offset_ = offset;

  offset = AssignLiteralOffset(offset);

  offset = AssignSwitchTablesOffset(offset);

  offset = AssignFillArrayDataOffset(offset);

  total_size_ = offset;
}

/*
 * Go over each instruction in the list and calculate the offset from the top
 * before sending them off to the assembler. If an out-of-range branch distance
 * is seen, rearrange the instructions a bit to correct it.
 */
void Mir2Lir::AssembleLIR() {
  AssignOffsets();
  int assembler_retries = 0;
  /*
   * Assemble here.  Note that we generate code with optimistic assumptions
   * and if found not to work, we'll have to redo the sequence and retry.
   */
  while (true) {
    AssemblerStatus res = AssembleInstructions(0);
    if (res == kSuccess) {
      break;
    } else {
      assembler_retries++;
      if (assembler_retries > MAX_ASSEMBLER_RETRIES) {
        CodegenDump();
        LOG(FATAL) << "Assembler error - too many retries";
      }
      // Redo offsets and try again
      AssignOffsets();
      code_buffer_.clear();
    }
  }

  // Install literals
  InstallLiteralPools();

  // Install switch tables
  InstallSwitchTables();

  // Install fill array data
  InstallFillArrayData();

  // Create the mapping table and native offset to reference map.
  CreateMappingTables();

  CreateNativeGcMap();
}

/*
 * Insert a kPseudoCaseLabel at the beginning of the Dalvik
 * offset vaddr.  This label will be used to fix up the case
 * branch table during the assembly phase.  Be sure to set
 * all resource flags on this to prevent code motion across
 * target boundaries.  KeyVal is just there for debugging.
 */
LIR* Mir2Lir::InsertCaseLabel(int vaddr, int keyVal) {
  LIR* boundary_lir = boundary_map_.Get(vaddr);
  if (boundary_lir == NULL) {
    LOG(FATAL) << "Error: didn't find vaddr 0x" << std::hex << vaddr;
  }
  LIR* new_label = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocLIR));
  new_label->dalvik_offset = vaddr;
  new_label->opcode = kPseudoCaseLabel;
  new_label->operands[0] = keyVal;
  InsertLIRAfter(boundary_lir, new_label);
  return new_label;
}

void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
  const uint16_t* table = tab_rec->table;
  int base_vaddr = tab_rec->vaddr;
  const int* targets = reinterpret_cast<const int*>(&table[4]);
  int entries = table[1];
  int low_key = s4FromSwitchData(&table[2]);
  for (int i = 0; i < entries; i++) {
    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], i + low_key);
  }
}

void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
  const uint16_t* table = tab_rec->table;
  int base_vaddr = tab_rec->vaddr;
  int entries = table[1];
  const int* keys = reinterpret_cast<const int*>(&table[2]);
  const int* targets = &keys[entries];
  for (int i = 0; i < entries; i++) {
    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], keys[i]);
  }
}

void Mir2Lir::ProcessSwitchTables() {
  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
  while (true) {
    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) {
      MarkPackedCaseLabels(tab_rec);
    } else if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
      MarkSparseCaseLabels(tab_rec);
    } else {
      LOG(FATAL) << "Invalid switch table";
    }
  }
}

void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table) {
  /*
   * Sparse switch data format:
   *  ushort ident = 0x0200   magic value
   *  ushort size             number of entries in the table; > 0
   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (2+size*4) 16-bit code units.
   */
  uint16_t ident = table[0];
  int entries = table[1];
  const int* keys = reinterpret_cast<const int*>(&table[2]);
  const int* targets = &keys[entries];
  LOG(INFO) << "Sparse switch table - ident:0x" << std::hex << ident
            << ", entries: " << std::dec << entries;
  for (int i = 0; i < entries; i++) {
    LOG(INFO) << "  Key[" << keys[i] << "] -> 0x" << std::hex << targets[i];
  }
}

void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table) {
  /*
   * Packed switch data format:
   *  ushort ident = 0x0100   magic value
   *  ushort size             number of entries in the table
   *  int first_key           first (and lowest) switch case value
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (4+size*2) 16-bit code units.
   */
  uint16_t ident = table[0];
  const int* targets = reinterpret_cast<const int*>(&table[4]);
  int entries = table[1];
  int low_key = s4FromSwitchData(&table[2]);
  LOG(INFO) << "Packed switch table - ident:0x" << std::hex << ident
            << ", entries: " << std::dec << entries << ", low_key: " << low_key;
  for (int i = 0; i < entries; i++) {
    LOG(INFO) << "  Key[" << (i + low_key) << "] -> 0x" << std::hex
              << targets[i];
  }
}

/*
 * Set up special LIR to mark a Dalvik byte-code instruction start and
 * record it in the boundary_map.  NOTE: in cases such as kMirOpCheck in
 * which we split a single Dalvik instruction, only the first MIR op
 * associated with a Dalvik PC should be entered into the map.
 */
LIR* Mir2Lir::MarkBoundary(int offset, const char* inst_str) {
  LIR* res = NewLIR1(kPseudoDalvikByteCodeBoundary, reinterpret_cast<uintptr_t>(inst_str));
  if (boundary_map_.Get(offset) == NULL) {
    boundary_map_.Put(offset, res);
  }
  return res;
}

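/* Statically evaluate a conditional branch whose operands are compile-time constants */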
bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2) {
  bool is_taken;
  switch (opcode) {
    case Instruction::IF_EQ: is_taken = (src1 == src2); break;
    case Instruction::IF_NE: is_taken = (src1 != src2); break;
    case Instruction::IF_LT: is_taken = (src1 < src2); break;
    case Instruction::IF_GE: is_taken = (src1 >= src2); break;
    case Instruction::IF_GT: is_taken = (src1 > src2); break;
    case Instruction::IF_LE: is_taken = (src1 <= src2); break;
    case Instruction::IF_EQZ: is_taken = (src1 == 0); break;
    case Instruction::IF_NEZ: is_taken = (src1 != 0); break;
    case Instruction::IF_LTZ: is_taken = (src1 < 0); break;
    case Instruction::IF_GEZ: is_taken = (src1 >= 0); break;
    case Instruction::IF_GTZ: is_taken = (src1 > 0); break;
    case Instruction::IF_LEZ: is_taken = (src1 <= 0); break;
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
      is_taken = false;
  }
  return is_taken;
}

// Convert relation of src1/src2 to src2/src1
ConditionCode Mir2Lir::FlipComparisonOrder(ConditionCode before) {
  ConditionCode res;
  switch (before) {
    case kCondEq: res = kCondEq; break;
    case kCondNe: res = kCondNe; break;
    case kCondLt: res = kCondGt; break;
    case kCondGt: res = kCondLt; break;
    case kCondLe: res = kCondGe; break;
    case kCondGe: res = kCondLe; break;
    default:
      res = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected ccode " << before;
  }
  return res;
}

// TODO: move to mir_to_lir.cc
Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
    : Backend(arena),
      literal_list_(NULL),
      method_literal_list_(NULL),
      code_literal_list_(NULL),
      cu_(cu),
      mir_graph_(mir_graph),
      switch_tables_(arena, 4, kGrowableArraySwitchTables),
      fill_array_data_(arena, 4, kGrowableArrayFillArrayData),
      throw_launchpads_(arena, 2048, kGrowableArrayThrowLaunchPads),
      suspend_launchpads_(arena, 4, kGrowableArraySuspendLaunchPads),
      intrinsic_launchpads_(arena, 2048, kGrowableArrayMisc),
      boundary_map_(arena, 0, kGrowableArrayMisc),
      data_offset_(0),
      total_size_(0),
      block_label_list_(NULL),
      current_dalvik_offset_(0),
      reg_pool_(NULL),
      live_sreg_(0),
      num_core_spills_(0),
      num_fp_spills_(0),
      frame_size_(0),
      core_spill_mask_(0),
      fp_spill_mask_(0),
      first_lir_insn_(NULL),
      last_lir_insn_(NULL) {
  promotion_map_ = static_cast<PromotionMap*>
      (arena_->Alloc((cu_->num_dalvik_registers + cu_->num_compiler_temps + 1) *
                     sizeof(promotion_map_[0]), ArenaAllocator::kAllocRegAlloc));
  // Pre-fill with nulls.
  boundary_map_.SetSize(cu->code_item->insns_size_in_code_units_);
}

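/* Main entry point: allocate registers, lower MIR to LIR and assemble machine code */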
void Mir2Lir::Materialize() {
  CompilerInitializeRegAlloc();  // Needs to happen after SSA naming

  /* Allocate Registers using simple local allocation scheme */
  SimpleRegAlloc();

  if (mir_graph_->IsSpecialCase()) {
    /*
     * Custom codegen for special cases.  If for any reason the
     * special codegen doesn't succeed, first_lir_insn_ will be
     * set to NULL.
     */
    SpecialMIR2LIR(mir_graph_->GetSpecialCase());
  }

  /* Convert MIR to LIR, etc. */
  if (first_lir_insn_ == NULL) {
    MethodMIR2LIR();
  }

  /* Method is not empty */
  if (first_lir_insn_) {
    // Mark the targets of switch statement case labels.
    ProcessSwitchTables();

    /* Convert LIR into machine code. */
    AssembleLIR();

    if (cu_->verbose) {
      CodegenDump();
    }
  }
}

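/*
 * Package the assembled code, spill masks, vmap table, mapping table and
 * native GC map into a CompiledMethod for the compiler driver.
 */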
CompiledMethod* Mir2Lir::GetCompiledMethod() {
  // Combine vmap tables - core regs, then fp regs - into vmap_table
  std::vector<uint16_t> raw_vmap_table;
  // Core regs may have been inserted out of order - sort first
  std::sort(core_vmap_table_.begin(), core_vmap_table_.end());
  for (size_t i = 0; i < core_vmap_table_.size(); ++i) {
    // Copy, stripping out the phys register sort key
    raw_vmap_table.push_back(~(-1 << VREG_NUM_WIDTH) & core_vmap_table_[i]);
  }
  // If we have a frame, push a marker to take place of lr
  if (frame_size_ > 0) {
    raw_vmap_table.push_back(INVALID_VREG);
  } else {
    DCHECK_EQ(__builtin_popcount(core_spill_mask_), 0);
    DCHECK_EQ(__builtin_popcount(fp_spill_mask_), 0);
  }
  // Combine vmap tables - core regs, then fp regs. fp regs already sorted
  for (uint32_t i = 0; i < fp_vmap_table_.size(); i++) {
    raw_vmap_table.push_back(fp_vmap_table_[i]);
  }
  UnsignedLeb128EncodingVector vmap_encoder;
  // Prefix the encoded data with its size.
  vmap_encoder.PushBack(raw_vmap_table.size());
  for (uint16_t cur : raw_vmap_table) {
    vmap_encoder.PushBack(cur);
  }
  CompiledMethod* result =
      new CompiledMethod(*cu_->compiler_driver, cu_->instruction_set, code_buffer_, frame_size_,
                         core_spill_mask_, fp_spill_mask_, encoded_mapping_table_.GetData(),
                         vmap_encoder.GetData(), native_gc_map_);
  return result;
}

int Mir2Lir::ComputeFrameSize() {
  /* Figure out the frame size */
  static const uint32_t kAlignMask = kStackAlignment - 1;
  uint32_t size = (num_core_spills_ + num_fp_spills_ +
                   1 /* filler word */ + cu_->num_regs + cu_->num_outs +
                   cu_->num_compiler_temps + 1 /* cur_method* */)
                   * sizeof(uint32_t);
  /* Align and set */
  return (size + kAlignMask) & ~(kAlignMask);
}

/*
 * Append an LIR instruction to the LIR list maintained by a compilation
 * unit
 */
void Mir2Lir::AppendLIR(LIR* lir) {
  if (first_lir_insn_ == NULL) {
    DCHECK(last_lir_insn_ == NULL);
    last_lir_insn_ = first_lir_insn_ = lir;
    lir->prev = lir->next = NULL;
  } else {
    last_lir_insn_->next = lir;
    lir->prev = last_lir_insn_;
    lir->next = NULL;
    last_lir_insn_ = lir;
  }
}

/*
 * Insert an LIR instruction before the current instruction, which cannot be the
 * first instruction.
 *
 * prev_lir <-> new_lir <-> current_lir
 */
void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir) {
  DCHECK(current_lir->prev != NULL);
  LIR* prev_lir = current_lir->prev;

  prev_lir->next = new_lir;
  new_lir->prev = prev_lir;
  new_lir->next = current_lir;
  current_lir->prev = new_lir;
}

/*
 * Insert an LIR instruction after the current instruction, which cannot be the
 * last instruction.
 *
 * current_lir -> new_lir -> old_next
 */
void Mir2Lir::InsertLIRAfter(LIR* current_lir, LIR* new_lir) {
  new_lir->prev = current_lir;
  new_lir->next = current_lir->next;
  current_lir->next = new_lir;
  new_lir->next->prev = new_lir;
}


}  // namespace art