codegen_util.cc revision b48819db07f9a0992a72173380c24249d7fc648a
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "dex/compiler_internals.h"
18#include "dex_file-inl.h"
19#include "gc_map.h"
20#include "mapping_table.h"
21#include "mir_to_lir-inl.h"
22#include "verifier/dex_gc_map.h"
23#include "verifier/method_verifier.h"
24
25namespace art {
26
27bool Mir2Lir::IsInexpensiveConstant(RegLocation rl_src) {
28  bool res = false;
29  if (rl_src.is_const) {
30    if (rl_src.wide) {
31      if (rl_src.fp) {
32         res = InexpensiveConstantDouble(mir_graph_->ConstantValueWide(rl_src));
33      } else {
34         res = InexpensiveConstantLong(mir_graph_->ConstantValueWide(rl_src));
35      }
36    } else {
37      if (rl_src.fp) {
38         res = InexpensiveConstantFloat(mir_graph_->ConstantValue(rl_src));
39      } else {
40         res = InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src));
41      }
42    }
43  }
44  return res;
45}
46
// Mark 'inst' as a safepoint: give it a full def mask (ENCODE_ALL) —
// presumably so no code is scheduled across it — and append a
// kPseudoSafepointPC label that records the native PC at this point.
void Mir2Lir::MarkSafepointPC(LIR* inst) {
  DCHECK(!inst->flags.use_def_invalid);
  inst->u.m.def_mask = ENCODE_ALL;
  // NewLIR0 appends the pseudo-op at the current insertion point; it is
  // expected to come back with a full def mask as well.
  LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC);
  DCHECK_EQ(safepoint_pc->u.m.def_mask, ENCODE_ALL);
}
53
// Thin wrapper: ask the compiler driver whether this instance field access
// can be compiled with fast-path info.  On success the driver fills in
// field_offset and is_volatile (out-params).
bool Mir2Lir::FastInstance(uint32_t field_idx, bool is_put, int* field_offset, bool* is_volatile) {
  return cu_->compiler_driver->ComputeInstanceFieldInfo(
      field_idx, mir_graph_->GetCurrentDexCompilationUnit(), is_put, field_offset, is_volatile);
}
58
59/* Remove a LIR from the list. */
60void Mir2Lir::UnlinkLIR(LIR* lir) {
61  if (UNLIKELY(lir == first_lir_insn_)) {
62    first_lir_insn_ = lir->next;
63    if (lir->next != NULL) {
64      lir->next->prev = NULL;
65    } else {
66      DCHECK(lir->next == NULL);
67      DCHECK(lir == last_lir_insn_);
68      last_lir_insn_ = NULL;
69    }
70  } else if (lir == last_lir_insn_) {
71    last_lir_insn_ = lir->prev;
72    lir->prev->next = NULL;
73  } else if ((lir->prev != NULL) && (lir->next != NULL)) {
74    lir->prev->next = lir->next;
75    lir->next->prev = lir->prev;
76  }
77}
78
79/* Convert an instruction to a NOP */
80void Mir2Lir::NopLIR(LIR* lir) {
81  lir->flags.is_nop = true;
82  if (!cu_->verbose) {
83    UnlinkLIR(lir);
84  }
85}
86
87void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type) {
88  uint64_t *mask_ptr;
89  uint64_t mask = ENCODE_MEM;
90  DCHECK(GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE));
91  DCHECK(!lir->flags.use_def_invalid);
92  if (is_load) {
93    mask_ptr = &lir->u.m.use_mask;
94  } else {
95    mask_ptr = &lir->u.m.def_mask;
96  }
97  /* Clear out the memref flags */
98  *mask_ptr &= ~mask;
99  /* ..and then add back the one we need */
100  switch (mem_type) {
101    case kLiteral:
102      DCHECK(is_load);
103      *mask_ptr |= ENCODE_LITERAL;
104      break;
105    case kDalvikReg:
106      *mask_ptr |= ENCODE_DALVIK_REG;
107      break;
108    case kHeapRef:
109      *mask_ptr |= ENCODE_HEAP_REF;
110      break;
111    case kMustNotAlias:
112      /* Currently only loads can be marked as kMustNotAlias */
113      DCHECK(!(GetTargetInstFlags(lir->opcode) & IS_STORE));
114      *mask_ptr |= ENCODE_MUST_NOT_ALIAS;
115      break;
116    default:
117      LOG(FATAL) << "Oat: invalid memref kind - " << mem_type;
118  }
119}
120
/*
 * Mark load/store instructions that access Dalvik registers through the stack.
 */
void Mir2Lir::AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load,
                                      bool is64bit) {
  // Tag the appropriate resource mask (use for load, def for store) as a
  // Dalvik vreg access.
  SetMemRefType(lir, is_load, kDalvikReg);

  /*
   * Store the Dalvik register id in alias_info. Mark the MSB if it is a 64-bit
   * access.
   */
  lir->flags.alias_info = ENCODE_ALIAS_INFO(reg_id, is64bit);
}
134
135/*
136 * Debugging macros
137 */
138#define DUMP_RESOURCE_MASK(X)
139
/* Pretty-print a LIR instruction */
// Pseudo-ops get individual human-readable labels; real target instructions
// are formatted via the target's name/format strings.  base_addr is added to
// lir->offset when printing absolute addresses.
void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) {
  int offset = lir->offset;
  int dest = lir->operands[0];
  // Nops are normally suppressed; the kDebugShowNops flag forces them out.
  const bool dump_nop = (cu_->enable_debug & (1 << kDebugShowNops));

  /* Handle pseudo-ops individually, and all regular insns as a group */
  switch (lir->opcode) {
    case kPseudoMethodEntry:
      LOG(INFO) << "-------- method entry "
                << PrettyMethod(cu_->method_idx, *cu_->dex_file);
      break;
    case kPseudoMethodExit:
      LOG(INFO) << "-------- Method_Exit";
      break;
    case kPseudoBarrier:
      LOG(INFO) << "-------- BARRIER";
      break;
    case kPseudoEntryBlock:
      LOG(INFO) << "-------- entry offset: 0x" << std::hex << dest;
      break;
    case kPseudoDalvikByteCodeBoundary:
      if (lir->operands[0] == 0) {
         // No disassembly string recorded; substitute a placeholder so the
         // reinterpret_cast below stays valid.
         lir->operands[0] = reinterpret_cast<uintptr_t>("No instruction string");
      }
      LOG(INFO) << "-------- dalvik offset: 0x" << std::hex
                << lir->dalvik_offset << " @ " << reinterpret_cast<char*>(lir->operands[0]);
      break;
    case kPseudoExitBlock:
      LOG(INFO) << "-------- exit offset: 0x" << std::hex << dest;
      break;
    case kPseudoPseudoAlign4:
      LOG(INFO) << reinterpret_cast<uintptr_t>(base_addr) + offset << " (0x" << std::hex
                << offset << "): .align4";
      break;
    case kPseudoEHBlockLabel:
      LOG(INFO) << "Exception_Handling:";
      break;
    case kPseudoTargetLabel:
    case kPseudoNormalBlockLabel:
      // Labels are identified by the LIR node's address.
      LOG(INFO) << "L" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoThrowTarget:
      LOG(INFO) << "LT" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoIntrinsicRetry:
      LOG(INFO) << "IR" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoSuspendTarget:
      LOG(INFO) << "LS" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoSafepointPC:
      LOG(INFO) << "LsafepointPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
      break;
    case kPseudoExportedPC:
      LOG(INFO) << "LexportedPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
      break;
    case kPseudoCaseLabel:
      // Case key printed in both hex and decimal.
      LOG(INFO) << "LC" << reinterpret_cast<void*>(lir) << ": Case target 0x"
                << std::hex << lir->operands[0] << "|" << std::dec <<
        lir->operands[0];
      break;
    default:
      // Regular target instruction: render via the target-supplied name and
      // operand format strings.
      if (lir->flags.is_nop && !dump_nop) {
        break;
      } else {
        std::string op_name(BuildInsnString(GetTargetInstName(lir->opcode),
                                               lir, base_addr));
        std::string op_operands(BuildInsnString(GetTargetInstFmt(lir->opcode),
                                                    lir, base_addr));
        LOG(INFO) << StringPrintf("%05x: %-9s%s%s",
                                  reinterpret_cast<unsigned int>(base_addr + offset),
                                  op_name.c_str(), op_operands.c_str(),
                                  lir->flags.is_nop ? "(nop)" : "");
      }
      break;
  }

  // Optionally dump the use/def resource masks (no-op unless
  // DUMP_RESOURCE_MASK is defined to something).
  if (lir->u.m.use_mask && (!lir->flags.is_nop || dump_nop)) {
    DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->u.m.use_mask, "use"));
  }
  if (lir->u.m.def_mask && (!lir->flags.is_nop || dump_nop)) {
    DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->u.m.def_mask, "def"));
  }
}
225
// Log where each Dalvik vreg / compiler temp lives after promotion: a core
// register ("rN"), an FP register suffix (" : sN"), or its stack home
// ("SP+offset").
void Mir2Lir::DumpPromotionMap() {
  // +1 appears to cover the Method* s-reg handled below — confirm against
  // promotion_map_ sizing.
  int num_regs = cu_->num_dalvik_registers + cu_->num_compiler_temps + 1;
  for (int i = 0; i < num_regs; i++) {
    PromotionMap v_reg_map = promotion_map_[i];
    std::string buf;
    if (v_reg_map.fp_location == kLocPhysReg) {
      // Promoted to an FP register; mask down to the target's FP reg number.
      StringAppendF(&buf, " : s%d", v_reg_map.FpReg & FpRegMask());
    }

    // buf3 holds the vreg's display name: plain number, "Method*", or a
    // compiler-temp index.
    std::string buf3;
    if (i < cu_->num_dalvik_registers) {
      StringAppendF(&buf3, "%02d", i);
    } else if (i == mir_graph_->GetMethodSReg()) {
      buf3 = "Method*";
    } else {
      StringAppendF(&buf3, "ct%d", i - cu_->num_dalvik_registers);
    }

    LOG(INFO) << StringPrintf("V[%s] -> %s%d%s", buf3.c_str(),
                              v_reg_map.core_location == kLocPhysReg ?
                              "r" : "SP+", v_reg_map.core_location == kLocPhysReg ?
                              v_reg_map.core_reg : SRegOffset(i),
                              buf.c_str());
  }
}
251
/* Dump a mapping table */
// Prints 'v' as a C-style array initializer named after the method.  Entries
// are (pc, pc) pairs — see CreateMappingTables — hence the stride of 2.
void Mir2Lir::DumpMappingTable(const char* table_name, const char* descriptor,
                               const char* name, const Signature& signature,
                               const std::vector<uint32_t>& v) {
  if (v.size() > 0) {
    std::string line(StringPrintf("\n  %s %s%s_%s_table[%zu] = {", table_name,
                     descriptor, name, signature.ToString().c_str(), v.size()));
    // ';' from the descriptor would be illegal in a C identifier; flatten it.
    std::replace(line.begin(), line.end(), ';', '_');
    LOG(INFO) << line;
    for (uint32_t i = 0; i < v.size(); i+=2) {
      line = StringPrintf("    {0x%05x, 0x%04x},", v[i], v[i+1]);
      LOG(INFO) << line;
    }
    LOG(INFO) <<"  };\n\n";
  }
}
268
/* Dump instructions and constant pool contents */
// Emits a full post-codegen report: frame statistics, the promotion map,
// every LIR instruction, the literal pool, and both mapping tables.
void Mir2Lir::CodegenDump() {
  LOG(INFO) << "Dumping LIR insns for "
            << PrettyMethod(cu_->method_idx, *cu_->dex_file);
  LIR* lir_insn;
  int insns_size = cu_->code_item->insns_size_in_code_units_;

  LOG(INFO) << "Regs (excluding ins) : " << cu_->num_regs;
  LOG(INFO) << "Ins          : " << cu_->num_ins;
  LOG(INFO) << "Outs         : " << cu_->num_outs;
  LOG(INFO) << "CoreSpills       : " << num_core_spills_;
  LOG(INFO) << "FPSpills       : " << num_fp_spills_;
  LOG(INFO) << "CompilerTemps    : " << cu_->num_compiler_temps;
  LOG(INFO) << "Frame size       : " << frame_size_;
  // Dalvik size in bytes: code units are 16 bits each.
  LOG(INFO) << "code size is " << total_size_ <<
    " bytes, Dalvik size is " << insns_size * 2;
  LOG(INFO) << "expansion factor: "
            << static_cast<float>(total_size_) / static_cast<float>(insns_size * 2);
  DumpPromotionMap();
  for (lir_insn = first_lir_insn_; lir_insn != NULL; lir_insn = lir_insn->next) {
    DumpLIRInsn(lir_insn, 0);
  }
  for (lir_insn = literal_list_; lir_insn != NULL; lir_insn = lir_insn->next) {
    LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lir_insn->offset, lir_insn->offset,
                              lir_insn->operands[0]);
  }

  const DexFile::MethodId& method_id =
      cu_->dex_file->GetMethodId(cu_->method_idx);
  const Signature signature = cu_->dex_file->GetMethodSignature(method_id);
  const char* name = cu_->dex_file->GetMethodName(method_id);
  const char* descriptor(cu_->dex_file->GetMethodDeclaringClassDescriptor(method_id));

  // Dump mapping tables
  DumpMappingTable("PC2Dex_MappingTable", descriptor, name, signature, pc2dex_mapping_table_);
  DumpMappingTable("Dex2PC_MappingTable", descriptor, name, signature, dex2pc_mapping_table_);
}
306
307/*
308 * Search the existing constants in the literal pool for an exact or close match
309 * within specified delta (greater or equal to 0).
310 */
311LIR* Mir2Lir::ScanLiteralPool(LIR* data_target, int value, unsigned int delta) {
312  while (data_target) {
313    if ((static_cast<unsigned>(value - data_target->operands[0])) <= delta)
314      return data_target;
315    data_target = data_target->next;
316  }
317  return NULL;
318}
319
320/* Search the existing constants in the literal pool for an exact wide match */
321LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) {
322  bool lo_match = false;
323  LIR* lo_target = NULL;
324  while (data_target) {
325    if (lo_match && (data_target->operands[0] == val_hi)) {
326      // Record high word in case we need to expand this later.
327      lo_target->operands[1] = val_hi;
328      return lo_target;
329    }
330    lo_match = false;
331    if (data_target->operands[0] == val_lo) {
332      lo_match = true;
333      lo_target = data_target;
334    }
335    data_target = data_target->next;
336  }
337  return NULL;
338}
339
340/*
341 * The following are building blocks to insert constants into the pool or
342 * instruction streams.
343 */
344
345/* Add a 32-bit constant to the constant pool */
346LIR* Mir2Lir::AddWordData(LIR* *constant_list_p, int value) {
347  /* Add the constant to the literal pool */
348  if (constant_list_p) {
349    LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocData));
350    new_value->operands[0] = value;
351    new_value->next = *constant_list_p;
352    *constant_list_p = new_value;
353    estimated_native_code_size_ += sizeof(value);
354    return new_value;
355  }
356  return NULL;
357}
358
/* Add a 64-bit constant to the constant pool or mixed with code */
LIR* Mir2Lir::AddWideData(LIR* *constant_list_p, int val_lo, int val_hi) {
  // AddWordData prepends, so pushing hi first leaves the pair in (lo, hi)
  // order in the list; the returned node is the low word.
  AddWordData(constant_list_p, val_hi);
  return AddWordData(constant_list_p, val_lo);
}
364
/* Append a 32-bit value to 'buf' in little-endian byte order. */
static void PushWord(std::vector<uint8_t>&buf, int data) {
  for (int shift = 0; shift < 32; shift += 8) {
    buf.push_back((data >> shift) & 0xff);
  }
}
371
/* Zero-pad 'buf' until it is at least 'offset' bytes long; never shrinks. */
static void AlignBuffer(std::vector<uint8_t>&buf, size_t offset) {
  if (buf.size() < offset) {
    buf.resize(offset, 0);
  }
}
377
/* Write the literal pool to the output stream */
// Appends, after aligning to data_offset_, the plain data literals followed
// by the code-call and method literals.  For the latter two, a patch record
// is registered with the compiler driver and a placeholder word (the address
// of the target MethodId) is emitted in place of the final value.
void Mir2Lir::InstallLiteralPools() {
  AlignBuffer(code_buffer_, data_offset_);
  LIR* data_lir = literal_list_;
  while (data_lir != NULL) {
    PushWord(code_buffer_, data_lir->operands[0]);
    data_lir = NEXT_LIR(data_lir);
  }
  // Push code and method literals, record offsets for the compiler to patch.
  data_lir = code_literal_list_;
  while (data_lir != NULL) {
    // operands[0] = target method index, operands[1] = invoke type.
    uint32_t target = data_lir->operands[0];
    cu_->compiler_driver->AddCodePatch(cu_->dex_file,
                                       cu_->class_def_idx,
                                       cu_->method_idx,
                                       cu_->invoke_type,
                                       target,
                                       static_cast<InvokeType>(data_lir->operands[1]),
                                       code_buffer_.size());
    const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
    // unique based on target to ensure code deduplication works
    uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
    PushWord(code_buffer_, unique_patch_value);
    data_lir = NEXT_LIR(data_lir);
  }
  // Same scheme for direct method pointers.
  data_lir = method_literal_list_;
  while (data_lir != NULL) {
    uint32_t target = data_lir->operands[0];
    cu_->compiler_driver->AddMethodPatch(cu_->dex_file,
                                         cu_->class_def_idx,
                                         cu_->method_idx,
                                         cu_->invoke_type,
                                         target,
                                         static_cast<InvokeType>(data_lir->operands[1]),
                                         code_buffer_.size());
    const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
    // unique based on target to ensure code deduplication works
    uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
    PushWord(code_buffer_, unique_patch_value);
    data_lir = NEXT_LIR(data_lir);
  }
}
420
421/* Write the switch tables to the output stream */
422void Mir2Lir::InstallSwitchTables() {
423  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
424  while (true) {
425    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
426    if (tab_rec == NULL) break;
427    AlignBuffer(code_buffer_, tab_rec->offset);
428    /*
429     * For Arm, our reference point is the address of the bx
430     * instruction that does the launch, so we have to subtract
431     * the auto pc-advance.  For other targets the reference point
432     * is a label, so we can use the offset as-is.
433     */
434    int bx_offset = INVALID_OFFSET;
435    switch (cu_->instruction_set) {
436      case kThumb2:
437        DCHECK(tab_rec->anchor->flags.fixup != kFixupNone);
438        bx_offset = tab_rec->anchor->offset + 4;
439        break;
440      case kX86:
441        bx_offset = 0;
442        break;
443      case kMips:
444        bx_offset = tab_rec->anchor->offset;
445        break;
446      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
447    }
448    if (cu_->verbose) {
449      LOG(INFO) << "Switch table for offset 0x" << std::hex << bx_offset;
450    }
451    if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
452      const int* keys = reinterpret_cast<const int*>(&(tab_rec->table[2]));
453      for (int elems = 0; elems < tab_rec->table[1]; elems++) {
454        int disp = tab_rec->targets[elems]->offset - bx_offset;
455        if (cu_->verbose) {
456          LOG(INFO) << "  Case[" << elems << "] key: 0x"
457                    << std::hex << keys[elems] << ", disp: 0x"
458                    << std::hex << disp;
459        }
460        PushWord(code_buffer_, keys[elems]);
461        PushWord(code_buffer_,
462          tab_rec->targets[elems]->offset - bx_offset);
463      }
464    } else {
465      DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
466                static_cast<int>(Instruction::kPackedSwitchSignature));
467      for (int elems = 0; elems < tab_rec->table[1]; elems++) {
468        int disp = tab_rec->targets[elems]->offset - bx_offset;
469        if (cu_->verbose) {
470          LOG(INFO) << "  Case[" << elems << "] disp: 0x"
471                    << std::hex << disp;
472        }
473        PushWord(code_buffer_, tab_rec->targets[elems]->offset - bx_offset);
474      }
475    }
476  }
477}
478
479/* Write the fill array dta to the output stream */
480void Mir2Lir::InstallFillArrayData() {
481  GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
482  while (true) {
483    Mir2Lir::FillArrayData *tab_rec = iterator.Next();
484    if (tab_rec == NULL) break;
485    AlignBuffer(code_buffer_, tab_rec->offset);
486    for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
487      code_buffer_.push_back(tab_rec->table[i] & 0xFF);
488      code_buffer_.push_back((tab_rec->table[i] >> 8) & 0xFF);
489    }
490  }
491}
492
493static int AssignLiteralOffsetCommon(LIR* lir, int offset) {
494  for (; lir != NULL; lir = lir->next) {
495    lir->offset = offset;
496    offset += 4;
497  }
498  return offset;
499}
500
// Make sure we have a code address for every declared catch entry
// Cross-checks the verifier's catch dex-pc set against the generated
// dex2pc mapping table in both directions; logs every discrepancy and
// returns false if any is found.  Table layout: (native pc, dex pc) pairs.
bool Mir2Lir::VerifyCatchEntries() {
  bool success = true;
  // Direction 1: every verifier-reported catch pc must appear in the table.
  for (std::set<uint32_t>::const_iterator it = mir_graph_->catches_.begin();
       it != mir_graph_->catches_.end(); ++it) {
    uint32_t dex_pc = *it;
    bool found = false;
    for (size_t i = 0; i < dex2pc_mapping_table_.size(); i += 2) {
      // i+1 indexes the dex pc half of each pair.
      if (dex_pc == dex2pc_mapping_table_[i+1]) {
        found = true;
        break;
      }
    }
    if (!found) {
      LOG(INFO) << "Missing native PC for catch entry @ 0x" << std::hex << dex_pc;
      success = false;
    }
  }
  // Now, try in the other direction
  for (size_t i = 0; i < dex2pc_mapping_table_.size(); i += 2) {
    uint32_t dex_pc = dex2pc_mapping_table_[i+1];
    if (mir_graph_->catches_.find(dex_pc) == mir_graph_->catches_.end()) {
      LOG(INFO) << "Unexpected catch entry @ dex pc 0x" << std::hex << dex_pc;
      success = false;
    }
  }
  if (!success) {
    LOG(INFO) << "Bad dex2pcMapping table in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
    LOG(INFO) << "Entries @ decode: " << mir_graph_->catches_.size() << ", Entries in table: "
              << dex2pc_mapping_table_.size()/2;
  }
  return success;
}
534
535
// Build the pc<->dex mapping tables from the finished LIR stream: safepoint
// pseudo-ops feed pc2dex, exported-pc pseudo-ops feed dex2pc.  Both raw
// tables store flat (native pc, dex pc) pairs; the combined encoded table
// gets a two-word header (total entries, pc2dex entries) followed by both
// pair lists.
void Mir2Lir::CreateMappingTables() {
  for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
      pc2dex_mapping_table_.push_back(tgt_lir->offset);
      pc2dex_mapping_table_.push_back(tgt_lir->dalvik_offset);
    }
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
      dex2pc_mapping_table_.push_back(tgt_lir->offset);
      dex2pc_mapping_table_.push_back(tgt_lir->dalvik_offset);
    }
  }
  if (kIsDebugBuild) {
    CHECK(VerifyCatchEntries());
  }
  // Both tables must hold whole pairs.
  CHECK_EQ(pc2dex_mapping_table_.size() & 1, 0U);
  CHECK_EQ(dex2pc_mapping_table_.size() & 1, 0U);
  uint32_t total_entries = (pc2dex_mapping_table_.size() + dex2pc_mapping_table_.size()) / 2;
  uint32_t pc2dex_entries = pc2dex_mapping_table_.size() / 2;
  encoded_mapping_table_.PushBack(total_entries);
  encoded_mapping_table_.PushBack(pc2dex_entries);
  encoded_mapping_table_.InsertBack(pc2dex_mapping_table_.begin(), pc2dex_mapping_table_.end());
  encoded_mapping_table_.InsertBack(dex2pc_mapping_table_.begin(), dex2pc_mapping_table_.end());
  if (kIsDebugBuild) {
    // Verify the encoded table holds the expected data.
    MappingTable table(&encoded_mapping_table_.GetData()[0]);
    CHECK_EQ(table.TotalSize(), total_entries);
    CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
    CHECK_EQ(table.DexToPcSize(), dex2pc_mapping_table_.size() / 2);
    MappingTable::PcToDexIterator it = table.PcToDexBegin();
    // Note: i advances twice per iteration (once in the body, once in the
    // header) to consume the (native pc, dex pc) halves of each pair while
    // the iterator advances once per pair.
    for (uint32_t i = 0; i < pc2dex_mapping_table_.size(); ++i, ++it) {
      CHECK_EQ(pc2dex_mapping_table_.at(i), it.NativePcOffset());
      ++i;
      CHECK_EQ(pc2dex_mapping_table_.at(i), it.DexPc());
    }
    MappingTable::DexToPcIterator it2 = table.DexToPcBegin();
    for (uint32_t i = 0; i < dex2pc_mapping_table_.size(); ++i, ++it2) {
      CHECK_EQ(dex2pc_mapping_table_.at(i), it2.NativePcOffset());
      ++i;
      CHECK_EQ(dex2pc_mapping_table_.at(i), it2.DexPc());
    }
  }
}
578
// Builds the serialized native-PC -> reference-bitmap hash table read back
// via NativePcOffsetToReferenceMap.  Layout: a 4-byte header -- byte 0 packs
// the native-offset width (3 bits) with the low bits of the references
// width, byte 1 holds the remaining references-width bits, bytes 2-3 the
// entry count -- followed by fixed-width open-addressed entries of
// native_offset_width_ + references_width_ bytes.  Collisions are resolved
// by linear probing (see AddEntry).
class NativePcToReferenceMapBuilder {
 public:
  NativePcToReferenceMapBuilder(std::vector<uint8_t>* table,
                                size_t entries, uint32_t max_native_offset,
                                size_t references_width) : entries_(entries),
                                references_width_(references_width), in_use_(entries),
                                table_(table) {
    // Compute width in bytes needed to hold max_native_offset.
    native_offset_width_ = 0;
    while (max_native_offset != 0) {
      native_offset_width_++;
      max_native_offset >>= 8;
    }
    // Resize table and set up header.
    table->resize((EntryWidth() * entries) + sizeof(uint32_t));
    CHECK_LT(native_offset_width_, 1U << 3);
    (*table)[0] = native_offset_width_ & 7;
    CHECK_LT(references_width_, 1U << 13);
    (*table)[0] |= (references_width_ << 3) & 0xFF;
    (*table)[1] = (references_width_ >> 5) & 0xFF;
    CHECK_LT(entries, 1U << 16);
    (*table)[2] = entries & 0xFF;
    (*table)[3] = (entries >> 8) & 0xFF;
  }

  // Insert one (native offset, reference bitmap) entry.  Hashes the offset
  // to a slot, then linearly probes for the first free one; the table is
  // sized to 'entries' so a free slot always exists if AddEntry is called at
  // most 'entries' times.
  void AddEntry(uint32_t native_offset, const uint8_t* references) {
    size_t table_index = TableIndex(native_offset);
    while (in_use_[table_index]) {
      table_index = (table_index + 1) % entries_;
    }
    in_use_[table_index] = true;
    SetNativeOffset(table_index, native_offset);
    DCHECK_EQ(native_offset, GetNativeOffset(table_index));
    SetReferences(table_index, references);
  }

 private:
  // Preferred slot for a native offset (must match the reader's hash).
  size_t TableIndex(uint32_t native_offset) {
    return NativePcOffsetToReferenceMap::Hash(native_offset) % entries_;
  }

  // Read back the little-endian, variable-width native offset of a slot.
  uint32_t GetNativeOffset(size_t table_index) {
    uint32_t native_offset = 0;
    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
    for (size_t i = 0; i < native_offset_width_; i++) {
      native_offset |= (*table_)[table_offset + i] << (i * 8);
    }
    return native_offset;
  }

  // Store the native offset little-endian in the slot's leading bytes.
  void SetNativeOffset(size_t table_index, uint32_t native_offset) {
    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
    for (size_t i = 0; i < native_offset_width_; i++) {
      (*table_)[table_offset + i] = (native_offset >> (i * 8)) & 0xFF;
    }
  }

  // Copy the reference bitmap into the slot, after the native offset.
  void SetReferences(size_t table_index, const uint8_t* references) {
    size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
    memcpy(&(*table_)[table_offset + native_offset_width_], references, references_width_);
  }

  // Bytes per hash-table slot.
  size_t EntryWidth() const {
    return native_offset_width_ + references_width_;
  }

  // Number of entries in the table.
  const size_t entries_;
  // Number of bytes used to encode the reference bitmap.
  const size_t references_width_;
  // Number of bytes used to encode a native offset.
  size_t native_offset_width_;
  // Entries that are in use.
  std::vector<bool> in_use_;
  // The table we're building.
  std::vector<uint8_t>* const table_;
};
656
// Build the native GC map: for every safepoint in the pc2dex table, look up
// the verifier's reference bitmap for that dex pc and record it against the
// native offset via NativePcToReferenceMapBuilder.
void Mir2Lir::CreateNativeGcMap() {
  const std::vector<uint32_t>& mapping_table = pc2dex_mapping_table_;
  // Find the largest native offset so the builder can size its offset field.
  uint32_t max_native_offset = 0;
  for (size_t i = 0; i < mapping_table.size(); i += 2) {
    uint32_t native_offset = mapping_table[i + 0];
    if (native_offset > max_native_offset) {
      max_native_offset = native_offset;
    }
  }
  MethodReference method_ref(cu_->dex_file, cu_->method_idx);
  const std::vector<uint8_t>* gc_map_raw = verifier::MethodVerifier::GetDexGcMap(method_ref);
  // The first 4 bytes of the raw map are skipped — presumably a header;
  // confirm against verifier::DexPcToReferenceMap's expected layout.
  verifier::DexPcToReferenceMap dex_gc_map(&(*gc_map_raw)[4], gc_map_raw->size() - 4);
  // Compute native offset to references size.
  NativePcToReferenceMapBuilder native_gc_map_builder(&native_gc_map_,
                                                      mapping_table.size() / 2, max_native_offset,
                                                      dex_gc_map.RegWidth());

  for (size_t i = 0; i < mapping_table.size(); i += 2) {
    uint32_t native_offset = mapping_table[i + 0];
    uint32_t dex_pc = mapping_table[i + 1];
    const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
    CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc;
    native_gc_map_builder.AddEntry(native_offset, references);
  }
}
682
683/* Determine the offset of each literal field */
684int Mir2Lir::AssignLiteralOffset(int offset) {
685  offset = AssignLiteralOffsetCommon(literal_list_, offset);
686  offset = AssignLiteralOffsetCommon(code_literal_list_, offset);
687  offset = AssignLiteralOffsetCommon(method_literal_list_, offset);
688  return offset;
689}
690
691int Mir2Lir::AssignSwitchTablesOffset(int offset) {
692  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
693  while (true) {
694    Mir2Lir::SwitchTable *tab_rec = iterator.Next();
695    if (tab_rec == NULL) break;
696    tab_rec->offset = offset;
697    if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
698      offset += tab_rec->table[1] * (sizeof(int) * 2);
699    } else {
700      DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
701                static_cast<int>(Instruction::kPackedSwitchSignature));
702      offset += tab_rec->table[1] * sizeof(int);
703    }
704  }
705  return offset;
706}
707
708int Mir2Lir::AssignFillArrayDataOffset(int offset) {
709  GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
710  while (true) {
711    Mir2Lir::FillArrayData *tab_rec = iterator.Next();
712    if (tab_rec == NULL) break;
713    tab_rec->offset = offset;
714    offset += tab_rec->size;
715    // word align
716    offset = (offset + 3) & ~3;
717    }
718  return offset;
719}
720
721/*
722 * Insert a kPseudoCaseLabel at the beginning of the Dalvik
723 * offset vaddr if pretty-printing, otherise use the standard block
724 * label.  The selected label will be used to fix up the case
725 * branch table during the assembly phase.  All resource flags
726 * are set to prevent code motion.  KeyVal is just there for debugging.
727 */
728LIR* Mir2Lir::InsertCaseLabel(int vaddr, int keyVal) {
729  LIR* boundary_lir = &block_label_list_[mir_graph_->FindBlock(vaddr)->id];
730  LIR* res = boundary_lir;
731  if (cu_->verbose) {
732    // Only pay the expense if we're pretty-printing.
733    LIR* new_label = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocLIR));
734    new_label->dalvik_offset = vaddr;
735    new_label->opcode = kPseudoCaseLabel;
736    new_label->operands[0] = keyVal;
737    new_label->flags.fixup = kFixupLabel;
738    DCHECK(!new_label->flags.use_def_invalid);
739    new_label->u.m.def_mask = ENCODE_ALL;
740    InsertLIRAfter(boundary_lir, new_label);
741    res = new_label;
742  }
743  return res;
744}
745
746void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable *tab_rec) {
747  const uint16_t* table = tab_rec->table;
748  int base_vaddr = tab_rec->vaddr;
749  const int *targets = reinterpret_cast<const int*>(&table[4]);
750  int entries = table[1];
751  int low_key = s4FromSwitchData(&table[2]);
752  for (int i = 0; i < entries; i++) {
753    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], i + low_key);
754  }
755}
756
757void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable *tab_rec) {
758  const uint16_t* table = tab_rec->table;
759  int base_vaddr = tab_rec->vaddr;
760  int entries = table[1];
761  const int* keys = reinterpret_cast<const int*>(&table[2]);
762  const int* targets = &keys[entries];
763  for (int i = 0; i < entries; i++) {
764    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], keys[i]);
765  }
766}
767
768void Mir2Lir::ProcessSwitchTables() {
769  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
770  while (true) {
771    Mir2Lir::SwitchTable *tab_rec = iterator.Next();
772    if (tab_rec == NULL) break;
773    if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) {
774      MarkPackedCaseLabels(tab_rec);
775    } else if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
776      MarkSparseCaseLabels(tab_rec);
777    } else {
778      LOG(FATAL) << "Invalid switch table";
779    }
780  }
781}
782
// Debug helper: log the contents of a sparse-switch payload.
void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table) {
  /*
   * Sparse switch data format:
   *  ushort ident = 0x0200   magic value
   *  ushort size       number of entries in the table; > 0
   *  int keys[size]      keys, sorted low-to-high; 32-bit aligned
   *  int targets[size]     branch targets, relative to switch opcode
   *
   * Total size is (2+size*4) 16-bit code units.
   */
  uint16_t ident = table[0];
  int entries = table[1];
  const int* keys = reinterpret_cast<const int*>(&table[2]);
  // Targets immediately follow the key array.
  const int* targets = &keys[entries];
  LOG(INFO) <<  "Sparse switch table - ident:0x" << std::hex << ident
            << ", entries: " << std::dec << entries;
  for (int i = 0; i < entries; i++) {
    LOG(INFO) << "  Key[" << keys[i] << "] -> 0x" << std::hex << targets[i];
  }
}
803
// Debug helper: log the contents of a packed-switch payload.
void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table) {
  /*
   * Packed switch data format:
   *  ushort ident = 0x0100   magic value
   *  ushort size       number of entries in the table
   *  int first_key       first (and lowest) switch case value
   *  int targets[size]     branch targets, relative to switch opcode
   *
   * Total size is (4+size*2) 16-bit code units.
   */
  uint16_t ident = table[0];
  const int* targets = reinterpret_cast<const int*>(&table[4]);
  int entries = table[1];
  // first_key occupies two 16-bit units starting at table[2].
  int low_key = s4FromSwitchData(&table[2]);
  LOG(INFO) << "Packed switch table - ident:0x" << std::hex << ident
            << ", entries: " << std::dec << entries << ", low_key: " << low_key;
  for (int i = 0; i < entries; i++) {
    LOG(INFO) << "  Key[" << (i + low_key) << "] -> 0x" << std::hex
              << targets[i];
  }
}
825
826/* Set up special LIR to mark a Dalvik byte-code instruction start for pretty printing */
827void Mir2Lir::MarkBoundary(int offset, const char* inst_str) {
828  NewLIR1(kPseudoDalvikByteCodeBoundary, reinterpret_cast<uintptr_t>(inst_str));
829}
830
831bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2) {
832  bool is_taken;
833  switch (opcode) {
834    case Instruction::IF_EQ: is_taken = (src1 == src2); break;
835    case Instruction::IF_NE: is_taken = (src1 != src2); break;
836    case Instruction::IF_LT: is_taken = (src1 < src2); break;
837    case Instruction::IF_GE: is_taken = (src1 >= src2); break;
838    case Instruction::IF_GT: is_taken = (src1 > src2); break;
839    case Instruction::IF_LE: is_taken = (src1 <= src2); break;
840    case Instruction::IF_EQZ: is_taken = (src1 == 0); break;
841    case Instruction::IF_NEZ: is_taken = (src1 != 0); break;
842    case Instruction::IF_LTZ: is_taken = (src1 < 0); break;
843    case Instruction::IF_GEZ: is_taken = (src1 >= 0); break;
844    case Instruction::IF_GTZ: is_taken = (src1 > 0); break;
845    case Instruction::IF_LEZ: is_taken = (src1 <= 0); break;
846    default:
847      LOG(FATAL) << "Unexpected opcode " << opcode;
848      is_taken = false;
849  }
850  return is_taken;
851}
852
853// Convert relation of src1/src2 to src2/src1
854ConditionCode Mir2Lir::FlipComparisonOrder(ConditionCode before) {
855  ConditionCode res;
856  switch (before) {
857    case kCondEq: res = kCondEq; break;
858    case kCondNe: res = kCondNe; break;
859    case kCondLt: res = kCondGt; break;
860    case kCondGt: res = kCondLt; break;
861    case kCondLe: res = kCondGe; break;
862    case kCondGe: res = kCondLe; break;
863    default:
864      res = static_cast<ConditionCode>(0);
865      LOG(FATAL) << "Unexpected ccode " << before;
866  }
867  return res;
868}
869
// TODO: move to mir_to_lir.cc
// Constructor: wires the codegen backend to its compilation unit and MIR
// graph, and zero/NULL-initializes all code-generation state.  Growable
// arrays are arena-backed with initial capacities sized by expected use
// (launchpad lists are large; switch/fill-array tables are rare).
Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
    : Backend(arena),
      literal_list_(NULL),
      method_literal_list_(NULL),
      code_literal_list_(NULL),
      first_fixup_(NULL),
      cu_(cu),
      mir_graph_(mir_graph),
      switch_tables_(arena, 4, kGrowableArraySwitchTables),
      fill_array_data_(arena, 4, kGrowableArrayFillArrayData),
      throw_launchpads_(arena, 2048, kGrowableArrayThrowLaunchPads),
      suspend_launchpads_(arena, 4, kGrowableArraySuspendLaunchPads),
      intrinsic_launchpads_(arena, 2048, kGrowableArrayMisc),
      tempreg_info_(arena, 20, kGrowableArrayMisc),
      reginfo_map_(arena, 64, kGrowableArrayMisc),
      data_offset_(0),
      total_size_(0),
      block_label_list_(NULL),
      current_dalvik_offset_(0),
      estimated_native_code_size_(0),
      reg_pool_(NULL),
      live_sreg_(0),
      num_core_spills_(0),
      num_fp_spills_(0),
      frame_size_(0),
      core_spill_mask_(0),
      fp_spill_mask_(0),
      first_lir_insn_(NULL),
      last_lir_insn_(NULL) {
  // One promotion-map slot per dalvik register and compiler temp, plus one
  // extra slot (the +1 below).
  promotion_map_ = static_cast<PromotionMap*>
      (arena_->Alloc((cu_->num_dalvik_registers  + cu_->num_compiler_temps + 1) *
                      sizeof(promotion_map_[0]), ArenaAllocator::kAllocRegAlloc));
}
904
905void Mir2Lir::Materialize() {
906  CompilerInitializeRegAlloc();  // Needs to happen after SSA naming
907
908  /* Allocate Registers using simple local allocation scheme */
909  SimpleRegAlloc();
910
911  if (mir_graph_->IsSpecialCase()) {
912      /*
913       * Custom codegen for special cases.  If for any reason the
914       * special codegen doesn't succeed, first_lir_insn_ will
915       * set to NULL;
916       */
917      SpecialMIR2LIR(mir_graph_->GetSpecialCase());
918    }
919
920  /* Convert MIR to LIR, etc. */
921  if (first_lir_insn_ == NULL) {
922    MethodMIR2LIR();
923  }
924
925  /* Method is not empty */
926  if (first_lir_insn_) {
927    // mark the targets of switch statement case labels
928    ProcessSwitchTables();
929
930    /* Convert LIR into machine code. */
931    AssembleLIR();
932
933    if (cu_->verbose) {
934      CodegenDump();
935    }
936  }
937}
938
939CompiledMethod* Mir2Lir::GetCompiledMethod() {
940  // Combine vmap tables - core regs, then fp regs - into vmap_table
941  std::vector<uint16_t> raw_vmap_table;
942  // Core regs may have been inserted out of order - sort first
943  std::sort(core_vmap_table_.begin(), core_vmap_table_.end());
944  for (size_t i = 0 ; i < core_vmap_table_.size(); ++i) {
945    // Copy, stripping out the phys register sort key
946    raw_vmap_table.push_back(~(-1 << VREG_NUM_WIDTH) & core_vmap_table_[i]);
947  }
948  // If we have a frame, push a marker to take place of lr
949  if (frame_size_ > 0) {
950    raw_vmap_table.push_back(INVALID_VREG);
951  } else {
952    DCHECK_EQ(__builtin_popcount(core_spill_mask_), 0);
953    DCHECK_EQ(__builtin_popcount(fp_spill_mask_), 0);
954  }
955  // Combine vmap tables - core regs, then fp regs. fp regs already sorted
956  for (uint32_t i = 0; i < fp_vmap_table_.size(); i++) {
957    raw_vmap_table.push_back(fp_vmap_table_[i]);
958  }
959  UnsignedLeb128EncodingVector vmap_encoder;
960  // Prefix the encoded data with its size.
961  vmap_encoder.PushBack(raw_vmap_table.size());
962  for (uint16_t cur : raw_vmap_table) {
963    vmap_encoder.PushBack(cur);
964  }
965  CompiledMethod* result =
966      new CompiledMethod(*cu_->compiler_driver, cu_->instruction_set, code_buffer_, frame_size_,
967                         core_spill_mask_, fp_spill_mask_, encoded_mapping_table_.GetData(),
968                         vmap_encoder.GetData(), native_gc_map_);
969  return result;
970}
971
972int Mir2Lir::ComputeFrameSize() {
973  /* Figure out the frame size */
974  static const uint32_t kAlignMask = kStackAlignment - 1;
975  uint32_t size = (num_core_spills_ + num_fp_spills_ +
976                   1 /* filler word */ + cu_->num_regs + cu_->num_outs +
977                   cu_->num_compiler_temps + 1 /* cur_method* */)
978                   * sizeof(uint32_t);
979  /* Align and set */
980  return (size + kAlignMask) & ~(kAlignMask);
981}
982
983/*
984 * Append an LIR instruction to the LIR list maintained by a compilation
985 * unit
986 */
987void Mir2Lir::AppendLIR(LIR* lir) {
988  if (first_lir_insn_ == NULL) {
989    DCHECK(last_lir_insn_ == NULL);
990    last_lir_insn_ = first_lir_insn_ = lir;
991    lir->prev = lir->next = NULL;
992  } else {
993    last_lir_insn_->next = lir;
994    lir->prev = last_lir_insn_;
995    lir->next = NULL;
996    last_lir_insn_ = lir;
997  }
998}
999
1000/*
1001 * Insert an LIR instruction before the current instruction, which cannot be the
1002 * first instruction.
1003 *
1004 * prev_lir <-> new_lir <-> current_lir
1005 */
1006void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir) {
1007  DCHECK(current_lir->prev != NULL);
1008  LIR *prev_lir = current_lir->prev;
1009
1010  prev_lir->next = new_lir;
1011  new_lir->prev = prev_lir;
1012  new_lir->next = current_lir;
1013  current_lir->prev = new_lir;
1014}
1015
1016/*
1017 * Insert an LIR instruction after the current instruction, which cannot be the
1018 * first instruction.
1019 *
1020 * current_lir -> new_lir -> old_next
1021 */
1022void Mir2Lir::InsertLIRAfter(LIR* current_lir, LIR* new_lir) {
1023  new_lir->prev = current_lir;
1024  new_lir->next = current_lir->next;
1025  current_lir->next = new_lir;
1026  new_lir->next->prev = new_lir;
1027}
1028
1029}  // namespace art
1030