image_writer.cc revision 3f735bd4f9d09a0f9b2b01321e4c6917879dcae6
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "image_writer.h"

#include <sys/stat.h>

#include <algorithm>
#include <limits>
#include <memory>
#include <numeric>
#include <set>
#include <utility>
#include <vector>

#include "base/logging.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "compiled_method.h"
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
#include "elf_file.h"
#include "elf_patcher.h"
#include "elf_utils.h"
#include "elf_writer.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "globals.h"
#include "handle_scope-inl.h"
#include "image.h"
#include "intern_table.h"
#include "lock_word.h"
#include "mirror/array-inl.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "oat.h"
#include "oat_file.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"

using ::art::mirror::ArtField;
using ::art::mirror::ArtMethod;
using ::art::mirror::Class;
using ::art::mirror::DexCache;
using ::art::mirror::EntryPointFromInterpreter;
using ::art::mirror::Object;
using ::art::mirror::ObjectArray;
using ::art::mirror::String;

namespace art {

// Separate objects into multiple bins to optimize dirty memory use.
static constexpr bool kBinObjects = true;

bool ImageWriter::Write(const std::string& image_filename,
                        uintptr_t image_begin,
                        const std::string& oat_filename,
                        const std::string& oat_location,
                        bool compile_pic) {
  CHECK(!image_filename.empty());

  CHECK_NE(image_begin, 0U);
  image_begin_ = reinterpret_cast<byte*>(image_begin);
  compile_pic_ = compile_pic;

  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();

  target_ptr_size_ = InstructionSetPointerSize(compiler_driver_.GetInstructionSet());
  std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename.c_str()));
  if (oat_file.get() == nullptr) {
    LOG(ERROR) << "Failed to open oat file " << oat_filename << " for " << oat_location;
    return false;
  }
  std::string error_msg;
  oat_file_ = OatFile::OpenReadable(oat_file.get(), oat_location, &error_msg);
  if (oat_file_ == nullptr) {
    LOG(ERROR) << "Failed to open writable oat file " << oat_filename << " for " << oat_location
        << ": " << error_msg;
    return false;
  }
  CHECK_EQ(class_linker->RegisterOatFile(oat_file_), oat_file_);

  interpreter_to_interpreter_bridge_offset_ =
      oat_file_->GetOatHeader().GetInterpreterToInterpreterBridgeOffset();
  interpreter_to_compiled_code_bridge_offset_ =
      oat_file_->GetOatHeader().GetInterpreterToCompiledCodeBridgeOffset();

  jni_dlsym_lookup_offset_ = oat_file_->GetOatHeader().GetJniDlsymLookupOffset();

  portable_imt_conflict_trampoline_offset_ =
      oat_file_->GetOatHeader().GetPortableImtConflictTrampolineOffset();
  portable_resolution_trampoline_offset_ =
      oat_file_->GetOatHeader().GetPortableResolutionTrampolineOffset();
  portable_to_interpreter_bridge_offset_ =
      oat_file_->GetOatHeader().GetPortableToInterpreterBridgeOffset();

  quick_generic_jni_trampoline_offset_ =
      oat_file_->GetOatHeader().GetQuickGenericJniTrampolineOffset();
  quick_imt_conflict_trampoline_offset_ =
      oat_file_->GetOatHeader().GetQuickImtConflictTrampolineOffset();
  quick_resolution_trampoline_offset_ =
      oat_file_->GetOatHeader().GetQuickResolutionTrampolineOffset();
  quick_to_interpreter_bridge_offset_ =
      oat_file_->GetOatHeader().GetQuickToInterpreterBridgeOffset();
  {
    Thread::Current()->TransitionFromSuspendedToRunnable();
    PruneNonImageClasses();  // Remove junk.
    ComputeLazyFieldsForImageClasses();  // Add useful information.
    ProcessStrings();
    Thread::Current()->TransitionFromRunnableToSuspended(kNative);
  }
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(false);  // Remove garbage.

  if (!AllocMemory()) {
    return false;
  }

  if (kIsDebugBuild) {
    ScopedObjectAccess soa(Thread::Current());
    CheckNonImageClassesRemoved();
  }

  Thread::Current()->TransitionFromSuspendedToRunnable();
  size_t oat_loaded_size = 0;
  size_t oat_data_offset = 0;
  ElfWriter::GetOatElfInformation(oat_file.get(), oat_loaded_size, oat_data_offset);
  CalculateNewObjectOffsets(oat_loaded_size, oat_data_offset);
  CopyAndFixupObjects();

  PatchOatCodeAndMethods(oat_file.get());

  // Before flushing, which might fail, release the mutator lock.
  Thread::Current()->TransitionFromRunnableToSuspended(kNative);

  if (oat_file->FlushCloseOrErase() != 0) {
    LOG(ERROR) << "Failed to flush and close oat file " << oat_filename << " for " << oat_location;
    return false;
  }

  std::unique_ptr<File> image_file(OS::CreateEmptyFile(image_filename.c_str()));
  ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
  if (image_file.get() == nullptr) {
    LOG(ERROR) << "Failed to open image file " << image_filename;
    return false;
  }
  if (fchmod(image_file->Fd(), 0644) != 0) {
    PLOG(ERROR) << "Failed to make image file world readable: " << image_filename;
    image_file->Erase();
    return false;
  }

  // Write out the image.
  CHECK_EQ(image_end_, image_header->GetImageSize());
  if (!image_file->WriteFully(image_->Begin(), image_end_)) {
    PLOG(ERROR) << "Failed to write image file " << image_filename;
    image_file->Erase();
    return false;
  }

  // Write out the image bitmap at the page aligned start of the image end.
  CHECK_ALIGNED(image_header->GetImageBitmapOffset(), kPageSize);
  if (!image_file->Write(reinterpret_cast<char*>(image_bitmap_->Begin()),
                         image_header->GetImageBitmapSize(),
                         image_header->GetImageBitmapOffset())) {
    PLOG(ERROR) << "Failed to write image file " << image_filename;
    image_file->Erase();
    return false;
  }

  if (image_file->FlushCloseOrErase() != 0) {
    PLOG(ERROR) << "Failed to flush and close image file " << image_filename;
    return false;
  }
  return true;
}

void ImageWriter::SetImageOffset(mirror::Object* object,
                                 ImageWriter::BinSlot bin_slot,
                                 size_t offset) {
  DCHECK(object != nullptr);
  DCHECK_NE(offset, 0U);
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(image_->Begin() + offset);
  DCHECK_ALIGNED(obj, kObjectAlignment);

  image_bitmap_->Set(obj);  // Mark the obj as mutated, since we will end up changing it.
  {
    // Remember the object-inside-of-the-image's hash code so we can restore it after the copy.
    auto hash_it = saved_hashes_map_.find(bin_slot);
    if (hash_it != saved_hashes_map_.end()) {
      saved_hashes_.push_back(std::make_pair(obj, hash_it->second));
      saved_hashes_map_.erase(hash_it);
    }
  }
  // The object is already deflated from when we set the bin slot. Just overwrite the lock word.
  object->SetLockWord(LockWord::FromForwardingAddress(offset), false);
  DCHECK(IsImageOffsetAssigned(object));
}

void ImageWriter::AssignImageOffset(mirror::Object* object, ImageWriter::BinSlot bin_slot) {
  DCHECK(object != nullptr);
  DCHECK_NE(image_objects_offset_begin_, 0u);

  size_t previous_bin_sizes = GetBinSizeSum(bin_slot.GetBin());  // Sum sizes in [0..bin#).
  size_t new_offset = image_objects_offset_begin_ + previous_bin_sizes + bin_slot.GetIndex();
  DCHECK_ALIGNED(new_offset, kObjectAlignment);

  SetImageOffset(object, bin_slot, new_offset);
  DCHECK_LT(new_offset, image_end_);
}

bool ImageWriter::IsImageOffsetAssigned(mirror::Object* object) const {
  // Will also return true if the bin slot was assigned since we are reusing the lock word.
  DCHECK(object != nullptr);
  return object->GetLockWord(false).GetState() == LockWord::kForwardingAddress;
}

size_t ImageWriter::GetImageOffset(mirror::Object* object) const {
  DCHECK(object != nullptr);
  DCHECK(IsImageOffsetAssigned(object));
  LockWord lock_word = object->GetLockWord(false);
  size_t offset = lock_word.ForwardingAddress();
  DCHECK_LT(offset, image_end_);
  return offset;
}

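// A sketch of the lock word life cycle during image writing (a summary of the
// functions above and below, not new logic): the lock word is reused twice.
// SetImageBinSlot() first stores a BinSlot as a fake forwarding address; once
// all bin sizes are known, SetImageOffset() overwrites it with the final image
// offset. Both phases report true from IsImageOffsetAssigned(), so callers
// must know which phase they are in:
//
//   AssignImageBinSlot(obj);                        // lock word = BinSlot
//   ... all objects visited, bin sizes final ...
//   AssignImageOffset(obj, GetImageBinSlot(obj));   // lock word = offset
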
void ImageWriter::SetImageBinSlot(mirror::Object* object, BinSlot bin_slot) {
  DCHECK(object != nullptr);
  DCHECK(!IsImageOffsetAssigned(object));
  DCHECK(!IsImageBinSlotAssigned(object));

  // Before we stomp over the lock word, save the hash code for later.
  Monitor::Deflate(Thread::Current(), object);
  LockWord lw(object->GetLockWord(false));
  switch (lw.GetState()) {
    case LockWord::kFatLocked: {
      LOG(FATAL) << "Fat locked object " << object << " found during object copy";
      break;
    }
    case LockWord::kThinLocked: {
      LOG(FATAL) << "Thin locked object " << object << " found during object copy";
      break;
    }
    case LockWord::kUnlocked:
      // No hash, don't need to save it.
      break;
    case LockWord::kHashCode:
      saved_hashes_map_[bin_slot] = lw.GetHashCode();
      break;
    default:
      LOG(FATAL) << "Unreachable.";
      break;
  }
  object->SetLockWord(LockWord::FromForwardingAddress(static_cast<uint32_t>(bin_slot)),
                      false);
  DCHECK(IsImageBinSlotAssigned(object));
}

void ImageWriter::AssignImageBinSlot(mirror::Object* object) {
  DCHECK(object != nullptr);
  size_t object_size;
  if (object->IsArtMethod()) {
    // Methods are sized based on the target pointer size.
    object_size = mirror::ArtMethod::InstanceSize(target_ptr_size_);
  } else {
    object_size = object->SizeOf();
  }

  // The magic happens here. We segregate objects into different bins based
  // on how likely they are to get dirty at runtime.
  //
  // Likely-to-dirty objects get packed together into the same bin so that
  // at runtime their page dirtiness ratio (how many dirty objects a page has) is
  // maximized.
  //
  // This means more pages will stay either clean or shared dirty (with zygote) and
  // the app will use less of its own (private) memory.
  Bin bin = kBinRegular;

  if (kBinObjects) {
    //
    // Changing the bin of an object is purely a memory-use tuning.
    // It has no effect on runtime correctness.
    //
    // Memory analysis has determined that the following types of objects get dirtied
    // the most:
    //
    // * Classes that are verified [their clinit runs only at runtime]
    //   - classes in general [because their static fields get overwritten]
    //   - initialized classes with all-final statics are unlikely to be ever dirty,
    //     so bin them separately
    // * Art Methods that are:
    //   - native [their native entry point is not looked up until runtime]
    //   - have declaring classes that aren't initialized
    //            [their interpreter/quick entry points are trampolines until the class
    //             becomes initialized]
    //
    // We also assume the following objects get dirtied either never or extremely rarely:
    //  * Strings (they are immutable)
    //  * Art methods that aren't native and have initialized declaring classes
    //
    // We assume that "regular" bin objects are highly unlikely to become dirtied,
    // so packing them together will not result in a noticeably tighter dirty-to-clean ratio.
    //
    if (object->IsClass()) {
      bin = kBinClassVerified;
      mirror::Class* klass = object->AsClass();

      if (klass->GetStatus() == Class::kStatusInitialized) {
        bin = kBinClassInitialized;

        // If the class's static fields are all final, put it into a separate bin
        // since it's very likely it will stay clean.
        uint32_t num_static_fields = klass->NumStaticFields();
        if (num_static_fields == 0) {
          bin = kBinClassInitializedFinalStatics;
        } else {
          // Maybe all the statics are final?
          bool all_final = true;
          for (uint32_t i = 0; i < num_static_fields; ++i) {
            ArtField* field = klass->GetStaticField(i);
            if (!field->IsFinal()) {
              all_final = false;
              break;
            }
          }

          if (all_final) {
            bin = kBinClassInitializedFinalStatics;
          }
        }
      }
    } else if (object->IsArtMethod<kVerifyNone>()) {
      mirror::ArtMethod* art_method = down_cast<ArtMethod*>(object);
      if (art_method->IsNative()) {
        bin = kBinArtMethodNative;
      } else {
        mirror::Class* declaring_class = art_method->GetDeclaringClass();
        if (declaring_class->GetStatus() != Class::kStatusInitialized) {
          bin = kBinArtMethodNotInitialized;
        } else {
          // This is highly unlikely to dirty since there are no entry points to mutate.
          bin = kBinArtMethodsManagedInitialized;
        }
      }
    } else if (object->GetClass<kVerifyNone>()->IsStringClass()) {
      bin = kBinString;  // Strings are almost always immutable (except for object header).
    }  // else bin = kBinRegular
  }

  size_t current_offset = bin_slot_sizes_[bin];  // How many bytes the current bin is at (aligned).
  // Move the current bin size up to accommodate the object we just assigned a bin slot.
  size_t offset_delta = RoundUp(object_size, kObjectAlignment);  // 64-bit alignment
  bin_slot_sizes_[bin] += offset_delta;

  BinSlot new_bin_slot(bin, current_offset);
  SetImageBinSlot(object, new_bin_slot);

  ++bin_slot_count_[bin];

  DCHECK_LT(GetBinSizeSum(), image_->Size());

  // Grow the image closer to the end by the object we just assigned.
  image_end_ += offset_delta;
  DCHECK_LT(image_end_, image_->Size());
}
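
// For illustration only, a worked example of the accounting above with
// hypothetical numbers: suppose bin_slot_sizes_[kBinString] is currently 0x60
// and we assign a 20-byte String. The object receives
// BinSlot(kBinString, 0x60), the bin grows by
// RoundUp(20, kObjectAlignment) == 24 bytes to 0x78, and image_end_ grows by
// the same 24 bytes. The final image offset is only resolved later, in
// AssignImageOffset(): image_objects_offset_begin_ plus the total size of all
// lower-numbered bins plus the 0x60 index.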

bool ImageWriter::IsImageBinSlotAssigned(mirror::Object* object) const {
  DCHECK(object != nullptr);

  // We always stash the bin slot into a lockword, in the 'forwarding address' state.
  // If it's in some other state, then we haven't yet assigned an image bin slot.
  if (object->GetLockWord(false).GetState() != LockWord::kForwardingAddress) {
    return false;
  } else if (kIsDebugBuild) {
    LockWord lock_word = object->GetLockWord(false);
    size_t offset = lock_word.ForwardingAddress();
    BinSlot bin_slot(offset);
    DCHECK_LT(bin_slot.GetIndex(), bin_slot_sizes_[bin_slot.GetBin()])
      << "bin slot offset should not exceed the size of that bin";
  }
  return true;
}

ImageWriter::BinSlot ImageWriter::GetImageBinSlot(mirror::Object* object) const {
  DCHECK(object != nullptr);
  DCHECK(IsImageBinSlotAssigned(object));

  LockWord lock_word = object->GetLockWord(false);
  size_t offset = lock_word.ForwardingAddress();  // TODO: ForwardingAddress should be uint32_t.
  DCHECK_LE(offset, std::numeric_limits<uint32_t>::max());

  BinSlot bin_slot(static_cast<uint32_t>(offset));
  DCHECK_LT(bin_slot.GetIndex(), bin_slot_sizes_[bin_slot.GetBin()]);

  return bin_slot;
}

bool ImageWriter::AllocMemory() {
  size_t length = RoundUp(Runtime::Current()->GetHeap()->GetTotalMemory(), kPageSize);
  std::string error_msg;
  image_.reset(MemMap::MapAnonymous("image writer image", nullptr, length, PROT_READ | PROT_WRITE,
                                    true, &error_msg));
  if (UNLIKELY(image_.get() == nullptr)) {
    LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg;
    return false;
  }

  // Create the image bitmap.
  image_bitmap_.reset(gc::accounting::ContinuousSpaceBitmap::Create("image bitmap", image_->Begin(),
                                                                    length));
  if (image_bitmap_.get() == nullptr) {
    LOG(ERROR) << "Failed to allocate memory for image bitmap";
    return false;
  }
  return true;
}

void ImageWriter::ComputeLazyFieldsForImageClasses() {
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  class_linker->VisitClassesWithoutClassesLock(ComputeLazyFieldsForClassesVisitor, nullptr);
}

bool ImageWriter::ComputeLazyFieldsForClassesVisitor(Class* c, void* /*arg*/) {
  Thread* self = Thread::Current();
  StackHandleScope<1> hs(self);
  mirror::Class::ComputeName(hs.NewHandle(c));
  return true;
}

// Count the number of strings in the heap and put the result in arg as a size_t pointer.
static void CountStringsCallback(Object* obj, void* arg)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  if (obj->GetClass()->IsStringClass()) {
    ++*reinterpret_cast<size_t*>(arg);
  }
}

// Collect all the java.lang.String objects in the heap and put them in the output strings_ array.
class StringCollector {
 public:
  StringCollector(Handle<mirror::ObjectArray<mirror::String>> strings, size_t index)
      : strings_(strings), index_(index) {
  }
  static void Callback(Object* obj, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    auto* collector = reinterpret_cast<StringCollector*>(arg);
    if (obj->GetClass()->IsStringClass()) {
      collector->strings_->SetWithoutChecks<false>(collector->index_++, obj->AsString());
    }
  }
  size_t GetIndex() const {
    return index_;
  }

 private:
  Handle<mirror::ObjectArray<mirror::String>> strings_;
  size_t index_;
};

// Compare strings based on length, used for sorting strings by length / reverse length.
class StringLengthComparator {
 public:
  explicit StringLengthComparator(Handle<mirror::ObjectArray<mirror::String>> strings)
      : strings_(strings) {
  }
  bool operator()(size_t a, size_t b) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return strings_->GetWithoutChecks(a)->GetLength() < strings_->GetWithoutChecks(b)->GetLength();
  }

 private:
  Handle<mirror::ObjectArray<mirror::String>> strings_;
};

// Normal string < comparison through the chars_ array.
class SubstringComparator {
 public:
  explicit SubstringComparator(const std::vector<uint16_t>* const chars) : chars_(chars) {
  }
  bool operator()(const std::pair<size_t, size_t>& a, const std::pair<size_t, size_t>& b) const {
    return std::lexicographical_compare(chars_->begin() + a.first,
                                        chars_->begin() + a.first + a.second,
                                        chars_->begin() + b.first,
                                        chars_->begin() + b.first + b.second);
  }

 private:
  const std::vector<uint16_t>* const chars_;
};
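
// Usage note (explanatory, not new logic): existing_strings in
// ProcessStrings() below keys (offset, length) pairs by the characters they
// denote in combined_chars. Because a strict prefix always orders just before
// its superstrings, lower_bound() on a newly appended string lands on its
// smallest lexicographic successor, which is the only candidate that can
// contain it as a prefix.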

void ImageWriter::ProcessStrings() {
  size_t total_strings = 0;
  gc::Heap* heap = Runtime::Current()->GetHeap();
  ClassLinker* cl = Runtime::Current()->GetClassLinker();
  {
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    heap->VisitObjects(CountStringsCallback, &total_strings);  // Count the strings.
  }
  Thread* self = Thread::Current();
  StackHandleScope<1> hs(self);
  auto strings = hs.NewHandle(cl->AllocStringArray(self, total_strings));
  StringCollector string_collector(strings, 0U);
  {
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    // Read strings into the array.
    heap->VisitObjects(StringCollector::Callback, &string_collector);
  }
  // Some strings could have gotten freed if AllocStringArray caused a GC.
  CHECK_LE(string_collector.GetIndex(), total_strings);
  total_strings = string_collector.GetIndex();
  size_t total_length = 0;
  std::vector<size_t> reverse_sorted_strings;
  for (size_t i = 0; i < total_strings; ++i) {
    mirror::String* s = strings->GetWithoutChecks(i);
    // Sum up the total length to size the combined char array.
    total_length += s->GetLength();
    reverse_sorted_strings.push_back(i);
  }
  // Sort by reverse length, so that the longest strings come first.
  StringLengthComparator comparator(strings);
  std::sort(reverse_sorted_strings.rbegin(), reverse_sorted_strings.rend(), comparator);
  // Deduplicate prefixes and add strings to the char array.
  std::vector<uint16_t> combined_chars(total_length, 0U);
  size_t num_chars = 0;
  // Characters saved from strings which are a strict prefix of another string (not an equal
  // string). We don't count the savings from equal strings since these would get interned later
  // anyway.
  size_t prefix_saved_chars = 0;
  std::set<std::pair<size_t, size_t>, SubstringComparator> existing_strings((
      SubstringComparator(&combined_chars)));
  for (size_t i = 0; i < total_strings; ++i) {
    mirror::String* s = strings->GetWithoutChecks(reverse_sorted_strings[i]);
    // Add the string to the end of the char array.
    size_t length = s->GetLength();
    for (size_t j = 0; j < length; ++j) {
      combined_chars[num_chars++] = s->CharAt(j);
    }
    // Try to see if the string exists as a prefix of an existing string.
    size_t new_offset = 0;
    std::pair<size_t, size_t> new_string(num_chars - length, length);
    auto it = existing_strings.lower_bound(new_string);
    bool is_prefix = true;
    if (it == existing_strings.end()) {
      is_prefix = false;
    } else {
      CHECK_LE(length, it->second);
      for (size_t j = 0; j < length; ++j) {
        if (combined_chars[it->first + j] != s->CharAt(j)) {
          is_prefix = false;
          break;
        }
      }
    }
    if (is_prefix) {
      // Shares a prefix, set the offset to where the new offset will be.
      new_offset = it->first;
      // Remove the added chars.
      num_chars -= length;
      if (it->second != length) {
        prefix_saved_chars += length;
      }
    } else {
      new_offset = new_string.first;
      existing_strings.insert(new_string);
    }
    s->SetOffset(new_offset);
  }
  // Allocate and update the char arrays.
  auto* array = mirror::CharArray::Alloc(self, num_chars);
  for (size_t i = 0; i < num_chars; ++i) {
    array->SetWithoutChecks<false>(i, combined_chars[i]);
  }
  for (size_t i = 0; i < total_strings; ++i) {
    strings->GetWithoutChecks(i)->SetArray(array);
  }
  LOG(INFO) << "Total # image strings=" << total_strings << " combined length="
      << total_length << " prefix saved chars=" << prefix_saved_chars;
  ComputeEagerResolvedStrings();
}
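
// A minimal worked example of the prefix sharing above, with hypothetical
// strings: given { "hello", "hell", "he" }, the descending-length sort writes
// "hello" into combined_chars first. "hell" is then appended, lower_bound()
// finds "hello", the character compare succeeds, so "hell"'s chars are removed
// again, it gets offset 0, and prefix_saved_chars grows by 4. "he" shares
// offset 0 the same way. Equal strings are deliberately not counted as
// savings because interning would deduplicate them anyway.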

void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg) {
  if (!obj->GetClass()->IsStringClass()) {
    return;
  }
  mirror::String* string = obj->AsString();
  const uint16_t* utf16_string = string->GetCharArray()->GetData() + string->GetOffset();
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock());
  size_t dex_cache_count = class_linker->GetDexCacheCount();
  for (size_t i = 0; i < dex_cache_count; ++i) {
    DexCache* dex_cache = class_linker->GetDexCache(i);
    const DexFile& dex_file = *dex_cache->GetDexFile();
    const DexFile::StringId* string_id;
    if (UNLIKELY(string->GetLength() == 0)) {
      string_id = dex_file.FindStringId("");
    } else {
      string_id = dex_file.FindStringId(utf16_string);
    }
    if (string_id != nullptr) {
      // This string occurs in this dex file, assign the dex cache entry.
      uint32_t string_idx = dex_file.GetIndexForStringId(*string_id);
      if (dex_cache->GetResolvedString(string_idx) == nullptr) {
        dex_cache->SetResolvedString(string_idx, string);
      }
    }
  }
}

void ImageWriter::ComputeEagerResolvedStrings() {
  ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  Runtime::Current()->GetHeap()->VisitObjects(ComputeEagerResolvedStringsCallback, this);
}

bool ImageWriter::IsImageClass(Class* klass) {
  std::string temp;
  return compiler_driver_.IsImageClass(klass->GetDescriptor(&temp));
}

struct NonImageClasses {
  ImageWriter* image_writer;
  std::set<std::string>* non_image_classes;
};

void ImageWriter::PruneNonImageClasses() {
  if (compiler_driver_.GetImageClasses() == nullptr) {
    return;
  }
  Runtime* runtime = Runtime::Current();
  ClassLinker* class_linker = runtime->GetClassLinker();

  // Make a list of classes we would like to prune.
  std::set<std::string> non_image_classes;
  NonImageClasses context;
  context.image_writer = this;
  context.non_image_classes = &non_image_classes;
  class_linker->VisitClasses(NonImageClassesVisitor, &context);

  // Remove the undesired classes from the class roots.
  for (const std::string& it : non_image_classes) {
    bool result = class_linker->RemoveClass(it.c_str(), nullptr);
    DCHECK(result);
  }

  // Clear references to removed classes from the DexCaches.
  ArtMethod* resolution_method = runtime->GetResolutionMethod();
  ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock());
  size_t dex_cache_count = class_linker->GetDexCacheCount();
  for (size_t idx = 0; idx < dex_cache_count; ++idx) {
    DexCache* dex_cache = class_linker->GetDexCache(idx);
    for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
      Class* klass = dex_cache->GetResolvedType(i);
      if (klass != nullptr && !IsImageClass(klass)) {
        dex_cache->SetResolvedType(i, nullptr);
      }
    }
    for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
      ArtMethod* method = dex_cache->GetResolvedMethod(i);
      if (method != nullptr && !IsImageClass(method->GetDeclaringClass())) {
        dex_cache->SetResolvedMethod(i, resolution_method);
      }
    }
    for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
      ArtField* field = dex_cache->GetResolvedField(i);
      if (field != nullptr && !IsImageClass(field->GetDeclaringClass())) {
        dex_cache->SetResolvedField(i, nullptr);
      }
    }
  }
}

bool ImageWriter::NonImageClassesVisitor(Class* klass, void* arg) {
  NonImageClasses* context = reinterpret_cast<NonImageClasses*>(arg);
  if (!context->image_writer->IsImageClass(klass)) {
    std::string temp;
    context->non_image_classes->insert(klass->GetDescriptor(&temp));
  }
  return true;
}

void ImageWriter::CheckNonImageClassesRemoved() {
  if (compiler_driver_.GetImageClasses() != nullptr) {
    gc::Heap* heap = Runtime::Current()->GetHeap();
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    heap->VisitObjects(CheckNonImageClassesRemovedCallback, this);
  }
}

void ImageWriter::CheckNonImageClassesRemovedCallback(Object* obj, void* arg) {
  ImageWriter* image_writer = reinterpret_cast<ImageWriter*>(arg);
  if (obj->IsClass()) {
    Class* klass = obj->AsClass();
    if (!image_writer->IsImageClass(klass)) {
      image_writer->DumpImageClasses();
      std::string temp;
      CHECK(image_writer->IsImageClass(klass)) << klass->GetDescriptor(&temp)
                                               << " " << PrettyDescriptor(klass);
    }
  }
}

void ImageWriter::DumpImageClasses() {
  const std::set<std::string>* image_classes = compiler_driver_.GetImageClasses();
  CHECK(image_classes != nullptr);
  for (const std::string& image_class : *image_classes) {
    LOG(INFO) << " " << image_class;
  }
}

void ImageWriter::CalculateObjectBinSlots(Object* obj) {
  DCHECK(obj != nullptr);
  // If it is a string, we want to intern it if it's not already interned.
  if (obj->GetClass()->IsStringClass()) {
    // We must be an interned string that was forward referenced and already assigned.
    if (IsImageBinSlotAssigned(obj)) {
      DCHECK_EQ(obj, obj->AsString()->Intern());
      return;
    }
    mirror::String* const interned = obj->AsString()->Intern();
    if (obj != interned) {
      if (!IsImageBinSlotAssigned(interned)) {
        // The interned obj is after us, allocate its location early.
        AssignImageBinSlot(interned);
      }
      // Point those looking for this object to the interned version.
      SetImageBinSlot(obj, GetImageBinSlot(interned));
      return;
    }
    // else (obj == interned), nothing to do but fall through to the normal case.
  }

  AssignImageBinSlot(obj);
}

ObjectArray<Object>* ImageWriter::CreateImageRoots() const {
  Runtime* runtime = Runtime::Current();
  ClassLinker* class_linker = runtime->GetClassLinker();
  Thread* self = Thread::Current();
  StackHandleScope<3> hs(self);
  Handle<Class> object_array_class(hs.NewHandle(
      class_linker->FindSystemClass(self, "[Ljava/lang/Object;")));

  // Build an Object[] of all the DexCaches used in the source_space_.
  // Since we can't hold the dex lock when allocating the dex_caches
  // ObjectArray, we lock the dex lock twice, first to get the number
  // of dex caches and then again to copy the dex caches. We check that
  // the number of dex caches does not change in between.
  size_t dex_cache_count;
  {
    ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock());
    dex_cache_count = class_linker->GetDexCacheCount();
  }
  Handle<ObjectArray<Object>> dex_caches(
      hs.NewHandle(ObjectArray<Object>::Alloc(self, object_array_class.Get(),
                                              dex_cache_count)));
  CHECK(dex_caches.Get() != nullptr) << "Failed to allocate a dex cache array.";
  {
    ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock());
    CHECK_EQ(dex_cache_count, class_linker->GetDexCacheCount())
        << "The number of dex caches changed.";
    for (size_t i = 0; i < dex_cache_count; ++i) {
      dex_caches->Set<false>(i, class_linker->GetDexCache(i));
    }
  }

  // Build an Object[] of the roots needed to restore the runtime.
  Handle<ObjectArray<Object>> image_roots(hs.NewHandle(
      ObjectArray<Object>::Alloc(self, object_array_class.Get(), ImageHeader::kImageRootsMax)));
  image_roots->Set<false>(ImageHeader::kResolutionMethod, runtime->GetResolutionMethod());
  image_roots->Set<false>(ImageHeader::kImtConflictMethod, runtime->GetImtConflictMethod());
  image_roots->Set<false>(ImageHeader::kImtUnimplementedMethod,
                          runtime->GetImtUnimplementedMethod());
  image_roots->Set<false>(ImageHeader::kDefaultImt, runtime->GetDefaultImt());
  image_roots->Set<false>(ImageHeader::kCalleeSaveMethod,
                          runtime->GetCalleeSaveMethod(Runtime::kSaveAll));
  image_roots->Set<false>(ImageHeader::kRefsOnlySaveMethod,
                          runtime->GetCalleeSaveMethod(Runtime::kRefsOnly));
  image_roots->Set<false>(ImageHeader::kRefsAndArgsSaveMethod,
                          runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
  image_roots->Set<false>(ImageHeader::kDexCaches, dex_caches.Get());
  image_roots->Set<false>(ImageHeader::kClassRoots, class_linker->GetClassRoots());
  for (int i = 0; i < ImageHeader::kImageRootsMax; i++) {
    CHECK(image_roots->Get(i) != nullptr);
  }
  return image_roots.Get();
}

// Walk instance fields of the given Class. Separate function to allow recursion on the super
// class.
void ImageWriter::WalkInstanceFields(mirror::Object* obj, mirror::Class* klass) {
  // Visit fields of parent classes first.
  StackHandleScope<1> hs(Thread::Current());
  Handle<mirror::Class> h_class(hs.NewHandle(klass));
  mirror::Class* super = h_class->GetSuperClass();
  if (super != nullptr) {
    WalkInstanceFields(obj, super);
  }
  size_t num_reference_fields = h_class->NumReferenceInstanceFields();
  MemberOffset field_offset = h_class->GetFirstReferenceInstanceFieldOffset();
  for (size_t i = 0; i < num_reference_fields; ++i) {
    mirror::Object* value = obj->GetFieldObject<mirror::Object>(field_offset);
    if (value != nullptr) {
      WalkFieldsInOrder(value);
    }
    field_offset = MemberOffset(field_offset.Uint32Value() +
                                sizeof(mirror::HeapReference<mirror::Object>));
  }
}

// For an unvisited object, visit it then all its children found via fields.
void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
  // Use our own visitor routine (instead of GC visitor) to get better locality between
  // an object and its fields.
  if (!IsImageBinSlotAssigned(obj)) {
    // Walk instance fields of all objects.
    StackHandleScope<2> hs(Thread::Current());
    Handle<mirror::Object> h_obj(hs.NewHandle(obj));
    Handle<mirror::Class> klass(hs.NewHandle(obj->GetClass()));
    // Visit the object itself.
    CalculateObjectBinSlots(h_obj.Get());
    WalkInstanceFields(h_obj.Get(), klass.Get());
    // Walk static fields of a Class.
    if (h_obj->IsClass()) {
      size_t num_static_fields = klass->NumReferenceStaticFields();
      MemberOffset field_offset = klass->GetFirstReferenceStaticFieldOffset();
      for (size_t i = 0; i < num_static_fields; ++i) {
        mirror::Object* value = h_obj->GetFieldObject<mirror::Object>(field_offset);
        if (value != nullptr) {
          WalkFieldsInOrder(value);
        }
        field_offset = MemberOffset(field_offset.Uint32Value() +
                                    sizeof(mirror::HeapReference<mirror::Object>));
      }
    } else if (h_obj->IsObjectArray()) {
      // Walk elements of an object array.
      mirror::ObjectArray<mirror::Object>* obj_array = h_obj->AsObjectArray<mirror::Object>();
      int32_t length = obj_array->GetLength();
      for (int32_t i = 0; i < length; i++) {
        mirror::Object* value = obj_array->Get(i);
        if (value != nullptr) {
          WalkFieldsInOrder(value);
        }
      }
    }
  }
}

void ImageWriter::WalkFieldsCallback(mirror::Object* obj, void* arg) {
  ImageWriter* writer = reinterpret_cast<ImageWriter*>(arg);
  DCHECK(writer != nullptr);
  writer->WalkFieldsInOrder(obj);
}

void ImageWriter::UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg) {
  ImageWriter* writer = reinterpret_cast<ImageWriter*>(arg);
  DCHECK(writer != nullptr);
  writer->UnbinObjectsIntoOffset(obj);
}

void ImageWriter::UnbinObjectsIntoOffset(mirror::Object* obj) {
  CHECK(obj != nullptr);

  // We know the bin slot, and the total bin sizes for all objects by now,
  // so calculate the object's final image offset.

  DCHECK(IsImageBinSlotAssigned(obj));
  BinSlot bin_slot = GetImageBinSlot(obj);
  // Change the lockword from a bin slot into an offset.
  AssignImageOffset(obj, bin_slot);
}

void ImageWriter::CalculateNewObjectOffsets(size_t oat_loaded_size, size_t oat_data_offset) {
  CHECK_NE(0U, oat_loaded_size);
  Thread* self = Thread::Current();
  StackHandleScope<1> hs(self);
  Handle<ObjectArray<Object>> image_roots(hs.NewHandle(CreateImageRoots()));

  gc::Heap* heap = Runtime::Current()->GetHeap();
  DCHECK_EQ(0U, image_end_);

  // Leave space for the header, but do not write it yet, since we need to
  // know where image_roots is going to end up.
  image_end_ += RoundUp(sizeof(ImageHeader), kObjectAlignment);  // 64-bit alignment

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // TODO: Image spaces only?
    const char* old = self->StartAssertNoThreadSuspension("ImageWriter");
    DCHECK_LT(image_end_, image_->Size());
    image_objects_offset_begin_ = image_end_;
    // Clear any pre-existing monitors which may have been in the monitor words, assign bin slots.
    heap->VisitObjects(WalkFieldsCallback, this);
    // Transform each object's bin slot into an offset which will be used to do the final copy.
    heap->VisitObjects(UnbinObjectsIntoOffsetCallback, this);
    DCHECK(saved_hashes_map_.empty());  // All bin slot hashes should have been moved to saved_hashes_ by now.
    self->EndAssertNoThreadSuspension(old);
  }

  DCHECK_GT(image_end_, GetBinSizeSum());

  if (kIsDebugBuild) {
    LOG(INFO) << "Bin summary (total size: " << GetBinSizeSum() << "): ";
    for (size_t bin = 0; bin < kBinSize; ++bin) {
      LOG(INFO) << "  bin# " << bin << ", number objects: " << bin_slot_count_[bin] << ", "
                << " total byte size: " << bin_slot_sizes_[bin];
    }
  }

  const byte* oat_file_begin = image_begin_ + RoundUp(image_end_, kPageSize);
  const byte* oat_file_end = oat_file_begin + oat_loaded_size;
  oat_data_begin_ = oat_file_begin + oat_data_offset;
  const byte* oat_data_end = oat_data_begin_ + oat_file_->Size();

  // Return to write the header at the start of the image with the future location of image_roots.
  // At this point, image_end_ is the size of the image (excluding bitmaps).
  const size_t heap_bytes_per_bitmap_byte = kBitsPerByte * kObjectAlignment;
  const size_t bitmap_bytes = RoundUp(image_end_, heap_bytes_per_bitmap_byte) /
      heap_bytes_per_bitmap_byte;
  ImageHeader image_header(PointerToLowMemUInt32(image_begin_),
                           static_cast<uint32_t>(image_end_),
                           RoundUp(image_end_, kPageSize),
                           RoundUp(bitmap_bytes, kPageSize),
                           PointerToLowMemUInt32(GetImageAddress(image_roots.Get())),
                           oat_file_->GetOatHeader().GetChecksum(),
                           PointerToLowMemUInt32(oat_file_begin),
                           PointerToLowMemUInt32(oat_data_begin_),
                           PointerToLowMemUInt32(oat_data_end),
                           PointerToLowMemUInt32(oat_file_end),
                           compile_pic_);
  memcpy(image_->Begin(), &image_header, sizeof(image_header));

  // Note that image_end_ is left at the end of the used space.
}

void ImageWriter::CopyAndFixupObjects() {
  Thread* self = Thread::Current();
  const char* old_cause = self->StartAssertNoThreadSuspension("ImageWriter");
  gc::Heap* heap = Runtime::Current()->GetHeap();
  // TODO: heap validation can't handle this fix up pass.
  heap->DisableObjectValidation();
  // TODO: Image spaces only?
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  heap->VisitObjects(CopyAndFixupObjectsCallback, this);
  // Fix up the objects that previously had hash codes.
  for (const std::pair<mirror::Object*, uint32_t>& hash_pair : saved_hashes_) {
    hash_pair.first->SetLockWord(LockWord::FromHashCode(hash_pair.second), false);
  }
  saved_hashes_.clear();
  self->EndAssertNoThreadSuspension(old_cause);
}

void ImageWriter::CopyAndFixupObjectsCallback(Object* obj, void* arg) {
  DCHECK(obj != nullptr);
  DCHECK(arg != nullptr);
  ImageWriter* image_writer = reinterpret_cast<ImageWriter*>(arg);
  // See GetLocalAddress for similar computation.
  size_t offset = image_writer->GetImageOffset(obj);
  byte* dst = image_writer->image_->Begin() + offset;
  const byte* src = reinterpret_cast<const byte*>(obj);
  size_t n;
  if (obj->IsArtMethod()) {
    // Size without pointer fields since we don't want to overrun the buffer if target art method
    // is 32 bits but source is 64 bits.
    n = mirror::ArtMethod::SizeWithoutPointerFields();
  } else {
    n = obj->SizeOf();
  }
  DCHECK_LT(offset + n, image_writer->image_->Size());
  memcpy(dst, src, n);
  Object* copy = reinterpret_cast<Object*>(dst);
  // Clear the lock word; objects which had an inflated monitor or a hash code in their monitor
  // word get their hash code written back later in CopyAndFixupObjects().
  copy->SetLockWord(LockWord(), false);
  image_writer->FixupObject(obj, copy);
}

// Rewrite all the references in the copied object to point to their image address equivalent.
class FixupVisitor {
 public:
  FixupVisitor(ImageWriter* image_writer, Object* copy) : image_writer_(image_writer), copy_(copy) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    Object* ref = obj->GetFieldObject<Object, kVerifyNone>(offset);
    // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
    // image.
    copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
        offset, image_writer_->GetImageAddress(ref));
  }

  // java.lang.ref.Reference visitor.
  void operator()(mirror::Class* /*klass*/, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
        mirror::Reference::ReferentOffset(), image_writer_->GetImageAddress(ref->GetReferent()));
  }

 protected:
  ImageWriter* const image_writer_;
  mirror::Object* const copy_;
};

class FixupClassVisitor FINAL : public FixupVisitor {
 public:
  FixupClassVisitor(ImageWriter* image_writer, Object* copy) : FixupVisitor(image_writer, copy) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    DCHECK(obj->IsClass());
    FixupVisitor::operator()(obj, offset, /*is_static*/ false);

    // TODO: Remove dead code.
    if (offset.Uint32Value() < mirror::Class::EmbeddedVTableOffset().Uint32Value()) {
      return;
    }
  }

  void operator()(mirror::Class* /*klass*/, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    LOG(FATAL) << "Reference not expected here.";
  }
};

void ImageWriter::FixupObject(Object* orig, Object* copy) {
  DCHECK(orig != nullptr);
  DCHECK(copy != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    orig->AssertReadBarrierPointer();
    if (kUseBrooksReadBarrier) {
      // Note the address 'copy' isn't the same as the image address of 'orig'.
      copy->SetReadBarrierPointer(GetImageAddress(orig));
      DCHECK_EQ(copy->GetReadBarrierPointer(), GetImageAddress(orig));
    }
  }
  if (orig->IsClass() && orig->AsClass()->ShouldHaveEmbeddedImtAndVTable()) {
    FixupClassVisitor visitor(this, copy);
    orig->VisitReferences<true /*visit class*/>(visitor, visitor);
  } else {
    FixupVisitor visitor(this, copy);
    orig->VisitReferences<true /*visit class*/>(visitor, visitor);
  }
  if (orig->IsArtMethod<kVerifyNone>()) {
    FixupMethod(orig->AsArtMethod<kVerifyNone>(), down_cast<ArtMethod*>(copy));
  } else if (orig->IsClass() && orig->AsClass()->IsArtMethodClass()) {
    // Set the right size for the target.
    size_t size = mirror::ArtMethod::InstanceSize(target_ptr_size_);
    down_cast<mirror::Class*>(copy)->SetObjectSizeWithoutChecks(size);
  }
}

const byte* ImageWriter::GetQuickCode(mirror::ArtMethod* method, bool* quick_is_interpreted) {
  DCHECK(!method->IsResolutionMethod() && !method->IsImtConflictMethod() &&
         !method->IsImtUnimplementedMethod() && !method->IsAbstract()) << PrettyMethod(method);

  // Use original code if it exists. Otherwise, set the code pointer to the resolution
  // trampoline.

  // Quick entrypoint:
  const byte* quick_code = GetOatAddress(method->GetQuickOatCodeOffset());
  *quick_is_interpreted = false;
  if (quick_code != nullptr && (!method->IsStatic() || method->IsConstructor() ||
      method->GetDeclaringClass()->IsInitialized())) {
    // We have code for a non-static or initialized method, just use the code.
  } else if (quick_code == nullptr && method->IsNative() &&
      (!method->IsStatic() || method->GetDeclaringClass()->IsInitialized())) {
    // Non-static or initialized native method missing compiled code, use generic JNI version.
    quick_code = GetOatAddress(quick_generic_jni_trampoline_offset_);
  } else if (quick_code == nullptr && !method->IsNative()) {
    // We don't have code at all for a non-native method, use the interpreter.
    quick_code = GetOatAddress(quick_to_interpreter_bridge_offset_);
    *quick_is_interpreted = true;
  } else {
    CHECK(!method->GetDeclaringClass()->IsInitialized());
    // We have code for a static method, but need to go through the resolution stub for class
    // initialization.
    quick_code = GetOatAddress(quick_resolution_trampoline_offset_);
  }
  return quick_code;
}
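
// Decision table for the branches above (a summary, not new logic):
//   compiled code present, non-static/constructor/initialized -> use that code
//   no code, native, non-static or initialized                -> generic JNI trampoline
//   no code, not native                                       -> quick-to-interpreter bridge
//   otherwise (static method of an uninitialized class)       -> quick resolution trampoline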

const byte* ImageWriter::GetQuickEntryPoint(mirror::ArtMethod* method) {
  // Calculate the quick entry point following the same logic as FixupMethod() below.
  // The resolution method has a special trampoline to call.
  Runtime* runtime = Runtime::Current();
  if (UNLIKELY(method == runtime->GetResolutionMethod())) {
    return GetOatAddress(quick_resolution_trampoline_offset_);
  } else if (UNLIKELY(method == runtime->GetImtConflictMethod() ||
                      method == runtime->GetImtUnimplementedMethod())) {
    return GetOatAddress(quick_imt_conflict_trampoline_offset_);
  } else {
    // We assume all methods have code. If they don't currently, then we set them to use the
    // resolution trampoline. Abstract methods never have code and so we need to make sure their
    // use results in an AbstractMethodError. We use the interpreter to achieve this.
    if (UNLIKELY(method->IsAbstract())) {
      return GetOatAddress(quick_to_interpreter_bridge_offset_);
    } else {
      bool quick_is_interpreted;
      return GetQuickCode(method, &quick_is_interpreted);
    }
  }
}

void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
  // OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to
  // oat_begin_.
  // For 64-bit targets we need to repack the current runtime pointer sized fields to the right
  // locations.
  // Copy all of the fields from the runtime methods to the target methods first since we did a
  // bytewise copy earlier.
#if defined(ART_USE_PORTABLE_COMPILER)
  copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
      orig->GetEntryPointFromPortableCompiledCode(), target_ptr_size_);
#endif
  copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(orig->GetEntryPointFromInterpreter(),
                                                         target_ptr_size_);
  copy->SetEntryPointFromJniPtrSize<kVerifyNone>(orig->GetEntryPointFromJni(), target_ptr_size_);
  copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
      orig->GetEntryPointFromQuickCompiledCode(), target_ptr_size_);
  copy->SetNativeGcMapPtrSize<kVerifyNone>(orig->GetNativeGcMap(), target_ptr_size_);

  // The resolution method has a special trampoline to call.
  Runtime* runtime = Runtime::Current();
  if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
#if defined(ART_USE_PORTABLE_COMPILER)
    copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
        GetOatAddress(portable_resolution_trampoline_offset_), target_ptr_size_);
#endif
    copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
        GetOatAddress(quick_resolution_trampoline_offset_), target_ptr_size_);
  } else if (UNLIKELY(orig == runtime->GetImtConflictMethod() ||
                      orig == runtime->GetImtUnimplementedMethod())) {
#if defined(ART_USE_PORTABLE_COMPILER)
    copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
        GetOatAddress(portable_imt_conflict_trampoline_offset_), target_ptr_size_);
#endif
    copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
        GetOatAddress(quick_imt_conflict_trampoline_offset_), target_ptr_size_);
  } else {
    // We assume all methods have code. If they don't currently, then we set them to use the
    // resolution trampoline. Abstract methods never have code and so we need to make sure their
    // use results in an AbstractMethodError. We use the interpreter to achieve this.
    if (UNLIKELY(orig->IsAbstract())) {
#if defined(ART_USE_PORTABLE_COMPILER)
      copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
          GetOatAddress(portable_to_interpreter_bridge_offset_), target_ptr_size_);
#endif
      copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
          GetOatAddress(quick_to_interpreter_bridge_offset_), target_ptr_size_);
      copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(
          reinterpret_cast<EntryPointFromInterpreter*>(const_cast<byte*>(
              GetOatAddress(interpreter_to_interpreter_bridge_offset_))), target_ptr_size_);
    } else {
      bool quick_is_interpreted;
      const byte* quick_code = GetQuickCode(orig, &quick_is_interpreted);
      copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(quick_code, target_ptr_size_);

      // Portable entrypoint:
      bool portable_is_interpreted = false;
#if defined(ART_USE_PORTABLE_COMPILER)
      const byte* portable_code = GetOatAddress(orig->GetPortableOatCodeOffset());
      if (portable_code != nullptr && (!orig->IsStatic() || orig->IsConstructor() ||
          orig->GetDeclaringClass()->IsInitialized())) {
        // We have code for a non-static or initialized method, just use the code.
      } else if (portable_code == nullptr && orig->IsNative() &&
          (!orig->IsStatic() || orig->GetDeclaringClass()->IsInitialized())) {
        // Non-static or initialized native method missing compiled code, use generic JNI version.
        // TODO: generic JNI support for LLVM.
        portable_code = GetOatAddress(portable_resolution_trampoline_offset_);
      } else if (portable_code == nullptr && !orig->IsNative()) {
        // We don't have code at all for a non-native method, use the interpreter.
        portable_code = GetOatAddress(portable_to_interpreter_bridge_offset_);
        portable_is_interpreted = true;
      } else {
        CHECK(!orig->GetDeclaringClass()->IsInitialized());
        // We have code for a static method, but need to go through the resolution stub for class
        // initialization.
        portable_code = GetOatAddress(portable_resolution_trampoline_offset_);
      }
      copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
          portable_code, target_ptr_size_);
#endif
      // JNI entrypoint:
      if (orig->IsNative()) {
        // The native method's pointer is set to a stub to lookup via dlsym.
        // Note this is not the code_ pointer, that is handled above.
        copy->SetEntryPointFromJniPtrSize<kVerifyNone>(GetOatAddress(jni_dlsym_lookup_offset_),
                                                       target_ptr_size_);
      } else {
        // Normal (non-abstract non-native) methods have various tables to relocate.
        uint32_t native_gc_map_offset = orig->GetOatNativeGcMapOffset();
        const uint8_t* native_gc_map = GetOatAddress(native_gc_map_offset);
        copy->SetNativeGcMapPtrSize<kVerifyNone>(native_gc_map, target_ptr_size_);
      }

      // Interpreter entrypoint:
      // Set the interpreter entrypoint depending on whether there is compiled code or not.
      uint32_t interpreter_code = (quick_is_interpreted && portable_is_interpreted)
          ? interpreter_to_interpreter_bridge_offset_
          : interpreter_to_compiled_code_bridge_offset_;
      EntryPointFromInterpreter* interpreter_entrypoint =
          reinterpret_cast<EntryPointFromInterpreter*>(
              const_cast<byte*>(GetOatAddress(interpreter_code)));
      copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(
          interpreter_entrypoint, target_ptr_size_);
    }
  }
}

static OatHeader* GetOatHeaderFromElf(ElfFile* elf) {
  Elf32_Shdr* data_sec = elf->FindSectionByName(".rodata");
  if (data_sec == nullptr) {
    return nullptr;
  }
  return reinterpret_cast<OatHeader*>(elf->Begin() + data_sec->sh_offset);
}

void ImageWriter::PatchOatCodeAndMethods(File* elf_file) {
  std::string error_msg;
  std::unique_ptr<ElfFile> elf(ElfFile::Open(elf_file, PROT_READ | PROT_WRITE,
                                             MAP_SHARED, &error_msg));
  if (elf.get() == nullptr) {
    LOG(FATAL) << "Unable to open oat file for patching: " << error_msg;
    return;
  }
  if (!ElfPatcher::Patch(&compiler_driver_, elf.get(), oat_file_,
                         reinterpret_cast<uintptr_t>(oat_data_begin_),
                         GetImageAddressCallback, reinterpret_cast<void*>(this),
                         &error_msg)) {
    LOG(FATAL) << "Unable to patch oat file: " << error_msg;
    return;
  }
  OatHeader* oat_header = GetOatHeaderFromElf(elf.get());
  CHECK(oat_header != nullptr);
  CHECK(oat_header->IsValid());

  ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
  image_header->SetOatChecksum(oat_header->GetChecksum());
}

size_t ImageWriter::GetBinSizeSum(ImageWriter::Bin up_to) const {
  DCHECK_LE(up_to, kBinSize);
  return std::accumulate(&bin_slot_sizes_[0], &bin_slot_sizes_[up_to], /*init*/ 0);
}
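
// E.g. GetBinSizeSum(kBinRegular) sums the byte sizes of all bins ordered
// before kBinRegular; combined with image_objects_offset_begin_ this is
// exactly the base offset that AssignImageOffset() uses for that bin.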

ImageWriter::BinSlot::BinSlot(uint32_t lockword) : lockword_(lockword) {
  // These values may need to get updated if more bins are added to the enum Bin.
  static_assert(kBinBits == 3, "wrong number of bin bits");
  static_assert(kBinShift == 29, "wrong bin shift");
  static_assert(sizeof(BinSlot) == sizeof(LockWord), "BinSlot/LockWord must have equal sizes");

  DCHECK_LT(GetBin(), kBinSize);
  DCHECK_ALIGNED(GetIndex(), kObjectAlignment);
}

ImageWriter::BinSlot::BinSlot(Bin bin, uint32_t index)
    : BinSlot(index | (static_cast<uint32_t>(bin) << kBinShift)) {
  DCHECK_EQ(index, GetIndex());
}

ImageWriter::Bin ImageWriter::BinSlot::GetBin() const {
  return static_cast<Bin>((lockword_ & kBinMask) >> kBinShift);
}

uint32_t ImageWriter::BinSlot::GetIndex() const {
  return lockword_ & ~kBinMask;
}
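
// Bit-layout sketch of the encoding above, with hypothetical values: the top
// kBinBits == 3 bits hold the bin and the low 29 bits hold the byte index
// into that bin. For a bin whose enum value is 4 and an index of 0x40,
// BinSlot stores (4u << 29) | 0x40 == 0x80000040; GetBin() masks and shifts
// the top bits back out, and GetIndex() clears them.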

}  // namespace art