// image_writer.cc revision e9e3e697f0c426132bee10aaa6aee9107d2d7dc6
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "image_writer.h"
18
19#include <sys/stat.h>
20
21#include <memory>
22#include <vector>
23
24#include "base/logging.h"
25#include "base/unix_file/fd_file.h"
26#include "class_linker.h"
27#include "compiled_method.h"
28#include "dex_file-inl.h"
29#include "driver/compiler_driver.h"
30#include "elf_writer.h"
31#include "gc/accounting/card_table-inl.h"
32#include "gc/accounting/heap_bitmap.h"
33#include "gc/accounting/space_bitmap-inl.h"
34#include "gc/heap.h"
35#include "gc/space/large_object_space.h"
36#include "gc/space/space-inl.h"
37#include "globals.h"
38#include "image.h"
39#include "intern_table.h"
40#include "lock_word.h"
41#include "mirror/art_field-inl.h"
42#include "mirror/art_method-inl.h"
43#include "mirror/array-inl.h"
44#include "mirror/class-inl.h"
45#include "mirror/class_loader.h"
46#include "mirror/dex_cache-inl.h"
47#include "mirror/object-inl.h"
48#include "mirror/object_array-inl.h"
49#include "mirror/string-inl.h"
50#include "oat.h"
51#include "oat_file.h"
52#include "object_utils.h"
53#include "runtime.h"
54#include "scoped_thread_state_change.h"
55#include "handle_scope-inl.h"
56#include "utils.h"
57
58using ::art::mirror::ArtField;
59using ::art::mirror::ArtMethod;
60using ::art::mirror::Class;
61using ::art::mirror::DexCache;
62using ::art::mirror::EntryPointFromInterpreter;
63using ::art::mirror::Object;
64using ::art::mirror::ObjectArray;
65using ::art::mirror::String;
66
67namespace art {
68
69bool ImageWriter::Write(const std::string& image_filename,
70                        uintptr_t image_begin,
71                        const std::string& oat_filename,
72                        const std::string& oat_location) {
73  CHECK(!image_filename.empty());
74
75  CHECK_NE(image_begin, 0U);
76  image_begin_ = reinterpret_cast<byte*>(image_begin);
77
78  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
79
80  std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename.c_str()));
81  if (oat_file.get() == NULL) {
82    LOG(ERROR) << "Failed to open oat file " << oat_filename << " for " << oat_location;
83    return false;
84  }
85  std::string error_msg;
86  oat_file_ = OatFile::OpenWritable(oat_file.get(), oat_location, &error_msg);
87  if (oat_file_ == nullptr) {
88    LOG(ERROR) << "Failed to open writable oat file " << oat_filename << " for " << oat_location
89        << ": " << error_msg;
90    return false;
91  }
92  CHECK_EQ(class_linker->RegisterOatFile(oat_file_), oat_file_);
93
94  interpreter_to_interpreter_bridge_offset_ =
95      oat_file_->GetOatHeader().GetInterpreterToInterpreterBridgeOffset();
96  interpreter_to_compiled_code_bridge_offset_ =
97      oat_file_->GetOatHeader().GetInterpreterToCompiledCodeBridgeOffset();
98
99  jni_dlsym_lookup_offset_ = oat_file_->GetOatHeader().GetJniDlsymLookupOffset();
100
101  portable_imt_conflict_trampoline_offset_ =
102      oat_file_->GetOatHeader().GetPortableImtConflictTrampolineOffset();
103  portable_resolution_trampoline_offset_ =
104      oat_file_->GetOatHeader().GetPortableResolutionTrampolineOffset();
105  portable_to_interpreter_bridge_offset_ =
106      oat_file_->GetOatHeader().GetPortableToInterpreterBridgeOffset();
107
108  quick_generic_jni_trampoline_offset_ =
109      oat_file_->GetOatHeader().GetQuickGenericJniTrampolineOffset();
110  quick_imt_conflict_trampoline_offset_ =
111      oat_file_->GetOatHeader().GetQuickImtConflictTrampolineOffset();
112  quick_resolution_trampoline_offset_ =
113      oat_file_->GetOatHeader().GetQuickResolutionTrampolineOffset();
114  quick_to_interpreter_bridge_offset_ =
115      oat_file_->GetOatHeader().GetQuickToInterpreterBridgeOffset();
116  {
117    Thread::Current()->TransitionFromSuspendedToRunnable();
118    PruneNonImageClasses();  // Remove junk
119    ComputeLazyFieldsForImageClasses();  // Add useful information
120    ComputeEagerResolvedStrings();
121    Thread::Current()->TransitionFromRunnableToSuspended(kNative);
122  }
123  gc::Heap* heap = Runtime::Current()->GetHeap();
124  heap->CollectGarbage(false);  // Remove garbage.
125
126  if (!AllocMemory()) {
127    return false;
128  }
129
130  if (kIsDebugBuild) {
131    ScopedObjectAccess soa(Thread::Current());
132    CheckNonImageClassesRemoved();
133  }
134
135  Thread::Current()->TransitionFromSuspendedToRunnable();
136  size_t oat_loaded_size = 0;
137  size_t oat_data_offset = 0;
138  ElfWriter::GetOatElfInformation(oat_file.get(), oat_loaded_size, oat_data_offset);
139  CalculateNewObjectOffsets(oat_loaded_size, oat_data_offset);
140  CopyAndFixupObjects();
141  PatchOatCodeAndMethods();
142  Thread::Current()->TransitionFromRunnableToSuspended(kNative);
143
144  std::unique_ptr<File> image_file(OS::CreateEmptyFile(image_filename.c_str()));
145  ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
146  if (image_file.get() == NULL) {
147    LOG(ERROR) << "Failed to open image file " << image_filename;
148    return false;
149  }
150  if (fchmod(image_file->Fd(), 0644) != 0) {
151    PLOG(ERROR) << "Failed to make image file world readable: " << image_filename;
152    return EXIT_FAILURE;
153  }
154
155  // Write out the image.
156  CHECK_EQ(image_end_, image_header->GetImageSize());
157  if (!image_file->WriteFully(image_->Begin(), image_end_)) {
158    PLOG(ERROR) << "Failed to write image file " << image_filename;
159    return false;
160  }
161
162  // Write out the image bitmap at the page aligned start of the image end.
163  CHECK_ALIGNED(image_header->GetImageBitmapOffset(), kPageSize);
164  if (!image_file->Write(reinterpret_cast<char*>(image_bitmap_->Begin()),
165                         image_header->GetImageBitmapSize(),
166                         image_header->GetImageBitmapOffset())) {
167    PLOG(ERROR) << "Failed to write image file " << image_filename;
168    return false;
169  }
170
171  return true;
172}
173
174void ImageWriter::SetImageOffset(mirror::Object* object, size_t offset) {
175  DCHECK(object != nullptr);
176  DCHECK_NE(offset, 0U);
177  DCHECK(!IsImageOffsetAssigned(object));
178  mirror::Object* obj = reinterpret_cast<mirror::Object*>(image_->Begin() + offset);
179  DCHECK_ALIGNED(obj, kObjectAlignment);
180  image_bitmap_->Set(obj);
181  // Before we stomp over the lock word, save the hash code for later.
182  Monitor::Deflate(Thread::Current(), object);;
183  LockWord lw(object->GetLockWord(false));
184  switch (lw.GetState()) {
185    case LockWord::kFatLocked: {
186      LOG(FATAL) << "Fat locked object " << obj << " found during object copy";
187      break;
188    }
189    case LockWord::kThinLocked: {
190      LOG(FATAL) << "Thin locked object " << obj << " found during object copy";
191      break;
192    }
193    case LockWord::kUnlocked:
194      // No hash, don't need to save it.
195      break;
196    case LockWord::kHashCode:
197      saved_hashes_.push_back(std::make_pair(obj, lw.GetHashCode()));
198      break;
199    default:
200      LOG(FATAL) << "Unreachable.";
201      break;
202  }
203  object->SetLockWord(LockWord::FromForwardingAddress(offset), false);
204  DCHECK(IsImageOffsetAssigned(object));
205}
206
// Assigns |object| the next free slot at the current end of the image and
// advances image_end_ by the object's size rounded up to 8 bytes.
// NOTE: SetImageOffset must run before image_end_ is advanced, since it
// records the pre-increment value as the object's offset.
void ImageWriter::AssignImageOffset(mirror::Object* object) {
  DCHECK(object != nullptr);
  SetImageOffset(object, image_end_);
  image_end_ += RoundUp(object->SizeOf(), 8);  // 64-bit alignment
  DCHECK_LT(image_end_, image_->Size());
}
213
214bool ImageWriter::IsImageOffsetAssigned(mirror::Object* object) const {
215  DCHECK(object != nullptr);
216  return object->GetLockWord(false).GetState() == LockWord::kForwardingAddress;
217}
218
219size_t ImageWriter::GetImageOffset(mirror::Object* object) const {
220  DCHECK(object != nullptr);
221  DCHECK(IsImageOffsetAssigned(object));
222  LockWord lock_word = object->GetLockWord(false);
223  size_t offset = lock_word.ForwardingAddress();
224  DCHECK_LT(offset, image_end_);
225  return offset;
226}
227
228bool ImageWriter::AllocMemory() {
229  size_t length = RoundUp(Runtime::Current()->GetHeap()->GetTotalMemory(), kPageSize);
230  std::string error_msg;
231  image_.reset(MemMap::MapAnonymous("image writer image", NULL, length, PROT_READ | PROT_WRITE,
232                                    true, &error_msg));
233  if (UNLIKELY(image_.get() == nullptr)) {
234    LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg;
235    return false;
236  }
237
238  // Create the image bitmap.
239  image_bitmap_.reset(gc::accounting::ContinuousSpaceBitmap::Create("image bitmap", image_->Begin(),
240                                                                    length));
241  if (image_bitmap_.get() == nullptr) {
242    LOG(ERROR) << "Failed to allocate memory for image bitmap";
243    return false;
244  }
245  return true;
246}
247
248void ImageWriter::ComputeLazyFieldsForImageClasses() {
249  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
250  class_linker->VisitClassesWithoutClassesLock(ComputeLazyFieldsForClassesVisitor, NULL);
251}
252
253bool ImageWriter::ComputeLazyFieldsForClassesVisitor(Class* c, void* /*arg*/) {
254  Thread* self = Thread::Current();
255  StackHandleScope<1> hs(self);
256  mirror::Class::ComputeName(hs.NewHandle(c));
257  return true;
258}
259
260void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg) {
261  if (!obj->GetClass()->IsStringClass()) {
262    return;
263  }
264  mirror::String* string = obj->AsString();
265  const uint16_t* utf16_string = string->GetCharArray()->GetData() + string->GetOffset();
266  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
267  ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock());
268  size_t dex_cache_count = class_linker->GetDexCacheCount();
269  for (size_t i = 0; i < dex_cache_count; ++i) {
270    DexCache* dex_cache = class_linker->GetDexCache(i);
271    const DexFile& dex_file = *dex_cache->GetDexFile();
272    const DexFile::StringId* string_id;
273    if (UNLIKELY(string->GetLength() == 0)) {
274      string_id = dex_file.FindStringId("");
275    } else {
276      string_id = dex_file.FindStringId(utf16_string);
277    }
278    if (string_id != nullptr) {
279      // This string occurs in this dex file, assign the dex cache entry.
280      uint32_t string_idx = dex_file.GetIndexForStringId(*string_id);
281      if (dex_cache->GetResolvedString(string_idx) == NULL) {
282        dex_cache->SetResolvedString(string_idx, string);
283      }
284    }
285  }
286}
287
// Visits every heap object under the heap bitmap lock, eagerly resolving
// interned strings into the dex caches (see the callback above).
void ImageWriter::ComputeEagerResolvedStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  Runtime::Current()->GetHeap()->VisitObjects(ComputeEagerResolvedStringsCallback, this);
}
292
293bool ImageWriter::IsImageClass(Class* klass) {
294  return compiler_driver_.IsImageClass(klass->GetDescriptor().c_str());
295}
296
// Context passed through ClassLinker::VisitClasses to NonImageClassesVisitor:
// the writer (for IsImageClass) and the output set of descriptors to prune.
struct NonImageClasses {
  ImageWriter* image_writer;
  std::set<std::string>* non_image_classes;
};
301
302void ImageWriter::PruneNonImageClasses() {
303  if (compiler_driver_.GetImageClasses() == NULL) {
304    return;
305  }
306  Runtime* runtime = Runtime::Current();
307  ClassLinker* class_linker = runtime->GetClassLinker();
308
309  // Make a list of classes we would like to prune.
310  std::set<std::string> non_image_classes;
311  NonImageClasses context;
312  context.image_writer = this;
313  context.non_image_classes = &non_image_classes;
314  class_linker->VisitClasses(NonImageClassesVisitor, &context);
315
316  // Remove the undesired classes from the class roots.
317  for (const std::string& it : non_image_classes) {
318    class_linker->RemoveClass(it.c_str(), NULL);
319  }
320
321  // Clear references to removed classes from the DexCaches.
322  ArtMethod* resolution_method = runtime->GetResolutionMethod();
323  ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock());
324  size_t dex_cache_count = class_linker->GetDexCacheCount();
325  for (size_t idx = 0; idx < dex_cache_count; ++idx) {
326    DexCache* dex_cache = class_linker->GetDexCache(idx);
327    for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
328      Class* klass = dex_cache->GetResolvedType(i);
329      if (klass != NULL && !IsImageClass(klass)) {
330        dex_cache->SetResolvedType(i, NULL);
331      }
332    }
333    for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
334      ArtMethod* method = dex_cache->GetResolvedMethod(i);
335      if (method != NULL && !IsImageClass(method->GetDeclaringClass())) {
336        dex_cache->SetResolvedMethod(i, resolution_method);
337      }
338    }
339    for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
340      ArtField* field = dex_cache->GetResolvedField(i);
341      if (field != NULL && !IsImageClass(field->GetDeclaringClass())) {
342        dex_cache->SetResolvedField(i, NULL);
343      }
344    }
345  }
346}
347
348bool ImageWriter::NonImageClassesVisitor(Class* klass, void* arg) {
349  NonImageClasses* context = reinterpret_cast<NonImageClasses*>(arg);
350  if (!context->image_writer->IsImageClass(klass)) {
351    context->non_image_classes->insert(klass->GetDescriptor());
352  }
353  return true;
354}
355
// Debug-build sanity pass: walks the heap verifying no non-image class
// survived PruneNonImageClasses (the callback CHECK-fails if one did).
void ImageWriter::CheckNonImageClassesRemoved()
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  if (compiler_driver_.GetImageClasses() != nullptr) {
    gc::Heap* heap = Runtime::Current()->GetHeap();
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    heap->VisitObjects(CheckNonImageClassesRemovedCallback, this);
  }
}
364
365void ImageWriter::CheckNonImageClassesRemovedCallback(Object* obj, void* arg) {
366  ImageWriter* image_writer = reinterpret_cast<ImageWriter*>(arg);
367  if (obj->IsClass()) {
368    Class* klass = obj->AsClass();
369    if (!image_writer->IsImageClass(klass)) {
370      image_writer->DumpImageClasses();
371      CHECK(image_writer->IsImageClass(klass)) << klass->GetDescriptor()
372                                               << " " << PrettyDescriptor(klass);
373    }
374  }
375}
376
377void ImageWriter::DumpImageClasses() {
378  CompilerDriver::DescriptorSet* image_classes = compiler_driver_.GetImageClasses();
379  CHECK(image_classes != NULL);
380  for (const std::string& image_class : *image_classes) {
381    LOG(INFO) << " " << image_class;
382  }
383}
384
385void ImageWriter::CalculateObjectOffsets(Object* obj) {
386  DCHECK(obj != NULL);
387  // if it is a string, we want to intern it if its not interned.
388  if (obj->GetClass()->IsStringClass()) {
389    // we must be an interned string that was forward referenced and already assigned
390    if (IsImageOffsetAssigned(obj)) {
391      DCHECK_EQ(obj, obj->AsString()->Intern());
392      return;
393    }
394    mirror::String* const interned = obj->AsString()->Intern();
395    if (obj != interned) {
396      if (!IsImageOffsetAssigned(interned)) {
397        // interned obj is after us, allocate its location early
398        AssignImageOffset(interned);
399      }
400      // point those looking for this object to the interned version.
401      SetImageOffset(obj, GetImageOffset(interned));
402      return;
403    }
404    // else (obj == interned), nothing to do but fall through to the normal case
405  }
406
407  AssignImageOffset(obj);
408}
409
410ObjectArray<Object>* ImageWriter::CreateImageRoots() const {
411  Runtime* runtime = Runtime::Current();
412  ClassLinker* class_linker = runtime->GetClassLinker();
413  Thread* self = Thread::Current();
414  StackHandleScope<3> hs(self);
415  Handle<Class> object_array_class(hs.NewHandle(
416      class_linker->FindSystemClass(self, "[Ljava/lang/Object;")));
417
418  // build an Object[] of all the DexCaches used in the source_space_.
419  // Since we can't hold the dex lock when allocating the dex_caches
420  // ObjectArray, we lock the dex lock twice, first to get the number
421  // of dex caches first and then lock it again to copy the dex
422  // caches. We check that the number of dex caches does not change.
423  size_t dex_cache_count;
424  {
425    ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock());
426    dex_cache_count = class_linker->GetDexCacheCount();
427  }
428  Handle<ObjectArray<Object>> dex_caches(
429      hs.NewHandle(ObjectArray<Object>::Alloc(self, object_array_class.Get(),
430                                              dex_cache_count)));
431  CHECK(dex_caches.Get() != nullptr) << "Failed to allocate a dex cache array.";
432  {
433    ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock());
434    CHECK_EQ(dex_cache_count, class_linker->GetDexCacheCount())
435        << "The number of dex caches changed.";
436    for (size_t i = 0; i < dex_cache_count; ++i) {
437      dex_caches->Set<false>(i, class_linker->GetDexCache(i));
438    }
439  }
440
441  // build an Object[] of the roots needed to restore the runtime
442  Handle<ObjectArray<Object>> image_roots(hs.NewHandle(
443      ObjectArray<Object>::Alloc(self, object_array_class.Get(), ImageHeader::kImageRootsMax)));
444  image_roots->Set<false>(ImageHeader::kResolutionMethod, runtime->GetResolutionMethod());
445  image_roots->Set<false>(ImageHeader::kImtConflictMethod, runtime->GetImtConflictMethod());
446  image_roots->Set<false>(ImageHeader::kDefaultImt, runtime->GetDefaultImt());
447  image_roots->Set<false>(ImageHeader::kCalleeSaveMethod,
448                          runtime->GetCalleeSaveMethod(Runtime::kSaveAll));
449  image_roots->Set<false>(ImageHeader::kRefsOnlySaveMethod,
450                          runtime->GetCalleeSaveMethod(Runtime::kRefsOnly));
451  image_roots->Set<false>(ImageHeader::kRefsAndArgsSaveMethod,
452                          runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
453  image_roots->Set<false>(ImageHeader::kDexCaches, dex_caches.Get());
454  image_roots->Set<false>(ImageHeader::kClassRoots, class_linker->GetClassRoots());
455  for (int i = 0; i < ImageHeader::kImageRootsMax; i++) {
456    CHECK(image_roots->Get(i) != NULL);
457  }
458  return image_roots.Get();
459}
460
461// Walk instance fields of the given Class. Separate function to allow recursion on the super
462// class.
// Walk instance fields of the given Class. Separate function to allow recursion on the super
// class.
void ImageWriter::WalkInstanceFields(mirror::Object* obj, mirror::Class* klass) {
  // Visit fields of parent classes first.
  StackHandleScope<1> hs(Thread::Current());
  Handle<mirror::Class> h_class(hs.NewHandle(klass));
  mirror::Class* super = h_class->GetSuperClass();
  if (super != nullptr) {
    WalkInstanceFields(obj, super);
  }
  // Then visit the reference-typed instance fields declared by this class.
  size_t num_reference_fields = h_class->NumReferenceInstanceFields();
  for (size_t i = 0; i < num_reference_fields; ++i) {
    mirror::ArtField* field = h_class->GetInstanceField(i);
    MemberOffset field_offset = field->GetOffset();
    mirror::Object* value = obj->GetFieldObject<mirror::Object>(field_offset);
    if (value != nullptr) {
      WalkFieldsInOrder(value);
    }
  }
}
482
483// For an unvisited object, visit it then all its children found via fields.
484void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
485  if (!IsImageOffsetAssigned(obj)) {
486    // Walk instance fields of all objects
487    StackHandleScope<2> hs(Thread::Current());
488    Handle<mirror::Object> h_obj(hs.NewHandle(obj));
489    Handle<mirror::Class> klass(hs.NewHandle(obj->GetClass()));
490    // visit the object itself.
491    CalculateObjectOffsets(h_obj.Get());
492    WalkInstanceFields(h_obj.Get(), klass.Get());
493    // Walk static fields of a Class.
494    if (h_obj->IsClass()) {
495      size_t num_static_fields = klass->NumReferenceStaticFields();
496      for (size_t i = 0; i < num_static_fields; ++i) {
497        mirror::ArtField* field = klass->GetStaticField(i);
498        MemberOffset field_offset = field->GetOffset();
499        mirror::Object* value = h_obj->GetFieldObject<mirror::Object>(field_offset);
500        if (value != nullptr) {
501          WalkFieldsInOrder(value);
502        }
503      }
504    } else if (h_obj->IsObjectArray()) {
505      // Walk elements of an object array.
506      int32_t length = h_obj->AsObjectArray<mirror::Object>()->GetLength();
507      for (int32_t i = 0; i < length; i++) {
508        mirror::ObjectArray<mirror::Object>* obj_array = h_obj->AsObjectArray<mirror::Object>();
509        mirror::Object* value = obj_array->Get(i);
510        if (value != nullptr) {
511          WalkFieldsInOrder(value);
512        }
513      }
514    }
515  }
516}
517
518void ImageWriter::WalkFieldsCallback(mirror::Object* obj, void* arg) {
519  ImageWriter* writer = reinterpret_cast<ImageWriter*>(arg);
520  DCHECK(writer != nullptr);
521  writer->WalkFieldsInOrder(obj);
522}
523
// Offset-assignment pass: reserves header space, walks the heap assigning
// every reachable object its final image location, computes the oat file's
// future addresses, then writes the ImageHeader into the buffer's first bytes.
void ImageWriter::CalculateNewObjectOffsets(size_t oat_loaded_size, size_t oat_data_offset) {
  CHECK_NE(0U, oat_loaded_size);
  Thread* self = Thread::Current();
  StackHandleScope<1> hs(self);
  Handle<ObjectArray<Object>> image_roots(hs.NewHandle(CreateImageRoots()));

  gc::Heap* heap = Runtime::Current()->GetHeap();
  DCHECK_EQ(0U, image_end_);

  // Leave space for the header, but do not write it yet, we need to
  // know where image_roots is going to end up
  image_end_ += RoundUp(sizeof(ImageHeader), 8);  // 64-bit-alignment

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // TODO: Image spaces only?
    const char* old = self->StartAssertNoThreadSuspension("ImageWriter");
    DCHECK_LT(image_end_, image_->Size());
    // Assign offsets to all reachable objects; SetImageOffset deflates any
    // monitors it finds while stomping the lock words.
    heap->VisitObjects(WalkFieldsCallback, this);
    self->EndAssertNoThreadSuspension(old);
  }

  // The oat file is mapped page-aligned immediately after the image.
  const byte* oat_file_begin = image_begin_ + RoundUp(image_end_, kPageSize);
  const byte* oat_file_end = oat_file_begin + oat_loaded_size;
  oat_data_begin_ = oat_file_begin + oat_data_offset;
  const byte* oat_data_end = oat_data_begin_ + oat_file_->Size();

  // Return to write header at start of image with future location of image_roots. At this point,
  // image_end_ is the size of the image (excluding bitmaps).
  const size_t heap_bytes_per_bitmap_byte = kBitsPerByte * kObjectAlignment;
  const size_t bitmap_bytes = RoundUp(image_end_, heap_bytes_per_bitmap_byte) /
      heap_bytes_per_bitmap_byte;
  ImageHeader image_header(PointerToLowMemUInt32(image_begin_),
                           static_cast<uint32_t>(image_end_),
                           RoundUp(image_end_, kPageSize),
                           RoundUp(bitmap_bytes, kPageSize),
                           PointerToLowMemUInt32(GetImageAddress(image_roots.Get())),
                           oat_file_->GetOatHeader().GetChecksum(),
                           PointerToLowMemUInt32(oat_file_begin),
                           PointerToLowMemUInt32(oat_data_begin_),
                           PointerToLowMemUInt32(oat_data_end),
                           PointerToLowMemUInt32(oat_file_end));
  memcpy(image_->Begin(), &image_header, sizeof(image_header));

  // Note that image_end_ is left at end of used space
}
571
// Copies every live object into the image buffer, rewrites its references to
// image addresses, then restores the hash codes SetImageOffset saved before
// stomping lock words with forwarding addresses.
void ImageWriter::CopyAndFixupObjects()
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  Thread* self = Thread::Current();
  const char* old_cause = self->StartAssertNoThreadSuspension("ImageWriter");
  gc::Heap* heap = Runtime::Current()->GetHeap();
  // TODO: heap validation can't handle this fix up pass
  heap->DisableObjectValidation();
  // TODO: Image spaces only?
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  heap->VisitObjects(CopyAndFixupObjectsCallback, this);
  // Fix up the objects that previously had hash codes in their lock words.
  for (const std::pair<mirror::Object*, uint32_t>& hash_pair : saved_hashes_) {
    hash_pair.first->SetLockWord(LockWord::FromHashCode(hash_pair.second), false);
  }
  saved_hashes_.clear();
  self->EndAssertNoThreadSuspension(old_cause);
}
589
590void ImageWriter::CopyAndFixupObjectsCallback(Object* obj, void* arg) {
591  DCHECK(obj != nullptr);
592  DCHECK(arg != nullptr);
593  ImageWriter* image_writer = reinterpret_cast<ImageWriter*>(arg);
594  // see GetLocalAddress for similar computation
595  size_t offset = image_writer->GetImageOffset(obj);
596  byte* dst = image_writer->image_->Begin() + offset;
597  const byte* src = reinterpret_cast<const byte*>(obj);
598  size_t n = obj->SizeOf();
599  DCHECK_LT(offset + n, image_writer->image_->Size());
600  memcpy(dst, src, n);
601  Object* copy = reinterpret_cast<Object*>(dst);
602  // Write in a hash code of objects which have inflated monitors or a hash code in their monitor
603  // word.
604  copy->SetLockWord(LockWord(), false);
605  image_writer->FixupObject(obj, copy);
606}
607
// Reference visitor used by FixupObject: for each reference field read from
// the source object, writes the corresponding image address into the same
// field of the image-resident copy.
class FixupVisitor {
 public:
  FixupVisitor(ImageWriter* image_writer, Object* copy) : image_writer_(image_writer), copy_(copy) {
  }

  // Called for each reference field at |offset| of the source object |obj|.
  void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    Object* ref = obj->GetFieldObject<Object, kVerifyNone>(offset);
    // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
    // image.
    copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
        offset, image_writer_->GetImageAddress(ref));
  }

  // java.lang.ref.Reference visitor.
  void operator()(mirror::Class* /*klass*/, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
        mirror::Reference::ReferentOffset(), image_writer_->GetImageAddress(ref->GetReferent()));
  }

 private:
  ImageWriter* const image_writer_;
  mirror::Object* const copy_;  // The in-image-buffer copy being patched.
};
634
// Rewrites all reference fields of |copy| (the in-buffer clone of |orig|) to
// image addresses via FixupVisitor; ArtMethod objects additionally get their
// entry points patched by FixupMethod.
void ImageWriter::FixupObject(Object* orig, Object* copy) {
  DCHECK(orig != nullptr);
  DCHECK(copy != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    orig->AssertReadBarrierPointer();
    if (kUseBrooksReadBarrier) {
      // Note the address 'copy' isn't the same as the image address of 'orig'.
      copy->SetReadBarrierPointer(GetImageAddress(orig));
      DCHECK_EQ(copy->GetReadBarrierPointer(), GetImageAddress(orig));
    }
  }
  FixupVisitor visitor(this, copy);
  orig->VisitReferences<true /*visit class*/>(visitor, visitor);
  if (orig->IsArtMethod<kVerifyNone>()) {
    FixupMethod(orig->AsArtMethod<kVerifyNone>(), down_cast<ArtMethod*>(copy));
  }
}
652
// Patches the entry points of |copy| (the image clone of method |orig|):
// converts oat-file code offsets into pointers relative to the oat file's
// future load address, substituting trampolines/bridges where no compiled
// code exists.
void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
  // OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to
  // oat_begin_

  // The resolution method has a special trampoline to call.
  if (UNLIKELY(orig == Runtime::Current()->GetResolutionMethod())) {
    copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_resolution_trampoline_offset_));
    copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_resolution_trampoline_offset_));
  } else if (UNLIKELY(orig == Runtime::Current()->GetImtConflictMethod())) {
    // The IMT conflict method likewise gets its dedicated trampolines.
    copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_imt_conflict_trampoline_offset_));
    copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_imt_conflict_trampoline_offset_));
  } else {
    // We assume all methods have code. If they don't currently then we set them to the use the
    // resolution trampoline. Abstract methods never have code and so we need to make sure their
    // use results in an AbstractMethodError. We use the interpreter to achieve this.
    if (UNLIKELY(orig->IsAbstract())) {
      copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_to_interpreter_bridge_offset_));
      copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_to_interpreter_bridge_offset_));
      copy->SetEntryPointFromInterpreter<kVerifyNone>(reinterpret_cast<EntryPointFromInterpreter*>
          (const_cast<byte*>(GetOatAddress(interpreter_to_interpreter_bridge_offset_))));
    } else {
      // Use original code if it exists. Otherwise, set the code pointer to the resolution
      // trampoline.

      // Quick entrypoint:
      const byte* quick_code = GetOatAddress(orig->GetQuickOatCodeOffset());
      bool quick_is_interpreted = false;
      if (quick_code != nullptr &&
          (!orig->IsStatic() || orig->IsConstructor() || orig->GetDeclaringClass()->IsInitialized())) {
        // We have code for a non-static or initialized method, just use the code.
      } else if (quick_code == nullptr && orig->IsNative() &&
          (!orig->IsStatic() || orig->GetDeclaringClass()->IsInitialized())) {
        // Non-static or initialized native method missing compiled code, use generic JNI version.
        quick_code = GetOatAddress(quick_generic_jni_trampoline_offset_);
      } else if (quick_code == nullptr && !orig->IsNative()) {
        // We don't have code at all for a non-native method, use the interpreter.
        quick_code = GetOatAddress(quick_to_interpreter_bridge_offset_);
        quick_is_interpreted = true;
      } else {
        CHECK(!orig->GetDeclaringClass()->IsInitialized());
        // We have code for a static method, but need to go through the resolution stub for class
        // initialization.
        quick_code = GetOatAddress(quick_resolution_trampoline_offset_);
      }
      copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(quick_code);

      // Portable entrypoint:
      const byte* portable_code = GetOatAddress(orig->GetPortableOatCodeOffset());
      bool portable_is_interpreted = false;
      if (portable_code != nullptr &&
          (!orig->IsStatic() || orig->IsConstructor() || orig->GetDeclaringClass()->IsInitialized())) {
        // We have code for a non-static or initialized method, just use the code.
      } else if (portable_code == nullptr && orig->IsNative() &&
          (!orig->IsStatic() || orig->GetDeclaringClass()->IsInitialized())) {
        // Non-static or initialized native method missing compiled code, use generic JNI version.
        // TODO: generic JNI support for LLVM.
        portable_code = GetOatAddress(portable_resolution_trampoline_offset_);
      } else if (portable_code == nullptr && !orig->IsNative()) {
        // We don't have code at all for a non-native method, use the interpreter.
        portable_code = GetOatAddress(portable_to_interpreter_bridge_offset_);
        portable_is_interpreted = true;
      } else {
        CHECK(!orig->GetDeclaringClass()->IsInitialized());
        // We have code for a static method, but need to go through the resolution stub for class
        // initialization.
        portable_code = GetOatAddress(portable_resolution_trampoline_offset_);
      }
      copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(portable_code);

      // JNI entrypoint:
      if (orig->IsNative()) {
        // The native method's pointer is set to a stub to lookup via dlsym.
        // Note this is not the code_ pointer, that is handled above.
        copy->SetNativeMethod<kVerifyNone>(GetOatAddress(jni_dlsym_lookup_offset_));
      } else {
        // Normal (non-abstract non-native) methods have various tables to relocate.
        uint32_t native_gc_map_offset = orig->GetOatNativeGcMapOffset();
        const byte* native_gc_map = GetOatAddress(native_gc_map_offset);
        copy->SetNativeGcMap<kVerifyNone>(reinterpret_cast<const uint8_t*>(native_gc_map));
      }

      // Interpreter entrypoint:
      // Set the interpreter entrypoint depending on whether there is compiled code or not.
      uint32_t interpreter_code = (quick_is_interpreted && portable_is_interpreted)
          ? interpreter_to_interpreter_bridge_offset_
          : interpreter_to_compiled_code_bridge_offset_;
      copy->SetEntryPointFromInterpreter<kVerifyNone>(
          reinterpret_cast<EntryPointFromInterpreter*>(
              const_cast<byte*>(GetOatAddress(interpreter_code))));
    }
  }
}
745
746static ArtMethod* GetTargetMethod(const CompilerDriver::CallPatchInformation* patch)
747    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
748  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
749  StackHandleScope<1> hs(Thread::Current());
750  Handle<mirror::DexCache> dex_cache(
751      hs.NewHandle(class_linker->FindDexCache(*patch->GetTargetDexFile())));
752  ArtMethod* method = class_linker->ResolveMethod(*patch->GetTargetDexFile(),
753                                                  patch->GetTargetMethodIdx(),
754                                                  dex_cache,
755                                                  NullHandle<mirror::ClassLoader>(),
756                                                  NullHandle<mirror::ArtMethod>(),
757                                                  patch->GetTargetInvokeType());
758  CHECK(method != NULL)
759    << patch->GetTargetDexFile()->GetLocation() << " " << patch->GetTargetMethodIdx();
760  CHECK(!method->IsRuntimeMethod())
761    << patch->GetTargetDexFile()->GetLocation() << " " << patch->GetTargetMethodIdx();
762  CHECK(dex_cache->GetResolvedMethods()->Get(patch->GetTargetMethodIdx()) == method)
763    << patch->GetTargetDexFile()->GetLocation() << " " << patch->GetReferrerMethodIdx() << " "
764    << PrettyMethod(dex_cache->GetResolvedMethods()->Get(patch->GetTargetMethodIdx())) << " "
765    << PrettyMethod(method);
766  return method;
767}
768
769static Class* GetTargetType(const CompilerDriver::TypePatchInformation* patch)
770    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
771  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
772  StackHandleScope<2> hs(Thread::Current());
773  Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(patch->GetDexFile())));
774  Class* klass = class_linker->ResolveType(patch->GetDexFile(), patch->GetTargetTypeIdx(),
775                                           dex_cache, NullHandle<mirror::ClassLoader>());
776  CHECK(klass != NULL)
777    << patch->GetDexFile().GetLocation() << " " << patch->GetTargetTypeIdx();
778  CHECK(dex_cache->GetResolvedTypes()->Get(patch->GetTargetTypeIdx()) == klass)
779    << patch->GetDexFile().GetLocation() << " " << patch->GetReferrerMethodIdx() << " "
780    << PrettyClass(dex_cache->GetResolvedTypes()->Get(patch->GetTargetTypeIdx())) << " "
781    << PrettyClass(klass);
782  return klass;
783}
784
785void ImageWriter::PatchOatCodeAndMethods() {
786  Thread* self = Thread::Current();
787  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
788  const char* old_cause = self->StartAssertNoThreadSuspension("ImageWriter");
789
790  typedef std::vector<const CompilerDriver::CallPatchInformation*> CallPatches;
791  const CallPatches& code_to_patch = compiler_driver_.GetCodeToPatch();
792  for (size_t i = 0; i < code_to_patch.size(); i++) {
793    const CompilerDriver::CallPatchInformation* patch = code_to_patch[i];
794    ArtMethod* target = GetTargetMethod(patch);
795    uintptr_t quick_code = reinterpret_cast<uintptr_t>(class_linker->GetQuickOatCodeFor(target));
796    DCHECK_NE(quick_code, 0U) << PrettyMethod(target);
797    uintptr_t code_base = reinterpret_cast<uintptr_t>(&oat_file_->GetOatHeader());
798    uintptr_t code_offset = quick_code - code_base;
799    bool is_quick_offset = false;
800    if (quick_code == reinterpret_cast<uintptr_t>(GetQuickToInterpreterBridge())) {
801      is_quick_offset = true;
802      code_offset = quick_to_interpreter_bridge_offset_;
803    } else if (quick_code ==
804        reinterpret_cast<uintptr_t>(class_linker->GetQuickGenericJniTrampoline())) {
805      CHECK(target->IsNative());
806      is_quick_offset = true;
807      code_offset = quick_generic_jni_trampoline_offset_;
808    }
809    uintptr_t value;
810    if (patch->IsRelative()) {
811      // value to patch is relative to the location being patched
812      const void* quick_oat_code =
813        class_linker->GetQuickOatCodeFor(patch->GetDexFile(),
814                                         patch->GetReferrerClassDefIdx(),
815                                         patch->GetReferrerMethodIdx());
816      if (is_quick_offset) {
817        // If its a quick offset it means that we are doing a relative patch from the class linker
818        // oat_file to the image writer oat_file so we need to adjust the quick oat code to be the
819        // one in the image writer oat_file.
820        quick_code = PointerToLowMemUInt32(GetOatAddress(code_offset));
821        quick_oat_code =
822            reinterpret_cast<const void*>(reinterpret_cast<uintptr_t>(quick_oat_code) +
823                reinterpret_cast<uintptr_t>(oat_data_begin_) - code_base);
824      }
825      uintptr_t base = reinterpret_cast<uintptr_t>(quick_oat_code);
826      uintptr_t patch_location = base + patch->GetLiteralOffset();
827      value = quick_code - patch_location + patch->RelativeOffset();
828    } else {
829      value = PointerToLowMemUInt32(GetOatAddress(code_offset));
830    }
831    SetPatchLocation(patch, value);
832  }
833
834  const CallPatches& methods_to_patch = compiler_driver_.GetMethodsToPatch();
835  for (size_t i = 0; i < methods_to_patch.size(); i++) {
836    const CompilerDriver::CallPatchInformation* patch = methods_to_patch[i];
837    ArtMethod* target = GetTargetMethod(patch);
838    SetPatchLocation(patch, PointerToLowMemUInt32(GetImageAddress(target)));
839  }
840
841  const std::vector<const CompilerDriver::TypePatchInformation*>& classes_to_patch =
842      compiler_driver_.GetClassesToPatch();
843  for (size_t i = 0; i < classes_to_patch.size(); i++) {
844    const CompilerDriver::TypePatchInformation* patch = classes_to_patch[i];
845    Class* target = GetTargetType(patch);
846    SetPatchLocation(patch, PointerToLowMemUInt32(GetImageAddress(target)));
847  }
848
849  // Update the image header with the new checksum after patching
850  ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
851  image_header->SetOatChecksum(oat_file_->GetOatHeader().GetChecksum());
852  self->EndAssertNoThreadSuspension(old_cause);
853}
854
855void ImageWriter::SetPatchLocation(const CompilerDriver::PatchInformation* patch, uint32_t value) {
856  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
857  const void* quick_oat_code = class_linker->GetQuickOatCodeFor(patch->GetDexFile(),
858                                                                patch->GetReferrerClassDefIdx(),
859                                                                patch->GetReferrerMethodIdx());
860  OatHeader& oat_header = const_cast<OatHeader&>(oat_file_->GetOatHeader());
861  // TODO: make this Thumb2 specific
862  uint8_t* base = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(quick_oat_code) & ~0x1);
863  uint32_t* patch_location = reinterpret_cast<uint32_t*>(base + patch->GetLiteralOffset());
864  if (kIsDebugBuild) {
865    if (patch->IsCall()) {
866      const CompilerDriver::CallPatchInformation* cpatch = patch->AsCall();
867      const DexFile::MethodId& id = cpatch->GetTargetDexFile()->GetMethodId(cpatch->GetTargetMethodIdx());
868      uint32_t expected = reinterpret_cast<uintptr_t>(&id) & 0xFFFFFFFF;
869      uint32_t actual = *patch_location;
870      CHECK(actual == expected || actual == value) << std::hex
871          << "actual=" << actual
872          << "expected=" << expected
873          << "value=" << value;
874    }
875    if (patch->IsType()) {
876      const CompilerDriver::TypePatchInformation* tpatch = patch->AsType();
877      const DexFile::TypeId& id = tpatch->GetDexFile().GetTypeId(tpatch->GetTargetTypeIdx());
878      uint32_t expected = reinterpret_cast<uintptr_t>(&id) & 0xFFFFFFFF;
879      uint32_t actual = *patch_location;
880      CHECK(actual == expected || actual == value) << std::hex
881          << "actual=" << actual
882          << "expected=" << expected
883          << "value=" << value;
884    }
885  }
886  *patch_location = value;
887  oat_header.UpdateChecksum(patch_location, sizeof(value));
888}
889
890}  // namespace art
891