image_writer.cc revision 22d5e735f403c57525fe868304c7123f0ce66399
1/* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "image_writer.h" 18 19#include <sys/stat.h> 20 21#include <memory> 22#include <vector> 23 24#include "base/logging.h" 25#include "base/unix_file/fd_file.h" 26#include "class_linker.h" 27#include "compiled_method.h" 28#include "dex_file-inl.h" 29#include "driver/compiler_driver.h" 30#include "elf_file.h" 31#include "elf_utils.h" 32#include "elf_writer.h" 33#include "gc/accounting/card_table-inl.h" 34#include "gc/accounting/heap_bitmap.h" 35#include "gc/accounting/space_bitmap-inl.h" 36#include "gc/heap.h" 37#include "gc/space/large_object_space.h" 38#include "gc/space/space-inl.h" 39#include "globals.h" 40#include "image.h" 41#include "intern_table.h" 42#include "lock_word.h" 43#include "mirror/art_field-inl.h" 44#include "mirror/art_method-inl.h" 45#include "mirror/array-inl.h" 46#include "mirror/class-inl.h" 47#include "mirror/class_loader.h" 48#include "mirror/dex_cache-inl.h" 49#include "mirror/object-inl.h" 50#include "mirror/object_array-inl.h" 51#include "mirror/string-inl.h" 52#include "oat.h" 53#include "oat_file.h" 54#include "runtime.h" 55#include "scoped_thread_state_change.h" 56#include "handle_scope-inl.h" 57#include "utils.h" 58 59using ::art::mirror::ArtField; 60using ::art::mirror::ArtMethod; 61using ::art::mirror::Class; 62using ::art::mirror::DexCache; 63using ::art::mirror::EntryPointFromInterpreter; 64using 
::art::mirror::Object; 65using ::art::mirror::ObjectArray; 66using ::art::mirror::String; 67 68namespace art { 69 70bool ImageWriter::Write(const std::string& image_filename, 71 uintptr_t image_begin, 72 const std::string& oat_filename, 73 const std::string& oat_location) { 74 CHECK(!image_filename.empty()); 75 76 CHECK_NE(image_begin, 0U); 77 image_begin_ = reinterpret_cast<byte*>(image_begin); 78 79 ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); 80 81 std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename.c_str())); 82 if (oat_file.get() == NULL) { 83 LOG(ERROR) << "Failed to open oat file " << oat_filename << " for " << oat_location; 84 return false; 85 } 86 std::string error_msg; 87 oat_file_ = OatFile::OpenWritable(oat_file.get(), oat_location, &error_msg); 88 if (oat_file_ == nullptr) { 89 LOG(ERROR) << "Failed to open writable oat file " << oat_filename << " for " << oat_location 90 << ": " << error_msg; 91 return false; 92 } 93 CHECK_EQ(class_linker->RegisterOatFile(oat_file_), oat_file_); 94 95 interpreter_to_interpreter_bridge_offset_ = 96 oat_file_->GetOatHeader().GetInterpreterToInterpreterBridgeOffset(); 97 interpreter_to_compiled_code_bridge_offset_ = 98 oat_file_->GetOatHeader().GetInterpreterToCompiledCodeBridgeOffset(); 99 100 jni_dlsym_lookup_offset_ = oat_file_->GetOatHeader().GetJniDlsymLookupOffset(); 101 102 portable_imt_conflict_trampoline_offset_ = 103 oat_file_->GetOatHeader().GetPortableImtConflictTrampolineOffset(); 104 portable_resolution_trampoline_offset_ = 105 oat_file_->GetOatHeader().GetPortableResolutionTrampolineOffset(); 106 portable_to_interpreter_bridge_offset_ = 107 oat_file_->GetOatHeader().GetPortableToInterpreterBridgeOffset(); 108 109 quick_generic_jni_trampoline_offset_ = 110 oat_file_->GetOatHeader().GetQuickGenericJniTrampolineOffset(); 111 quick_imt_conflict_trampoline_offset_ = 112 oat_file_->GetOatHeader().GetQuickImtConflictTrampolineOffset(); 113 quick_resolution_trampoline_offset_ = 
114 oat_file_->GetOatHeader().GetQuickResolutionTrampolineOffset(); 115 quick_to_interpreter_bridge_offset_ = 116 oat_file_->GetOatHeader().GetQuickToInterpreterBridgeOffset(); 117 { 118 Thread::Current()->TransitionFromSuspendedToRunnable(); 119 PruneNonImageClasses(); // Remove junk 120 ComputeLazyFieldsForImageClasses(); // Add useful information 121 ComputeEagerResolvedStrings(); 122 Thread::Current()->TransitionFromRunnableToSuspended(kNative); 123 } 124 gc::Heap* heap = Runtime::Current()->GetHeap(); 125 heap->CollectGarbage(false); // Remove garbage. 126 127 if (!AllocMemory()) { 128 return false; 129 } 130 131 if (kIsDebugBuild) { 132 ScopedObjectAccess soa(Thread::Current()); 133 CheckNonImageClassesRemoved(); 134 } 135 136 Thread::Current()->TransitionFromSuspendedToRunnable(); 137 size_t oat_loaded_size = 0; 138 size_t oat_data_offset = 0; 139 ElfWriter::GetOatElfInformation(oat_file.get(), oat_loaded_size, oat_data_offset); 140 CalculateNewObjectOffsets(oat_loaded_size, oat_data_offset); 141 CopyAndFixupObjects(); 142 143 PatchOatCodeAndMethods(oat_file.get()); 144 Thread::Current()->TransitionFromRunnableToSuspended(kNative); 145 146 std::unique_ptr<File> image_file(OS::CreateEmptyFile(image_filename.c_str())); 147 ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin()); 148 if (image_file.get() == NULL) { 149 LOG(ERROR) << "Failed to open image file " << image_filename; 150 return false; 151 } 152 if (fchmod(image_file->Fd(), 0644) != 0) { 153 PLOG(ERROR) << "Failed to make image file world readable: " << image_filename; 154 return EXIT_FAILURE; 155 } 156 157 // Write out the image. 158 CHECK_EQ(image_end_, image_header->GetImageSize()); 159 if (!image_file->WriteFully(image_->Begin(), image_end_)) { 160 PLOG(ERROR) << "Failed to write image file " << image_filename; 161 return false; 162 } 163 164 // Write out the image bitmap at the page aligned start of the image end. 
165 CHECK_ALIGNED(image_header->GetImageBitmapOffset(), kPageSize); 166 if (!image_file->Write(reinterpret_cast<char*>(image_bitmap_->Begin()), 167 image_header->GetImageBitmapSize(), 168 image_header->GetImageBitmapOffset())) { 169 PLOG(ERROR) << "Failed to write image file " << image_filename; 170 return false; 171 } 172 173 return true; 174} 175 176void ImageWriter::SetImageOffset(mirror::Object* object, size_t offset) { 177 DCHECK(object != nullptr); 178 DCHECK_NE(offset, 0U); 179 DCHECK(!IsImageOffsetAssigned(object)); 180 mirror::Object* obj = reinterpret_cast<mirror::Object*>(image_->Begin() + offset); 181 DCHECK_ALIGNED(obj, kObjectAlignment); 182 image_bitmap_->Set(obj); 183 // Before we stomp over the lock word, save the hash code for later. 184 Monitor::Deflate(Thread::Current(), object);; 185 LockWord lw(object->GetLockWord(false)); 186 switch (lw.GetState()) { 187 case LockWord::kFatLocked: { 188 LOG(FATAL) << "Fat locked object " << obj << " found during object copy"; 189 break; 190 } 191 case LockWord::kThinLocked: { 192 LOG(FATAL) << "Thin locked object " << obj << " found during object copy"; 193 break; 194 } 195 case LockWord::kUnlocked: 196 // No hash, don't need to save it. 
197 break; 198 case LockWord::kHashCode: 199 saved_hashes_.push_back(std::make_pair(obj, lw.GetHashCode())); 200 break; 201 default: 202 LOG(FATAL) << "Unreachable."; 203 break; 204 } 205 object->SetLockWord(LockWord::FromForwardingAddress(offset), false); 206 DCHECK(IsImageOffsetAssigned(object)); 207} 208 209void ImageWriter::AssignImageOffset(mirror::Object* object) { 210 DCHECK(object != nullptr); 211 SetImageOffset(object, image_end_); 212 image_end_ += RoundUp(object->SizeOf(), 8); // 64-bit alignment 213 DCHECK_LT(image_end_, image_->Size()); 214} 215 216bool ImageWriter::IsImageOffsetAssigned(mirror::Object* object) const { 217 DCHECK(object != nullptr); 218 return object->GetLockWord(false).GetState() == LockWord::kForwardingAddress; 219} 220 221size_t ImageWriter::GetImageOffset(mirror::Object* object) const { 222 DCHECK(object != nullptr); 223 DCHECK(IsImageOffsetAssigned(object)); 224 LockWord lock_word = object->GetLockWord(false); 225 size_t offset = lock_word.ForwardingAddress(); 226 DCHECK_LT(offset, image_end_); 227 return offset; 228} 229 230bool ImageWriter::AllocMemory() { 231 size_t length = RoundUp(Runtime::Current()->GetHeap()->GetTotalMemory(), kPageSize); 232 std::string error_msg; 233 image_.reset(MemMap::MapAnonymous("image writer image", NULL, length, PROT_READ | PROT_WRITE, 234 true, &error_msg)); 235 if (UNLIKELY(image_.get() == nullptr)) { 236 LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg; 237 return false; 238 } 239 240 // Create the image bitmap. 
  image_bitmap_.reset(gc::accounting::ContinuousSpaceBitmap::Create("image bitmap", image_->Begin(),
                                                                    length));
  if (image_bitmap_.get() == nullptr) {
    LOG(ERROR) << "Failed to allocate memory for image bitmap";
    return false;
  }
  return true;
}

// Forces computation of lazily-initialized Class state (names) for all classes,
// so the values are present in the image rather than computed at runtime.
void ImageWriter::ComputeLazyFieldsForImageClasses() {
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  class_linker->VisitClassesWithoutClassesLock(ComputeLazyFieldsForClassesVisitor, NULL);
}

// Per-class visitor: computes (and thereby caches) the class name.
bool ImageWriter::ComputeLazyFieldsForClassesVisitor(Class* c, void* /*arg*/) {
  Thread* self = Thread::Current();
  StackHandleScope<1> hs(self);
  mirror::Class::ComputeName(hs.NewHandle(c));
  return true;
}

// Heap-walk callback: for each String object, fill in the resolved-string slot of
// every dex cache whose dex file contains that string, so lookups are pre-resolved
// in the image.
void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg) {
  if (!obj->GetClass()->IsStringClass()) {
    return;
  }
  mirror::String* string = obj->AsString();
  // Raw UTF-16 payload of the string (offset into its backing char array).
  const uint16_t* utf16_string = string->GetCharArray()->GetData() + string->GetOffset();
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock());
  size_t dex_cache_count = class_linker->GetDexCacheCount();
  for (size_t i = 0; i < dex_cache_count; ++i) {
    DexCache* dex_cache = class_linker->GetDexCache(i);
    const DexFile& dex_file = *dex_cache->GetDexFile();
    const DexFile::StringId* string_id;
    if (UNLIKELY(string->GetLength() == 0)) {
      // Empty string: look it up by its (empty) UTF-8 form instead of the UTF-16 data.
      string_id = dex_file.FindStringId("");
    } else {
      string_id = dex_file.FindStringId(utf16_string);
    }
    if (string_id != nullptr) {
      // This string occurs in this dex file, assign the dex cache entry.
      uint32_t string_idx = dex_file.GetIndexForStringId(*string_id);
      if (dex_cache->GetResolvedString(string_idx) == NULL) {
        dex_cache->SetResolvedString(string_idx, string);
      }
    }
  }
}

// Walks every heap object, eagerly resolving strings into the dex caches.
void ImageWriter::ComputeEagerResolvedStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  Runtime::Current()->GetHeap()->VisitObjects(ComputeEagerResolvedStringsCallback, this);
}

// True if the compiler driver selected this class for inclusion in the image.
bool ImageWriter::IsImageClass(Class* klass) {
  return compiler_driver_.IsImageClass(klass->GetDescriptor().c_str());
}

// Context passed through the void* arg of NonImageClassesVisitor.
struct NonImageClasses {
  ImageWriter* image_writer;
  std::set<std::string>* non_image_classes;
};

// Removes classes that are not part of the image from the class linker and
// clears dangling references to them from the dex caches. No-op when no
// explicit image class list was configured.
void ImageWriter::PruneNonImageClasses() {
  if (compiler_driver_.GetImageClasses() == NULL) {
    return;
  }
  Runtime* runtime = Runtime::Current();
  ClassLinker* class_linker = runtime->GetClassLinker();

  // Make a list of classes we would like to prune.
  std::set<std::string> non_image_classes;
  NonImageClasses context;
  context.image_writer = this;
  context.non_image_classes = &non_image_classes;
  class_linker->VisitClasses(NonImageClassesVisitor, &context);

  // Remove the undesired classes from the class roots.
  for (const std::string& it : non_image_classes) {
    class_linker->RemoveClass(it.c_str(), NULL);
  }

  // Clear references to removed classes from the DexCaches.
  ArtMethod* resolution_method = runtime->GetResolutionMethod();
  ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock());
  size_t dex_cache_count = class_linker->GetDexCacheCount();
  for (size_t idx = 0; idx < dex_cache_count; ++idx) {
    DexCache* dex_cache = class_linker->GetDexCache(idx);
    // Pruned types are simply nulled out...
    for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
      Class* klass = dex_cache->GetResolvedType(i);
      if (klass != NULL && !IsImageClass(klass)) {
        dex_cache->SetResolvedType(i, NULL);
      }
    }
    // ...while pruned methods are redirected to the resolution method so calls
    // re-resolve at runtime...
    for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
      ArtMethod* method = dex_cache->GetResolvedMethod(i);
      if (method != NULL && !IsImageClass(method->GetDeclaringClass())) {
        dex_cache->SetResolvedMethod(i, resolution_method);
      }
    }
    // ...and pruned fields are nulled out.
    for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
      ArtField* field = dex_cache->GetResolvedField(i);
      if (field != NULL && !IsImageClass(field->GetDeclaringClass())) {
        dex_cache->SetResolvedField(i, NULL);
      }
    }
  }
}

// Class visitor: collects descriptors of classes that are NOT image classes
// into the NonImageClasses context for later pruning.
bool ImageWriter::NonImageClassesVisitor(Class* klass, void* arg) {
  NonImageClasses* context = reinterpret_cast<NonImageClasses*>(arg);
  if (!context->image_writer->IsImageClass(klass)) {
    context->non_image_classes->insert(klass->GetDescriptor());
  }
  return true;
}

// Debug check: after pruning, no non-image class should remain on the heap.
void ImageWriter::CheckNonImageClassesRemoved()
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  if (compiler_driver_.GetImageClasses() != nullptr) {
    gc::Heap* heap = Runtime::Current()->GetHeap();
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    heap->VisitObjects(CheckNonImageClassesRemovedCallback, this);
  }
}

// Heap-walk callback for CheckNonImageClassesRemoved: aborts (CHECK) if a
// surviving Class object is not an image class, dumping the configured image
// class list first to aid debugging.
void ImageWriter::CheckNonImageClassesRemovedCallback(Object* obj, void* arg) {
  ImageWriter* image_writer = reinterpret_cast<ImageWriter*>(arg);
  if (obj->IsClass()) {
    Class* klass = obj->AsClass();
    if (!image_writer->IsImageClass(klass)) {
      image_writer->DumpImageClasses();
      CHECK(image_writer->IsImageClass(klass)) << klass->GetDescriptor()
                                               << " " << PrettyDescriptor(klass);
    }
  }
}

// Logs every descriptor in the configured image class list.
void ImageWriter::DumpImageClasses() {
  CompilerDriver::DescriptorSet* image_classes = compiler_driver_.GetImageClasses();
  CHECK(image_classes != NULL);
  for (const std::string& image_class : *image_classes) {
    LOG(INFO) << " " << image_class;
  }
}

// Assigns an image offset to |obj|. Strings are canonicalized through the intern
// table first so duplicate strings share a single image slot.
void ImageWriter::CalculateObjectOffsets(Object* obj) {
  DCHECK(obj != NULL);
  // if it is a string, we want to intern it if its not interned.
  if (obj->GetClass()->IsStringClass()) {
    // we must be an interned string that was forward referenced and already assigned
    if (IsImageOffsetAssigned(obj)) {
      DCHECK_EQ(obj, obj->AsString()->Intern());
      return;
    }
    mirror::String* const interned = obj->AsString()->Intern();
    if (obj != interned) {
      if (!IsImageOffsetAssigned(interned)) {
        // interned obj is after us, allocate its location early
        AssignImageOffset(interned);
      }
      // point those looking for this object to the interned version.
      SetImageOffset(obj, GetImageOffset(interned));
      return;
    }
    // else (obj == interned), nothing to do but fall through to the normal case
  }

  AssignImageOffset(obj);
}

// Builds the ImageHeader::kImageRootsMax-sized Object[] of runtime roots
// (special methods, dex caches, class roots) that the runtime restores from
// when loading the image.
ObjectArray<Object>* ImageWriter::CreateImageRoots() const {
  Runtime* runtime = Runtime::Current();
  ClassLinker* class_linker = runtime->GetClassLinker();
  Thread* self = Thread::Current();
  StackHandleScope<3> hs(self);
  Handle<Class> object_array_class(hs.NewHandle(
      class_linker->FindSystemClass(self, "[Ljava/lang/Object;")));

  // build an Object[] of all the DexCaches used in the source_space_.
  // Since we can't hold the dex lock when allocating the dex_caches
  // ObjectArray, we lock the dex lock twice, first to get the number
  // of dex caches first and then lock it again to copy the dex
  // caches. We check that the number of dex caches does not change.
  size_t dex_cache_count;
  {
    ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock());
    dex_cache_count = class_linker->GetDexCacheCount();
  }
  Handle<ObjectArray<Object>> dex_caches(
      hs.NewHandle(ObjectArray<Object>::Alloc(self, object_array_class.Get(),
                                              dex_cache_count)));
  CHECK(dex_caches.Get() != nullptr) << "Failed to allocate a dex cache array.";
  {
    ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock());
    CHECK_EQ(dex_cache_count, class_linker->GetDexCacheCount())
        << "The number of dex caches changed.";
    for (size_t i = 0; i < dex_cache_count; ++i) {
      dex_caches->Set<false>(i, class_linker->GetDexCache(i));
    }
  }

  // build an Object[] of the roots needed to restore the runtime
  Handle<ObjectArray<Object>> image_roots(hs.NewHandle(
      ObjectArray<Object>::Alloc(self, object_array_class.Get(), ImageHeader::kImageRootsMax)));
  image_roots->Set<false>(ImageHeader::kResolutionMethod, runtime->GetResolutionMethod());
  image_roots->Set<false>(ImageHeader::kImtConflictMethod, runtime->GetImtConflictMethod());
  image_roots->Set<false>(ImageHeader::kDefaultImt, runtime->GetDefaultImt());
  image_roots->Set<false>(ImageHeader::kCalleeSaveMethod,
                          runtime->GetCalleeSaveMethod(Runtime::kSaveAll));
  image_roots->Set<false>(ImageHeader::kRefsOnlySaveMethod,
                          runtime->GetCalleeSaveMethod(Runtime::kRefsOnly));
  image_roots->Set<false>(ImageHeader::kRefsAndArgsSaveMethod,
                          runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
  image_roots->Set<false>(ImageHeader::kDexCaches, dex_caches.Get());
  image_roots->Set<false>(ImageHeader::kClassRoots, class_linker->GetClassRoots());
  // Every root slot must be populated.
  for (int i = 0; i < ImageHeader::kImageRootsMax; i++) {
    CHECK(image_roots->Get(i) != NULL);
  }
  return image_roots.Get();
}
// Walk instance fields of the given Class. Separate function to allow recursion on the super
// class.
void ImageWriter::WalkInstanceFields(mirror::Object* obj, mirror::Class* klass) {
  // Visit fields of parent classes first.
  StackHandleScope<1> hs(Thread::Current());
  Handle<mirror::Class> h_class(hs.NewHandle(klass));
  mirror::Class* super = h_class->GetSuperClass();
  if (super != nullptr) {
    WalkInstanceFields(obj, super);
  }
  // Then visit the reference-typed instance fields declared by this class.
  size_t num_reference_fields = h_class->NumReferenceInstanceFields();
  for (size_t i = 0; i < num_reference_fields; ++i) {
    mirror::ArtField* field = h_class->GetInstanceField(i);
    MemberOffset field_offset = field->GetOffset();
    mirror::Object* value = obj->GetFieldObject<mirror::Object>(field_offset);
    if (value != nullptr) {
      WalkFieldsInOrder(value);
    }
  }
}

// For an unvisited object, visit it then all its children found via fields.
void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
  // An already-assigned offset means this object (and its reachable graph) was visited.
  if (!IsImageOffsetAssigned(obj)) {
    // Walk instance fields of all objects
    StackHandleScope<2> hs(Thread::Current());
    Handle<mirror::Object> h_obj(hs.NewHandle(obj));
    Handle<mirror::Class> klass(hs.NewHandle(obj->GetClass()));
    // visit the object itself.
    CalculateObjectOffsets(h_obj.Get());
    WalkInstanceFields(h_obj.Get(), klass.Get());
    // Walk static fields of a Class.
    if (h_obj->IsClass()) {
      size_t num_static_fields = klass->NumReferenceStaticFields();
      for (size_t i = 0; i < num_static_fields; ++i) {
        mirror::ArtField* field = klass->GetStaticField(i);
        MemberOffset field_offset = field->GetOffset();
        mirror::Object* value = h_obj->GetFieldObject<mirror::Object>(field_offset);
        if (value != nullptr) {
          WalkFieldsInOrder(value);
        }
      }
    } else if (h_obj->IsObjectArray()) {
      // Walk elements of an object array.
      int32_t length = h_obj->AsObjectArray<mirror::Object>()->GetLength();
      for (int32_t i = 0; i < length; i++) {
        // Re-fetch the array each iteration; the recursive walk may suspend/move objects.
        mirror::ObjectArray<mirror::Object>* obj_array = h_obj->AsObjectArray<mirror::Object>();
        mirror::Object* value = obj_array->Get(i);
        if (value != nullptr) {
          WalkFieldsInOrder(value);
        }
      }
    }
  }
}

// Heap-walk trampoline into WalkFieldsInOrder.
void ImageWriter::WalkFieldsCallback(mirror::Object* obj, void* arg) {
  ImageWriter* writer = reinterpret_cast<ImageWriter*>(arg);
  DCHECK(writer != nullptr);
  writer->WalkFieldsInOrder(obj);
}

// Lays out the image: assigns an offset to every live object (via the field walk),
// then computes the oat file placement and writes the ImageHeader at the start of
// the image buffer.
void ImageWriter::CalculateNewObjectOffsets(size_t oat_loaded_size, size_t oat_data_offset) {
  CHECK_NE(0U, oat_loaded_size);
  Thread* self = Thread::Current();
  StackHandleScope<1> hs(self);
  Handle<ObjectArray<Object>> image_roots(hs.NewHandle(CreateImageRoots()));

  gc::Heap* heap = Runtime::Current()->GetHeap();
  DCHECK_EQ(0U, image_end_);

  // Leave space for the header, but do not write it yet, we need to
  // know where image_roots is going to end up
  image_end_ += RoundUp(sizeof(ImageHeader), 8);  // 64-bit-alignment

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // TODO: Image spaces only?
    const char* old = self->StartAssertNoThreadSuspension("ImageWriter");
    DCHECK_LT(image_end_, image_->Size());
    // Clear any pre-existing monitors which may have been in the monitor words.
    heap->VisitObjects(WalkFieldsCallback, this);
    self->EndAssertNoThreadSuspension(old);
  }

  // The oat file sits immediately after the (page-aligned) image in the target mapping.
  const byte* oat_file_begin = image_begin_ + RoundUp(image_end_, kPageSize);
  const byte* oat_file_end = oat_file_begin + oat_loaded_size;
  oat_data_begin_ = oat_file_begin + oat_data_offset;
  const byte* oat_data_end = oat_data_begin_ + oat_file_->Size();

  // Return to write header at start of image with future location of image_roots. At this point,
  // image_end_ is the size of the image (excluding bitmaps).
  // One bitmap byte covers kBitsPerByte object-alignment-granules of heap.
  const size_t heap_bytes_per_bitmap_byte = kBitsPerByte * kObjectAlignment;
  const size_t bitmap_bytes = RoundUp(image_end_, heap_bytes_per_bitmap_byte) /
      heap_bytes_per_bitmap_byte;
  ImageHeader image_header(PointerToLowMemUInt32(image_begin_),
                           static_cast<uint32_t>(image_end_),
                           RoundUp(image_end_, kPageSize),
                           RoundUp(bitmap_bytes, kPageSize),
                           PointerToLowMemUInt32(GetImageAddress(image_roots.Get())),
                           oat_file_->GetOatHeader().GetChecksum(),
                           PointerToLowMemUInt32(oat_file_begin),
                           PointerToLowMemUInt32(oat_data_begin_),
                           PointerToLowMemUInt32(oat_data_end),
                           PointerToLowMemUInt32(oat_file_end));
  memcpy(image_->Begin(), &image_header, sizeof(image_header));

  // Note that image_end_ is left at end of used space
}

// Copies every live object into the image buffer and rewrites its references to
// image addresses; restores saved hash codes into the copies afterwards.
void ImageWriter::CopyAndFixupObjects()
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  Thread* self = Thread::Current();
  const char* old_cause = self->StartAssertNoThreadSuspension("ImageWriter");
  gc::Heap* heap = Runtime::Current()->GetHeap();
  // TODO: heap validation can't handle this fix up pass
  heap->DisableObjectValidation();
  // TODO: Image spaces only?
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  heap->VisitObjects(CopyAndFixupObjectsCallback, this);
  // Fix up the object previously had hash codes.
  for (const std::pair<mirror::Object*, uint32_t>& hash_pair : saved_hashes_) {
    hash_pair.first->SetLockWord(LockWord::FromHashCode(hash_pair.second), false);
  }
  saved_hashes_.clear();
  self->EndAssertNoThreadSuspension(old_cause);
}

// Heap-walk callback: memcpy the object to its assigned slot in the image buffer,
// clear the copy's lock word (the original holds a forwarding address), and fix up
// its references.
void ImageWriter::CopyAndFixupObjectsCallback(Object* obj, void* arg) {
  DCHECK(obj != nullptr);
  DCHECK(arg != nullptr);
  ImageWriter* image_writer = reinterpret_cast<ImageWriter*>(arg);
  // see GetLocalAddress for similar computation
  size_t offset = image_writer->GetImageOffset(obj);
  byte* dst = image_writer->image_->Begin() + offset;
  const byte* src = reinterpret_cast<const byte*>(obj);
  size_t n = obj->SizeOf();
  DCHECK_LT(offset + n, image_writer->image_->Size());
  memcpy(dst, src, n);
  Object* copy = reinterpret_cast<Object*>(dst);
  // Write in a hash code of objects which have inflated monitors or a hash code in their monitor
  // word.
  copy->SetLockWord(LockWord(), false);
  image_writer->FixupObject(obj, copy);
}

// Reference visitor used while fixing up a copied object: rewrites each reference
// field of the copy to the image address of its referent. Writes skip the write
// barrier since they target the image buffer, not the live heap.
class FixupVisitor {
 public:
  FixupVisitor(ImageWriter* image_writer, Object* copy) : image_writer_(image_writer), copy_(copy) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    Object* ref = obj->GetFieldObject<Object, kVerifyNone>(offset);
    // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
    // image.
    copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
        offset, image_writer_->GetImageAddress(ref));
  }

  // java.lang.ref.Reference visitor.
  void operator()(mirror::Class* /*klass*/, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
        mirror::Reference::ReferentOffset(), image_writer_->GetImageAddress(ref->GetReferent()));
  }

 protected:
  ImageWriter* const image_writer_;
  mirror::Object* const copy_;
};

// Specialization for Class objects (those with embedded IMT/vtables); fields past
// EmbeddedVTableOffset are currently left to the base fixup only.
class FixupClassVisitor FINAL : public FixupVisitor {
 public:
  FixupClassVisitor(ImageWriter* image_writer, Object* copy) : FixupVisitor(image_writer, copy) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    DCHECK(obj->IsClass());
    FixupVisitor::operator()(obj, offset, false);
    // Offsets before the embedded vtable are ordinary fields, handled above.
    if (offset.Uint32Value() < mirror::Class::EmbeddedVTableOffset().Uint32Value()) {
      return;
    }
  }

  void operator()(mirror::Class* /*klass*/, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    // A Class is never a java.lang.ref.Reference.
    LOG(FATAL) << "Reference not expected here.";
  }
};

// Rewrites all references in |copy| (the image-buffer clone of |orig|) to image
// addresses, including read-barrier pointers and ArtMethod entry points.
void ImageWriter::FixupObject(Object* orig, Object* copy) {
  DCHECK(orig != nullptr);
  DCHECK(copy != nullptr);
  if (kUseBakerOrBrooksReadBarrier) {
    orig->AssertReadBarrierPointer();
    if (kUseBrooksReadBarrier) {
      // Note the address 'copy' isn't the same as the image address of 'orig'.
      copy->SetReadBarrierPointer(GetImageAddress(orig));
      DCHECK_EQ(copy->GetReadBarrierPointer(), GetImageAddress(orig));
    }
  }
  // Classes with embedded IMT/vtables need the specialized visitor.
  if (orig->IsClass() && orig->AsClass()->ShouldHaveEmbeddedImtAndVTable()) {
    FixupClassVisitor visitor(this, copy);
    orig->VisitReferences<true /*visit class*/>(visitor, visitor);
  } else {
    FixupVisitor visitor(this, copy);
    orig->VisitReferences<true /*visit class*/>(visitor, visitor);
  }
  // ArtMethods additionally carry code pointers that must be relocated.
  if (orig->IsArtMethod<kVerifyNone>()) {
    FixupMethod(orig->AsArtMethod<kVerifyNone>(), down_cast<ArtMethod*>(copy));
  }
}

// Picks the quick-compiled entry point for |method|: its own oat code when usable,
// otherwise the generic-JNI trampoline, interpreter bridge, or resolution trampoline.
// Sets *quick_is_interpreted when the interpreter bridge is chosen.
const byte* ImageWriter::GetQuickCode(mirror::ArtMethod* method, bool* quick_is_interpreted) {
  // Special runtime methods and abstract methods are handled by the callers.
  DCHECK(!method->IsResolutionMethod() && !method->IsImtConflictMethod() &&
         !method->IsAbstract()) << PrettyMethod(method);

  // Use original code if it exists. Otherwise, set the code pointer to the resolution
  // trampoline.

  // Quick entrypoint:
  const byte* quick_code = GetOatAddress(method->GetQuickOatCodeOffset());
  *quick_is_interpreted = false;
  if (quick_code != nullptr &&
      (!method->IsStatic() || method->IsConstructor() || method->GetDeclaringClass()->IsInitialized())) {
    // We have code for a non-static or initialized method, just use the code.
  } else if (quick_code == nullptr && method->IsNative() &&
      (!method->IsStatic() || method->GetDeclaringClass()->IsInitialized())) {
    // Non-static or initialized native method missing compiled code, use generic JNI version.
    quick_code = GetOatAddress(quick_generic_jni_trampoline_offset_);
  } else if (quick_code == nullptr && !method->IsNative()) {
    // We don't have code at all for a non-native method, use the interpreter.
    quick_code = GetOatAddress(quick_to_interpreter_bridge_offset_);
    *quick_is_interpreted = true;
  } else {
    CHECK(!method->GetDeclaringClass()->IsInitialized());
    // We have code for a static method, but need to go through the resolution stub for class
    // initialization.
    quick_code = GetOatAddress(quick_resolution_trampoline_offset_);
  }
  return quick_code;
}

// Returns the quick entry point for |method|, mirroring the selection logic
// in FixupMethod() for the special runtime methods and abstract methods.
const byte* ImageWriter::GetQuickEntryPoint(mirror::ArtMethod* method) {
  // Calculate the quick entry point following the same logic as FixupMethod() below.
  // The resolution method has a special trampoline to call.
  if (UNLIKELY(method == Runtime::Current()->GetResolutionMethod())) {
    return GetOatAddress(quick_resolution_trampoline_offset_);
  } else if (UNLIKELY(method == Runtime::Current()->GetImtConflictMethod())) {
    return GetOatAddress(quick_imt_conflict_trampoline_offset_);
  } else {
    // We assume all methods have code. If they don't currently then we set them to the use the
    // resolution trampoline. Abstract methods never have code and so we need to make sure their
    // use results in an AbstractMethodError. We use the interpreter to achieve this.
    if (UNLIKELY(method->IsAbstract())) {
      return GetOatAddress(quick_to_interpreter_bridge_offset_);
    } else {
      bool quick_is_interpreted;
      return GetQuickCode(method, &quick_is_interpreted);
    }
  }
}

// Rewrites the entry points of the copied ArtMethod |copy| from the oat-file
// offsets stored by OatWriter into pointers relative to the future oat mapping.
void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
  // OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to
  // oat_begin_

  // The resolution method has a special trampoline to call.
  if (UNLIKELY(orig == Runtime::Current()->GetResolutionMethod())) {
    copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_resolution_trampoline_offset_));
    copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_resolution_trampoline_offset_));
  } else if (UNLIKELY(orig == Runtime::Current()->GetImtConflictMethod())) {
    copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_imt_conflict_trampoline_offset_));
    copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_imt_conflict_trampoline_offset_));
  } else {
    // We assume all methods have code. If they don't currently then we set them to the use the
    // resolution trampoline. Abstract methods never have code and so we need to make sure their
    // use results in an AbstractMethodError. We use the interpreter to achieve this.
    if (UNLIKELY(orig->IsAbstract())) {
      copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_to_interpreter_bridge_offset_));
      copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_to_interpreter_bridge_offset_));
      copy->SetEntryPointFromInterpreter<kVerifyNone>(reinterpret_cast<EntryPointFromInterpreter*>
          (const_cast<byte*>(GetOatAddress(interpreter_to_interpreter_bridge_offset_))));
    } else {
      bool quick_is_interpreted;
      const byte* quick_code = GetQuickCode(orig, &quick_is_interpreted);
      copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(quick_code);

      // Portable entrypoint:
      const byte* portable_code = GetOatAddress(orig->GetPortableOatCodeOffset());
      bool portable_is_interpreted = false;
      if (portable_code != nullptr &&
          (!orig->IsStatic() || orig->IsConstructor() || orig->GetDeclaringClass()->IsInitialized())) {
        // We have code for a non-static or initialized method, just use the code.
      } else if (portable_code == nullptr && orig->IsNative() &&
          (!orig->IsStatic() || orig->GetDeclaringClass()->IsInitialized())) {
        // Non-static or initialized native method missing compiled code, use generic JNI version.
        // TODO: generic JNI support for LLVM.
        portable_code = GetOatAddress(portable_resolution_trampoline_offset_);
      } else if (portable_code == nullptr && !orig->IsNative()) {
        // We don't have code at all for a non-native method, use the interpreter.
        portable_code = GetOatAddress(portable_to_interpreter_bridge_offset_);
        portable_is_interpreted = true;
      } else {
        CHECK(!orig->GetDeclaringClass()->IsInitialized());
        // We have code for a static method, but need to go through the resolution stub for class
        // initialization.
        portable_code = GetOatAddress(portable_resolution_trampoline_offset_);
      }
      copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(portable_code);

      // JNI entrypoint:
      if (orig->IsNative()) {
        // The native method's pointer is set to a stub to lookup via dlsym.
        // Note this is not the code_ pointer, that is handled above.
        copy->SetNativeMethod<kVerifyNone>(GetOatAddress(jni_dlsym_lookup_offset_));
      } else {
        // Normal (non-abstract non-native) methods have various tables to relocate.
        uint32_t native_gc_map_offset = orig->GetOatNativeGcMapOffset();
        const byte* native_gc_map = GetOatAddress(native_gc_map_offset);
        copy->SetNativeGcMap<kVerifyNone>(reinterpret_cast<const uint8_t*>(native_gc_map));
      }

      // Interpreter entrypoint:
      // Set the interpreter entrypoint depending on whether there is compiled code or not.
      uint32_t interpreter_code = (quick_is_interpreted && portable_is_interpreted)
          ? interpreter_to_interpreter_bridge_offset_
          : interpreter_to_compiled_code_bridge_offset_;
      copy->SetEntryPointFromInterpreter<kVerifyNone>(
          reinterpret_cast<EntryPointFromInterpreter*>(
              const_cast<byte*>(GetOatAddress(interpreter_code))));
    }
  }
}

// Resolves the ArtMethod targeted by a call patch record, verifying the dex
// cache agrees with the resolution result.
static ArtMethod* GetTargetMethod(const CompilerDriver::CallPatchInformation* patch)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  StackHandleScope<1> hs(Thread::Current());
  Handle<mirror::DexCache> dex_cache(
      hs.NewHandle(class_linker->FindDexCache(*patch->GetTargetDexFile())));
  ArtMethod* method = class_linker->ResolveMethod(*patch->GetTargetDexFile(),
                                                  patch->GetTargetMethodIdx(),
                                                  dex_cache,
                                                  NullHandle<mirror::ClassLoader>(),
                                                  NullHandle<mirror::ArtMethod>(),
                                                  patch->GetTargetInvokeType());
  CHECK(method != NULL)
      << patch->GetTargetDexFile()->GetLocation() << " " << patch->GetTargetMethodIdx();
  CHECK(!method->IsRuntimeMethod())
      << patch->GetTargetDexFile()->GetLocation() << " " << patch->GetTargetMethodIdx();
  CHECK(dex_cache->GetResolvedMethods()->Get(patch->GetTargetMethodIdx()) == method)
      << patch->GetTargetDexFile()->GetLocation() << " " << patch->GetReferrerMethodIdx() << " "
      << PrettyMethod(dex_cache->GetResolvedMethods()->Get(patch->GetTargetMethodIdx())) << " "
      << PrettyMethod(method);
  return method;
}

// Resolves the Class targeted by a type patch record, verifying the dex cache
// agrees with the resolution result.
static Class* GetTargetType(const CompilerDriver::TypePatchInformation* patch)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  StackHandleScope<2> hs(Thread::Current());
  Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(patch->GetDexFile())));
  Class* klass = class_linker->ResolveType(patch->GetDexFile(), patch->GetTargetTypeIdx(),
                                           dex_cache, NullHandle<mirror::ClassLoader>());
  CHECK(klass != NULL)
      <<
patch->GetDexFile().GetLocation() << " " << patch->GetTargetTypeIdx(); 836 CHECK(dex_cache->GetResolvedTypes()->Get(patch->GetTargetTypeIdx()) == klass) 837 << patch->GetDexFile().GetLocation() << " " << patch->GetReferrerMethodIdx() << " " 838 << PrettyClass(dex_cache->GetResolvedTypes()->Get(patch->GetTargetTypeIdx())) << " " 839 << PrettyClass(klass); 840 return klass; 841} 842 843void ImageWriter::PatchOatCodeAndMethods(File* elf_file) { 844 std::vector<uintptr_t> patches; 845 std::set<uintptr_t> patches_set; 846 auto maybe_push = [&patches, &patches_set] (uintptr_t p) { 847 if (patches_set.find(p) == patches_set.end()) { 848 patches.push_back(p); 849 patches_set.insert(p); 850 } 851 }; 852 const bool add_patches = compiler_driver_.GetCompilerOptions().GetIncludePatchInformation(); 853 if (add_patches) { 854 // TODO if we are adding patches the resulting ELF file might have a potentially rather large 855 // amount of free space where patches might have been placed. We should adjust the ELF file to 856 // get rid of this excess space. 
857 patches.reserve(compiler_driver_.GetCodeToPatch().size() + 858 compiler_driver_.GetMethodsToPatch().size() + 859 compiler_driver_.GetClassesToPatch().size()); 860 } 861 uintptr_t loc = 0; 862 Thread* self = Thread::Current(); 863 ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); 864 const char* old_cause = self->StartAssertNoThreadSuspension("ImageWriter"); 865 866 typedef std::vector<const CompilerDriver::CallPatchInformation*> CallPatches; 867 const CallPatches& code_to_patch = compiler_driver_.GetCodeToPatch(); 868 for (size_t i = 0; i < code_to_patch.size(); i++) { 869 const CompilerDriver::CallPatchInformation* patch = code_to_patch[i]; 870 ArtMethod* target = GetTargetMethod(patch); 871 uintptr_t quick_code = reinterpret_cast<uintptr_t>(class_linker->GetQuickOatCodeFor(target)); 872 DCHECK_NE(quick_code, 0U) << PrettyMethod(target); 873 uintptr_t code_base = reinterpret_cast<uintptr_t>(&oat_file_->GetOatHeader()); 874 uintptr_t code_offset = quick_code - code_base; 875 bool is_quick_offset = false; 876 if (quick_code == reinterpret_cast<uintptr_t>(GetQuickToInterpreterBridge())) { 877 is_quick_offset = true; 878 code_offset = quick_to_interpreter_bridge_offset_; 879 } else if (quick_code == 880 reinterpret_cast<uintptr_t>(class_linker->GetQuickGenericJniTrampoline())) { 881 CHECK(target->IsNative()); 882 is_quick_offset = true; 883 code_offset = quick_generic_jni_trampoline_offset_; 884 } 885 uintptr_t value; 886 if (patch->IsRelative()) { 887 // value to patch is relative to the location being patched 888 const void* quick_oat_code = 889 class_linker->GetQuickOatCodeFor(patch->GetDexFile(), 890 patch->GetReferrerClassDefIdx(), 891 patch->GetReferrerMethodIdx()); 892 if (is_quick_offset) { 893 // If its a quick offset it means that we are doing a relative patch from the class linker 894 // oat_file to the image writer oat_file so we need to adjust the quick oat code to be the 895 // one in the image writer oat_file. 
896 quick_code = PointerToLowMemUInt32(GetOatAddress(code_offset)); 897 quick_oat_code = 898 reinterpret_cast<const void*>(reinterpret_cast<uintptr_t>(quick_oat_code) + 899 reinterpret_cast<uintptr_t>(oat_data_begin_) - code_base); 900 } 901 uintptr_t base = reinterpret_cast<uintptr_t>(quick_oat_code); 902 uintptr_t patch_location = base + patch->GetLiteralOffset(); 903 value = quick_code - patch_location + patch->RelativeOffset(); 904 } else { 905 value = PointerToLowMemUInt32(GetOatAddress(code_offset)); 906 } 907 SetPatchLocation(patch, value, &loc); 908 if (add_patches && !patch->AsCall()->IsRelative()) { 909 maybe_push(loc); 910 } 911 } 912 913 const CallPatches& methods_to_patch = compiler_driver_.GetMethodsToPatch(); 914 for (size_t i = 0; i < methods_to_patch.size(); i++) { 915 const CompilerDriver::CallPatchInformation* patch = methods_to_patch[i]; 916 ArtMethod* target = GetTargetMethod(patch); 917 SetPatchLocation(patch, PointerToLowMemUInt32(GetImageAddress(target)), &loc); 918 if (add_patches && !patch->AsCall()->IsRelative()) { 919 maybe_push(loc); 920 } 921 } 922 923 const std::vector<const CompilerDriver::TypePatchInformation*>& classes_to_patch = 924 compiler_driver_.GetClassesToPatch(); 925 for (size_t i = 0; i < classes_to_patch.size(); i++) { 926 const CompilerDriver::TypePatchInformation* patch = classes_to_patch[i]; 927 Class* target = GetTargetType(patch); 928 SetPatchLocation(patch, PointerToLowMemUInt32(GetImageAddress(target)), &loc); 929 if (add_patches) { 930 maybe_push(loc); 931 } 932 } 933 934 // Update the image header with the new checksum after patching 935 ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin()); 936 image_header->SetOatChecksum(oat_file_->GetOatHeader().GetChecksum()); 937 self->EndAssertNoThreadSuspension(old_cause); 938 939 // Update the ElfFiles SHT_OAT_PATCH section to include the patches. 
940 if (add_patches) { 941 std::string err; 942 // TODO we are mapping in the contents of this file twice. We should be able 943 // to do it only once, which would be better. 944 std::unique_ptr<ElfFile> file(ElfFile::Open(elf_file, true, false, &err)); 945 if (file == nullptr) { 946 LOG(ERROR) << err; 947 } 948 Elf32_Shdr* shdr = file->FindSectionByName(".oat_patches"); 949 if (shdr != nullptr) { 950 CHECK_EQ(shdr, file->FindSectionByType(SHT_OAT_PATCH)) 951 << "Incorrect type for .oat_patches section"; 952 CHECK_LE(patches.size() * sizeof(uintptr_t), shdr->sh_size) 953 << "We got more patches than anticipated"; 954 CHECK_LE(reinterpret_cast<uintptr_t>(file->Begin()) + shdr->sh_offset + shdr->sh_size, 955 reinterpret_cast<uintptr_t>(file->End())) << "section is too large"; 956 CHECK(shdr == &file->GetSectionHeader(file->GetSectionHeaderNum() - 1) || 957 shdr->sh_offset + shdr->sh_size <= (shdr + 1)->sh_offset) 958 << "Section overlaps onto next section"; 959 // It's mmap'd so we can just memcpy. 960 memcpy(file->Begin() + shdr->sh_offset, patches.data(), patches.size()*sizeof(uintptr_t)); 961 // TODO We should fill in the newly empty space between the last patch and the start of the 962 // next section by moving the following sections down if possible. 
963 shdr->sh_size = patches.size() * sizeof(uintptr_t); 964 } else { 965 LOG(ERROR) << "Unable to find section header for SHT_OAT_PATCH"; 966 } 967 } 968} 969 970void ImageWriter::SetPatchLocation(const CompilerDriver::PatchInformation* patch, uint32_t value, 971 uintptr_t* patched_ptr) { 972 ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); 973 const void* quick_oat_code = class_linker->GetQuickOatCodeFor(patch->GetDexFile(), 974 patch->GetReferrerClassDefIdx(), 975 patch->GetReferrerMethodIdx()); 976 OatHeader& oat_header = const_cast<OatHeader&>(oat_file_->GetOatHeader()); 977 // TODO: make this Thumb2 specific 978 uint8_t* base = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(quick_oat_code) & ~0x1); 979 uint32_t* patch_location = reinterpret_cast<uint32_t*>(base + patch->GetLiteralOffset()); 980 if (kIsDebugBuild) { 981 if (patch->IsCall()) { 982 const CompilerDriver::CallPatchInformation* cpatch = patch->AsCall(); 983 const DexFile::MethodId& id = cpatch->GetTargetDexFile()->GetMethodId(cpatch->GetTargetMethodIdx()); 984 uint32_t expected = reinterpret_cast<uintptr_t>(&id) & 0xFFFFFFFF; 985 uint32_t actual = *patch_location; 986 CHECK(actual == expected || actual == value) << std::hex 987 << "actual=" << actual 988 << "expected=" << expected 989 << "value=" << value; 990 } 991 if (patch->IsType()) { 992 const CompilerDriver::TypePatchInformation* tpatch = patch->AsType(); 993 const DexFile::TypeId& id = tpatch->GetDexFile().GetTypeId(tpatch->GetTargetTypeIdx()); 994 uint32_t expected = reinterpret_cast<uintptr_t>(&id) & 0xFFFFFFFF; 995 uint32_t actual = *patch_location; 996 CHECK(actual == expected || actual == value) << std::hex 997 << "actual=" << actual 998 << "expected=" << expected 999 << "value=" << value; 1000 } 1001 } 1002 *patch_location = value; 1003 oat_header.UpdateChecksum(patch_location, sizeof(value)); 1004 1005 uintptr_t loc = reinterpret_cast<uintptr_t>(patch_location) - 1006 
(reinterpret_cast<uintptr_t>(oat_file_->Begin()) + oat_header.GetExecutableOffset()); 1007 CHECK_GT(reinterpret_cast<uintptr_t>(patch_location), 1008 reinterpret_cast<uintptr_t>(oat_file_->Begin()) + oat_header.GetExecutableOffset()); 1009 CHECK_LT(loc, oat_file_->Size() - oat_header.GetExecutableOffset()); 1010 1011 *patched_ptr = loc; 1012} 1013 1014} // namespace art 1015