/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_IMAGE_WRITER_H_
#define ART_COMPILER_IMAGE_WRITER_H_

#include <stdint.h>
#include "base/memory_tool.h"

#include <cstddef>
#include <memory>
#include <set>
#include <stack>
#include <string>
#include <ostream>

#include "base/bit_utils.h"
#include "base/dchecked_vector.h"
#include "base/length_prefixed_array.h"
#include "base/macros.h"
#include "driver/compiler_driver.h"
#include "gc/space/space.h"
#include "image.h"
#include "lock_word.h"
#include "mem_map.h"
#include "oat_file.h"
#include "mirror/dex_cache.h"
#include "os.h"
#include "safe_map.h"
#include "utils.h"

namespace art {
namespace gc {
namespace space {
class ImageSpace;
}  // namespace space
}  // namespace gc

class ClassTable;

static constexpr int kInvalidFd = -1;

// Write a Space built during compilation for use during execution.
class ImageWriter FINAL {
 public:
  ImageWriter(const CompilerDriver& compiler_driver,
              uintptr_t image_begin,
              bool compile_pic,
              bool compile_app_image,
              ImageHeader::StorageMode image_storage_mode,
              const std::vector<const char*>& oat_filenames,
              const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map);

  bool PrepareImageAddressSpace();

  bool IsImageAddressSpaceReady() const {
    DCHECK(!image_infos_.empty());
    for (const ImageInfo& image_info : image_infos_) {
      if (image_info.image_roots_address_ == 0u) {
        return false;
      }
    }
    return true;
  }

  template <typename T>
  T* GetImageAddress(T* object) const SHARED_REQUIRES(Locks::mutator_lock_) {
    if (object == nullptr || IsInBootImage(object)) {
      return object;
    } else {
      size_t oat_index = GetOatIndex(object);
      const ImageInfo& image_info = GetImageInfo(oat_index);
      return reinterpret_cast<T*>(image_info.image_begin_ + GetImageOffset(object));
    }
  }

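  // Get the relocated address of the given ArtMethod in the image it has been assigned to.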
  ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);

  template <typename PtrType>
  PtrType GetDexCacheArrayElementImageAddress(const DexFile* dex_file, uint32_t offset)
      const SHARED_REQUIRES(Locks::mutator_lock_) {
    auto oat_it = dex_file_oat_index_map_.find(dex_file);
    DCHECK(oat_it != dex_file_oat_index_map_.end());
    const ImageInfo& image_info = GetImageInfo(oat_it->second);
    auto it = image_info.dex_cache_array_starts_.find(dex_file);
    DCHECK(it != image_info.dex_cache_array_starts_.end());
    return reinterpret_cast<PtrType>(
        image_info.image_begin_ + image_info.bin_slot_offsets_[kBinDexCacheArray] +
            it->second + offset);
  }

  size_t GetOatFileOffset(size_t oat_index) const {
    return GetImageInfo(oat_index).oat_offset_;
  }

  const uint8_t* GetOatFileBegin(size_t oat_index) const {
    return GetImageInfo(oat_index).oat_file_begin_;
  }

  // If image_fd is not kInvalidFd, then we use that for the image file. Otherwise we open
  // the names in image_filenames.
  // If oat_fd is not kInvalidFd, then we use that for the oat file. Otherwise we open
  // the names in oat_filenames.
  bool Write(int image_fd,
             const std::vector<const char*>& image_filenames,
             const std::vector<const char*>& oat_filenames)
      REQUIRES(!Locks::mutator_lock_);

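  // Get the start address of the oat data for the given oat file
  // (see ImageInfo::oat_data_begin_).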
  uintptr_t GetOatDataBegin(size_t oat_index) {
    return reinterpret_cast<uintptr_t>(GetImageInfo(oat_index).oat_data_begin_);
  }

  // Get the index of the oat file containing the dex file.
  //
  // This "oat_index" is used to retrieve information about the memory layout
  // of the oat file and its associated image file, needed for link-time patching
  // of references to the image or across oat files.
  size_t GetOatIndexForDexFile(const DexFile* dex_file) const;

  // Get the index of the oat file containing the dex file served by the dex cache.
  size_t GetOatIndexForDexCache(mirror::DexCache* dex_cache) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Update the oat layout for the given oat file.
  // This will make the oat_offset for the next oat file valid.
  void UpdateOatFileLayout(size_t oat_index,
                           size_t oat_loaded_size,
                           size_t oat_data_offset,
                           size_t oat_data_size);
  // Update information about the oat header, i.e. checksum and trampoline offsets.
  void UpdateOatFileHeader(size_t oat_index, const OatHeader& oat_header);

 private:
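  // Work stack of (object, oat index) pairs whose bin slots still need to be assigned;
  // see TryAssignBinSlot() and ProcessWorkStack().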
  using WorkStack = std::stack<std::pair<mirror::Object*, size_t>>;

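  // Allocate the in-memory buffer (ImageInfo::image_) for each image being written.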
  bool AllocMemory();

  // Mark the objects defined in this space in the given live bitmap.
  void RecordImageAllocations() SHARED_REQUIRES(Locks::mutator_lock_);

  // Classify different kinds of bins that objects end up getting packed into during image writing.
  // Ordered from dirtiest to cleanest (until ArtMethods).
  enum Bin {
    kBinMiscDirty,                // Dex caches, object locks, etc...
    kBinClassVerified,            // Class verified, but initializers haven't been run
    // Unknown mix of clean/dirty:
    kBinRegular,
    kBinClassInitialized,         // Class initializers have been run
    // All classes get their own bins since their fields are often dirty
    kBinClassInitializedFinalStatics,  // Class initializers have been run, no non-final statics
    // Likely-clean:
    kBinString,                        // [String] Almost always immutable (except for obj header).
    // Add more bins here if we add more segregation code.
    // Non-mirror fields must be below.
    // ArtFields should always be clean.
    kBinArtField,
    // If the class is initialized, then the ArtMethods are probably clean.
    kBinArtMethodClean,
    // ArtMethods may be dirty if the class has native methods or a declaring class that isn't
    // initialized.
    kBinArtMethodDirty,
    // IMT (clean)
    kBinImTable,
    // Conflict tables (clean).
    kBinIMTConflictTable,
    // Runtime methods (always clean, do not have a length prefix array).
    kBinRuntimeMethod,
    // Dex cache arrays have a special slot for PC-relative addressing. Since they are
    // huge, their dirtiness is not important for the clean/dirty separation, so we
    // arbitrarily keep them at the end of the native data.
    kBinDexCacheArray,            // Arrays belonging to dex cache.
    kBinSize,
    // Number of bins which are for mirror objects.
    kBinMirrorCount = kBinArtField,
  };
  friend std::ostream& operator<<(std::ostream& stream, const Bin& bin);

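  // Kinds of native (non-mirror) data written into the image; each kind maps to a Bin via
  // BinTypeForNativeRelocationType().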
  enum NativeObjectRelocationType {
    kNativeObjectRelocationTypeArtField,
    kNativeObjectRelocationTypeArtFieldArray,
    kNativeObjectRelocationTypeArtMethodClean,
    kNativeObjectRelocationTypeArtMethodArrayClean,
    kNativeObjectRelocationTypeArtMethodDirty,
    kNativeObjectRelocationTypeArtMethodArrayDirty,
    kNativeObjectRelocationTypeRuntimeMethod,
    kNativeObjectRelocationTypeIMTable,
    kNativeObjectRelocationTypeIMTConflictTable,
    kNativeObjectRelocationTypeDexCacheArray,
  };
  friend std::ostream& operator<<(std::ostream& stream, const NativeObjectRelocationType& type);

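  // Well-known oat code entry points (bridges and trampolines) whose per-image offsets are kept
  // in ImageInfo::oat_address_offsets_ and resolved by GetOatAddress().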
  enum OatAddress {
    kOatAddressInterpreterToInterpreterBridge,
    kOatAddressInterpreterToCompiledCodeBridge,
    kOatAddressJNIDlsymLookup,
    kOatAddressQuickGenericJNITrampoline,
    kOatAddressQuickIMTConflictTrampoline,
    kOatAddressQuickResolutionTrampoline,
    kOatAddressQuickToInterpreterBridge,
    // Number of elements in the enum.
    kOatAddressCount,
  };
  friend std::ostream& operator<<(std::ostream& stream, const OatAddress& oat_address);

  static constexpr size_t kBinBits = MinimumBitsToStore<uint32_t>(kBinMirrorCount - 1);
  // uint32 = typeof(lockword_)
  // Subtract read barrier bits since we want these to remain 0, or else it may result in DCHECK
  // failures due to invalid read barrier bits during object field reads.
  static const size_t kBinShift = BitSizeOf<uint32_t>() - kBinBits -
      LockWord::kReadBarrierStateSize;
  // 111000.....0
  static const size_t kBinMask = ((static_cast<size_t>(1) << kBinBits) - 1) << kBinShift;
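  // For example, with the current kBinMirrorCount of 6 mirror bins, kBinBits is 3, so the bin
  // number occupies the three bits just below the read barrier state bits and the remaining low
  // bits hold the index within the bin (see BinSlot below).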

  // We use the lock word to store the bin # and bin index of the object in the image.
  //
  // The struct size must be exactly sizeof(LockWord), currently 32-bits, since this will end up
  // stored in the lock word bit-for-bit when object forwarding addresses are being calculated.
  struct BinSlot {
    explicit BinSlot(uint32_t lockword);
    BinSlot(Bin bin, uint32_t index);

    // The bin an object belongs to, i.e. regular, class/verified, class/initialized, etc.
    Bin GetBin() const;
    // The offset in bytes from the beginning of the bin. Aligned to object size.
    uint32_t GetIndex() const;
    // Pack into a single uint32_t, for storing into a lock word.
    uint32_t Uint32Value() const { return lockword_; }
    // Comparison operator for map support.
    bool operator<(const BinSlot& other) const { return lockword_ < other.lockword_; }

  private:
    // Must be the same size as LockWord, any larger and we would truncate the data.
    const uint32_t lockword_;
  };

  struct ImageInfo {
    ImageInfo();
    ImageInfo(ImageInfo&&) = default;

    // Creates the image sections and writes them into the out_sections array; returns the size
    // of the image excluding the bitmap.
    size_t CreateImageSections(ImageSection* out_sections) const;

    std::unique_ptr<MemMap> image_;  // Memory mapped for generating the image.

    // Target begin of this image. Note: it is not valid to write here, as this is the address
    // of the target image, not necessarily where image_ is mapped. The address is only valid
    // after layout (otherwise null).
    uint8_t* image_begin_ = nullptr;

    // Offset to the free space in image_, initially size of image header.
    size_t image_end_ = RoundUp(sizeof(ImageHeader), kObjectAlignment);
    uint32_t image_roots_address_ = 0;  // The image roots address in the image.
    size_t image_offset_ = 0;  // Offset of this image from the start of the first image.

    // Image size is the *address space* covered by this image. As the live bitmap is aligned
    // to the page size, the live bitmap will cover more address space than necessary. But live
    // bitmaps may not overlap, so an image has a "shadow," which is accounted for in the size.
    // The next image may only start at image_begin_ + image_size_ (which is guaranteed to be
    // page-aligned).
    size_t image_size_ = 0;

    // Oat data.
    // Offset of the oat file for this image from start of oat files. This is
    // valid when the previous oat file has been written.
    size_t oat_offset_ = 0;
    // Layout of the loaded ELF file containing the oat file, valid after UpdateOatFileLayout().
    const uint8_t* oat_file_begin_ = nullptr;
    size_t oat_loaded_size_ = 0;
    const uint8_t* oat_data_begin_ = nullptr;
    size_t oat_size_ = 0;  // Size of the corresponding oat data.
    // The oat header checksum, valid after UpdateOatFileHeader().
    uint32_t oat_checksum_ = 0u;

    // Image bitmap which lets us know where the objects inside of the image reside.
    std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_;

    // The start offsets of the dex cache arrays.
    SafeMap<const DexFile*, size_t> dex_cache_array_starts_;

    // Offset from oat_data_begin_ to the stubs.
    uint32_t oat_address_offsets_[kOatAddressCount] = {};

    // Bin slot tracking for dirty object packing.
    size_t bin_slot_sizes_[kBinSize] = {};  // Number of bytes in a bin.
    size_t bin_slot_offsets_[kBinSize] = {};  // Number of bytes in previous bins.
    size_t bin_slot_count_[kBinSize] = {};  // Number of objects in a bin.

    // Cached size of the intern table for when we allocate memory.
    size_t intern_table_bytes_ = 0;

    // Number of image class table bytes.
    size_t class_table_bytes_ = 0;

    // Intern table associated with this image for serialization.
    std::unique_ptr<InternTable> intern_table_;

    // Class table associated with this image for serialization.
    std::unique_ptr<ClassTable> class_table_;
  };

  // We use the lock word to store the offset of the object in the image.
  void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void SetImageOffset(mirror::Object* object, size_t offset)
      SHARED_REQUIRES(Locks::mutator_lock_);
  bool IsImageOffsetAssigned(mirror::Object* object) const
      SHARED_REQUIRES(Locks::mutator_lock_);
  size_t GetImageOffset(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
  void UpdateImageOffset(mirror::Object* obj, uintptr_t offset)
      SHARED_REQUIRES(Locks::mutator_lock_);

  void PrepareDexCacheArraySlots() SHARED_REQUIRES(Locks::mutator_lock_);
  void AssignImageBinSlot(mirror::Object* object, size_t oat_index)
      SHARED_REQUIRES(Locks::mutator_lock_);
  mirror::Object* TryAssignBinSlot(WorkStack& work_stack, mirror::Object* obj, size_t oat_index)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot)
      SHARED_REQUIRES(Locks::mutator_lock_);
  bool IsImageBinSlotAssigned(mirror::Object* object) const
      SHARED_REQUIRES(Locks::mutator_lock_);
  BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);

  void AddDexCacheArrayRelocation(void* array, size_t offset, mirror::DexCache* dex_cache)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_);

  static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj);
  }

  mirror::Object* GetLocalAddress(mirror::Object* object) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    size_t offset = GetImageOffset(object);
    size_t oat_index = GetOatIndex(object);
    const ImageInfo& image_info = GetImageInfo(oat_index);
    uint8_t* dst = image_info.image_->Begin() + offset;
    return reinterpret_cast<mirror::Object*>(dst);
  }

  // Returns the address in the boot image if we are compiling the app image.
  const uint8_t* GetOatAddress(OatAddress type) const;

  const uint8_t* GetOatAddressForOffset(uint32_t offset, const ImageInfo& image_info) const {
    // With Quick, code is within the OatFile, as it is all in one .o ELF object.
    // Interpret the offset as signed (see the casts below).
    DCHECK_LE(static_cast<int32_t>(offset), static_cast<int32_t>(image_info.oat_size_));
    DCHECK(image_info.oat_data_begin_ != nullptr);
    return offset == 0u ? nullptr : image_info.oat_data_begin_ + static_cast<int32_t>(offset);
  }

  // Returns true if the class was in the original requested image classes list.
  bool KeepClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);

  // Debug aid that lists the requested image classes.
  void DumpImageClasses();

  // Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying.
  void ComputeLazyFieldsForImageClasses()
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Remove unwanted classes from various roots.
  void PruneNonImageClasses() SHARED_REQUIRES(Locks::mutator_lock_);

  // Verify that unwanted classes have been removed.
  void CheckNonImageClassesRemoved() SHARED_REQUIRES(Locks::mutator_lock_);
  static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Lays out where the image objects will be at runtime.
  void CalculateNewObjectOffsets()
      SHARED_REQUIRES(Locks::mutator_lock_);
  void ProcessWorkStack(WorkStack* work_stack)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void CreateHeader(size_t oat_index)
      SHARED_REQUIRES(Locks::mutator_lock_);
  mirror::ObjectArray<mirror::Object>* CreateImageRoots(size_t oat_index) const
      SHARED_REQUIRES(Locks::mutator_lock_);
  void UnbinObjectsIntoOffset(mirror::Object* obj)
      SHARED_REQUIRES(Locks::mutator_lock_);

  static void EnsureBinSlotAssignedCallback(mirror::Object* obj, void* arg)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static void DeflateMonitorCallback(mirror::Object* obj, void* arg)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static void UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Creates the contiguous image in memory and adjusts pointers.
  void CopyAndFixupNativeData(size_t oat_index) SHARED_REQUIRES(Locks::mutator_lock_);
  void CopyAndFixupObjects() SHARED_REQUIRES(Locks::mutator_lock_);
  static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
  void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void CopyAndFixupImTable(ImTable* orig, ImTable* copy) SHARED_REQUIRES(Locks::mutator_lock_);
  void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void FixupClass(mirror::Class* orig, mirror::Class* copy)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void FixupObject(mirror::Object* orig, mirror::Object* copy)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void FixupDexCache(mirror::DexCache* orig_dex_cache, mirror::DexCache* copy_dex_cache)
      SHARED_REQUIRES(Locks::mutator_lock_);
  void FixupPointerArray(mirror::Object* dst,
                         mirror::PointerArray* arr,
                         mirror::Class* klass,
                         Bin array_type)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Get quick code for non-resolution/imt_conflict/abstract method.
  const uint8_t* GetQuickCode(ArtMethod* method,
                              const ImageInfo& image_info,
                              bool* quick_is_interpreted)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Calculate the sum total of the bin slot sizes in [0, up_to). Defaults to all bins.
  size_t GetBinSizeSum(ImageInfo& image_info, Bin up_to = kBinSize) const;

  // Return true if a method is likely to be dirtied at runtime.
  bool WillMethodBeDirty(ArtMethod* m) const SHARED_REQUIRES(Locks::mutator_lock_);

  // Assign the offset for an ArtMethod.
  void AssignMethodOffset(ArtMethod* method,
                          NativeObjectRelocationType type,
                          size_t oat_index)
      SHARED_REQUIRES(Locks::mutator_lock_);

  void TryAssignImTableOffset(ImTable* imt, size_t oat_index) SHARED_REQUIRES(Locks::mutator_lock_);

  // Assign the offset for an IMT conflict table. Does nothing if the table already has a native
  // relocation.
  void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Return true if klass is loaded by the boot class loader but not in the boot image.
  bool IsBootClassLoaderNonImageClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);

  // Return true if klass depends on a boot class loader non image class. We want to prune these
  // classes since we do not want any boot class loader classes in the image. This means that
  // we also cannot have any classes which refer to these boot class loader non image classes.
  // PruneAppImageClass also prunes if klass depends on a non-image class according to the compiler
  // driver.
  bool PruneAppImageClass(mirror::Class* klass)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // early_exit is true if we had a cyclic dependency anywhere down the chain.
  bool PruneAppImageClassInternal(mirror::Class* klass,
                                  bool* early_exit,
                                  std::unordered_set<mirror::Class*>* visited)
      SHARED_REQUIRES(Locks::mutator_lock_);

  bool IsMultiImage() const {
    return image_infos_.size() > 1;
  }

  static Bin BinTypeForNativeRelocationType(NativeObjectRelocationType type);

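  // Return the offset of the given native object (ArtField, ArtMethod, etc.) within its image.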
  uintptr_t NativeOffsetInImage(void* obj) SHARED_REQUIRES(Locks::mutator_lock_);

  // Location of where the object will be when the image is loaded at runtime.
  template <typename T>
  T* NativeLocationInImage(T* obj) SHARED_REQUIRES(Locks::mutator_lock_);

  // Location of where the temporary copy of the object currently is.
  template <typename T>
  T* NativeCopyLocation(T* obj, mirror::DexCache* dex_cache) SHARED_REQUIRES(Locks::mutator_lock_);

  // Return true if obj is inside of the boot image space. This may only return true if we are
  // compiling an app image.
  bool IsInBootImage(const void* obj) const;

  // Return true if ptr is within the boot oat file.
  bool IsInBootOatFile(const void* ptr) const;

  // Get the index of the oat file associated with the object.
  size_t GetOatIndex(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);

  // The oat index for shared data in multi-image and all data in single-image compilation.
  size_t GetDefaultOatIndex() const {
    return 0u;
  }

  ImageInfo& GetImageInfo(size_t oat_index) {
    return image_infos_[oat_index];
  }

  const ImageInfo& GetImageInfo(size_t oat_index) const {
    return image_infos_[oat_index];
  }

  // Find an already strongly interned string in the other images or in the boot image. Used to
  // remove duplicates in the multi-image and app image case.
  mirror::String* FindInternedString(mirror::String* string) SHARED_REQUIRES(Locks::mutator_lock_);

  // Return true if there already exists a native allocation for an object.
  bool NativeRelocationAssigned(void* ptr) const;

  const CompilerDriver& compiler_driver_;

  // Beginning target image address for the first image.
  uint8_t* global_image_begin_;

  // Offset from image_begin_ to where the first object is in image_.
  size_t image_objects_offset_begin_;

  // Pointer arrays that need to be updated. Since these are plain int and long arrays, we need to
  // keep track of them explicitly. These include vtable arrays, iftable arrays, and dex caches.
  std::unordered_map<mirror::PointerArray*, Bin> pointer_arrays_;

  // Saved hash codes. We use these to restore lock words that temporarily held forwarding
  // addresses, as well as to copy over hash codes.
  std::unordered_map<mirror::Object*, uint32_t> saved_hashcode_map_;

  // Oat index map for objects.
  std::unordered_map<mirror::Object*, uint32_t> oat_index_map_;

  // Boolean flags.
  const bool compile_pic_;
  const bool compile_app_image_;

  // Size of pointers on the target architecture.
  size_t target_ptr_size_;

  // Image data indexed by the oat file index.
  dchecked_vector<ImageInfo> image_infos_;

  // ArtField and ArtMethod relocation map. These are allocated as arrays of structs but we want to
  // have one entry per ArtField for convenience. ArtFields are placed right after the end of the
  // image objects (aka sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields.
  struct NativeObjectRelocation {
    size_t oat_index;
    uintptr_t offset;
    NativeObjectRelocationType type;

    bool IsArtMethodRelocation() const {
      return type == kNativeObjectRelocationTypeArtMethodClean ||
          type == kNativeObjectRelocationTypeArtMethodDirty ||
          type == kNativeObjectRelocationTypeRuntimeMethod;
    }
  };
  std::unordered_map<void*, NativeObjectRelocation> native_object_relocations_;

  // Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image.
  ArtMethod* image_methods_[ImageHeader::kImageMethodsCount];

  // Counters for measurements, used for logging only.
  uint64_t dirty_methods_;
  uint64_t clean_methods_;

  // Prune class memoization table to speed up ContainsBootClassLoaderNonImageClass.
  std::unordered_map<mirror::Class*, bool> prune_class_memo_;

  // Class loaders with a class table to write out. There should only be one class loader because
  // dex2oat loads the dex files to be compiled into a single class loader. For the boot image,
  // null is a valid entry.
  std::unordered_set<mirror::ClassLoader*> class_loaders_;

  // Which mode the image is stored as, see image.h
  const ImageHeader::StorageMode image_storage_mode_;

  // The file names of oat files.
  const std::vector<const char*>& oat_filenames_;

  // Map of dex files to the indexes of oat files that they were compiled into.
  const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map_;

  friend class ContainsBootClassLoaderNonImageClassVisitor;
  friend class FixupClassVisitor;
  friend class FixupRootVisitor;
  friend class FixupVisitor;
  class GetRootsVisitor;
  friend class NativeLocationVisitor;
  friend class NonImageClassesVisitor;
  class VisitReferencesVisitor;
  DISALLOW_COPY_AND_ASSIGN(ImageWriter);
};

}  // namespace art

#endif  // ART_COMPILER_IMAGE_WRITER_H_