/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_COMPILED_METHOD_H_
#define ART_COMPILER_COMPILED_METHOD_H_

#include <algorithm>
#include <memory>
#include <iosfwd>
#include <string>
#include <utility>
#include <vector>

#include "arch/instruction_set.h"
#include "base/bit_utils.h"
#include "base/length_prefixed_array.h"
#include "method_reference.h"
#include "utils/array_ref.h"

namespace art {

class CompilerDriver;
class CompiledMethodStorage;

class CompiledCode {
 public:
  // For Quick to supply a code blob.
  CompiledCode(CompilerDriver* compiler_driver, InstructionSet instruction_set,
               const ArrayRef<const uint8_t>& quick_code);

  virtual ~CompiledCode();

  InstructionSet GetInstructionSet() const {
    return instruction_set_;
  }

  ArrayRef<const uint8_t> GetQuickCode() const {
    return GetArray(quick_code_);
  }

  bool operator==(const CompiledCode& rhs) const;

  // To align an offset from a page-aligned value to make it suitable
  // for code storage. For example on ARM, to ensure that PC relative
  // value computations work out as expected.
  size_t AlignCode(size_t offset) const;
  static size_t AlignCode(size_t offset, InstructionSet instruction_set);

  // Returns the difference between the code address and a usable PC.
  // Mainly to cope with kThumb2 where the lower bit must be set.
  size_t CodeDelta() const;
  static size_t CodeDelta(InstructionSet instruction_set);

  // Returns a pointer suitable for invoking the code at the argument
  // code_pointer address. Mainly to cope with kThumb2 where the
  // lower bit must be set to indicate Thumb mode.
  static const void* CodePointer(const void* code_pointer,
                                 InstructionSet instruction_set);

 protected:
  template <typename T>
  static ArrayRef<const T> GetArray(const LengthPrefixedArray<T>* array) {
    if (array == nullptr) {
      return ArrayRef<const T>();
    }
    DCHECK_NE(array->size(), 0u);
    return ArrayRef<const T>(&array->At(0), array->size());
  }

  CompilerDriver* GetCompilerDriver() {
    return compiler_driver_;
  }

 private:
  CompilerDriver* const compiler_driver_;

  const InstructionSet instruction_set_;

  // Used to store the PIC code for Quick.
  const LengthPrefixedArray<uint8_t>* const quick_code_;
};
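
// Illustrative usage sketch (not part of this header's API; values shown are assumptions
// for illustration only): CodeDelta() and CodePointer() mainly matter for kThumb2, where
// the low bit of a code address must be set before it can be used as a PC.
//
//   const void* code = ...;  // start of a compiled code blob
//   const void* entry_point = CompiledCode::CodePointer(code, kThumb2);  // low bit set
//   size_t delta = CompiledCode::CodeDelta(kThumb2);                     // 1 on Thumb2, 0 otherwise
//   size_t aligned_offset = CompiledCode::AlignCode(offset, kThumb2);    // offset suitable for code storage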

class SrcMapElem {
 public:
  uint32_t from_;
  int32_t to_;
};

inline bool operator<(const SrcMapElem& lhs, const SrcMapElem& rhs) {
  if (lhs.from_ != rhs.from_) {
    return lhs.from_ < rhs.from_;
  }
  return lhs.to_ < rhs.to_;
}

inline bool operator==(const SrcMapElem& lhs, const SrcMapElem& rhs) {
  return lhs.from_ == rhs.from_ && lhs.to_ == rhs.to_;
}

template <class Allocator>
class SrcMap FINAL : public std::vector<SrcMapElem, Allocator> {
 public:
  using std::vector<SrcMapElem, Allocator>::begin;
  using typename std::vector<SrcMapElem, Allocator>::const_iterator;
  using std::vector<SrcMapElem, Allocator>::empty;
  using std::vector<SrcMapElem, Allocator>::end;
  using std::vector<SrcMapElem, Allocator>::resize;
  using std::vector<SrcMapElem, Allocator>::shrink_to_fit;
  using std::vector<SrcMapElem, Allocator>::size;

  explicit SrcMap() {}
  explicit SrcMap(const Allocator& alloc) : std::vector<SrcMapElem, Allocator>(alloc) {}

  template <class InputIt>
  SrcMap(InputIt first, InputIt last, const Allocator& alloc)
      : std::vector<SrcMapElem, Allocator>(first, last, alloc) {}

  void push_back(const SrcMapElem& elem) {
    if (!empty()) {
      // Check that the addresses are inserted in sorted order.
      DCHECK_GE(elem.from_, this->back().from_);
      // If two consecutive entries map to the same value, ignore the latter.
      // E.g. for map {{0, 1}, {4, 1}, {8, 2}}, all values in [0,8) map to 1.
      if (elem.to_ == this->back().to_) {
        return;
      }
    }
    std::vector<SrcMapElem, Allocator>::push_back(elem);
  }

  // Returns true and the corresponding "to" value if the mapping is found.
  // Otherwise returns false and 0.
  std::pair<bool, int32_t> Find(uint32_t from) const {
    // Finds the first mapping such that lb.from_ >= from.
    auto lb = std::lower_bound(begin(), end(), SrcMapElem {from, INT32_MIN});
    if (lb != end() && lb->from_ == from) {
      // Found exact match.
      return std::make_pair(true, lb->to_);
    } else if (lb != begin()) {
      // The previous mapping is still in effect.
      return std::make_pair(true, (--lb)->to_);
    } else {
      // Not found because 'from' is smaller than the first entry in the map.
      return std::make_pair(false, 0);
    }
  }
};

using DefaultSrcMap = SrcMap<std::allocator<SrcMapElem>>;
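
// Illustrative sketch of how a source map behaves (the values are made up for the example):
// consecutive entries mapping to the same dex PC are collapsed by push_back(), and Find()
// returns the mapping in effect for a given native PC offset.
//
//   DefaultSrcMap map;
//   map.push_back({0u, 1});   // native PC [0, 8) -> dex PC 1
//   map.push_back({4u, 1});   // ignored: same dex PC as the previous entry
//   map.push_back({8u, 2});   // native PC [8, ...) -> dex PC 2
//   std::pair<bool, int32_t> result = map.Find(6u);   // {true, 1}
//   result = map.Find(12u);                            // {true, 2}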

class LinkerPatch {
 public:
  // Note: We explicitly specify the underlying type of the enum because GCC
  // would otherwise select a bigger underlying type and then complain that
  //     'art::LinkerPatch::patch_type_' is too small to hold all
  //     values of 'enum class art::LinkerPatch::Type'
  // which is ridiculous given we have only a handful of values here. If we
  // choose to squeeze the Type into fewer than 8 bits, we'll have to declare
  // patch_type_ as an uintN_t and do explicit static_cast<>s.
  enum class Type : uint8_t {
    kRecordPosition,   // Just record patch position for patchoat.
    kMethod,
    kCall,
    kCallRelative,     // NOTE: Actual patching is instruction_set-dependent.
    kType,
    kString,
    kStringRelative,   // NOTE: Actual patching is instruction_set-dependent.
    kDexCacheArray,    // NOTE: Actual patching is instruction_set-dependent.
  };

  static LinkerPatch RecordPosition(size_t literal_offset) {
    return LinkerPatch(literal_offset, Type::kRecordPosition, /* target_dex_file */ nullptr);
  }

  static LinkerPatch MethodPatch(size_t literal_offset,
                                 const DexFile* target_dex_file,
                                 uint32_t target_method_idx) {
    LinkerPatch patch(literal_offset, Type::kMethod, target_dex_file);
    patch.method_idx_ = target_method_idx;
    return patch;
  }

  static LinkerPatch CodePatch(size_t literal_offset,
                               const DexFile* target_dex_file,
                               uint32_t target_method_idx) {
    LinkerPatch patch(literal_offset, Type::kCall, target_dex_file);
    patch.method_idx_ = target_method_idx;
    return patch;
  }

  static LinkerPatch RelativeCodePatch(size_t literal_offset,
                                       const DexFile* target_dex_file,
                                       uint32_t target_method_idx) {
    LinkerPatch patch(literal_offset, Type::kCallRelative, target_dex_file);
    patch.method_idx_ = target_method_idx;
    return patch;
  }

  static LinkerPatch TypePatch(size_t literal_offset,
                               const DexFile* target_dex_file,
                               uint32_t target_type_idx) {
    LinkerPatch patch(literal_offset, Type::kType, target_dex_file);
    patch.type_idx_ = target_type_idx;
    return patch;
  }

  static LinkerPatch StringPatch(size_t literal_offset,
                                 const DexFile* target_dex_file,
                                 uint32_t target_string_idx) {
    LinkerPatch patch(literal_offset, Type::kString, target_dex_file);
    patch.string_idx_ = target_string_idx;
    return patch;
  }

  static LinkerPatch RelativeStringPatch(size_t literal_offset,
                                         const DexFile* target_dex_file,
                                         uint32_t pc_insn_offset,
                                         uint32_t target_string_idx) {
    LinkerPatch patch(literal_offset, Type::kStringRelative, target_dex_file);
    patch.string_idx_ = target_string_idx;
    patch.pc_insn_offset_ = pc_insn_offset;
    return patch;
  }

  static LinkerPatch DexCacheArrayPatch(size_t literal_offset,
                                        const DexFile* target_dex_file,
                                        uint32_t pc_insn_offset,
                                        size_t element_offset) {
    DCHECK(IsUint<32>(element_offset));
    LinkerPatch patch(literal_offset, Type::kDexCacheArray, target_dex_file);
    patch.pc_insn_offset_ = pc_insn_offset;
    patch.element_offset_ = element_offset;
    return patch;
  }

  LinkerPatch(const LinkerPatch& other) = default;
  LinkerPatch& operator=(const LinkerPatch& other) = default;

  size_t LiteralOffset() const {
    return literal_offset_;
  }

  Type GetType() const {
    return patch_type_;
  }

  bool IsPcRelative() const {
    switch (GetType()) {
      case Type::kCallRelative:
      case Type::kStringRelative:
      case Type::kDexCacheArray:
        return true;
      default:
        return false;
    }
  }

  MethodReference TargetMethod() const {
    DCHECK(patch_type_ == Type::kMethod ||
           patch_type_ == Type::kCall ||
           patch_type_ == Type::kCallRelative);
    return MethodReference(target_dex_file_, method_idx_);
  }

  const DexFile* TargetTypeDexFile() const {
    DCHECK(patch_type_ == Type::kType);
    return target_dex_file_;
  }

  uint32_t TargetTypeIndex() const {
    DCHECK(patch_type_ == Type::kType);
    return type_idx_;
  }

  const DexFile* TargetStringDexFile() const {
    DCHECK(patch_type_ == Type::kString || patch_type_ == Type::kStringRelative);
    return target_dex_file_;
  }

  uint32_t TargetStringIndex() const {
    DCHECK(patch_type_ == Type::kString || patch_type_ == Type::kStringRelative);
    return string_idx_;
  }

  const DexFile* TargetDexCacheDexFile() const {
    DCHECK(patch_type_ == Type::kDexCacheArray);
    return target_dex_file_;
  }

  size_t TargetDexCacheElementOffset() const {
    DCHECK(patch_type_ == Type::kDexCacheArray);
    return element_offset_;
  }

  uint32_t PcInsnOffset() const {
    DCHECK(patch_type_ == Type::kStringRelative || patch_type_ == Type::kDexCacheArray);
    return pc_insn_offset_;
  }

 private:
  LinkerPatch(size_t literal_offset, Type patch_type, const DexFile* target_dex_file)
      : target_dex_file_(target_dex_file),
        literal_offset_(literal_offset),
        patch_type_(patch_type) {
    cmp1_ = 0u;
    cmp2_ = 0u;
    // The compiler rejects methods that are too big, so the compiled code
    // of a single method really shouldn't be anywhere close to 16MiB.
    DCHECK(IsUint<24>(literal_offset));
  }

  const DexFile* target_dex_file_;
  uint32_t literal_offset_ : 24;  // Method code size up to 16MiB.
  Type patch_type_ : 8;
  union {
    uint32_t cmp1_;             // Used for relational operators.
    uint32_t method_idx_;       // Method index for Call/Method patches.
    uint32_t type_idx_;         // Type index for Type patches.
    uint32_t string_idx_;       // String index for String patches.
    uint32_t element_offset_;   // Element offset in the dex cache arrays.
    static_assert(sizeof(method_idx_) == sizeof(cmp1_), "needed by relational operators");
    static_assert(sizeof(type_idx_) == sizeof(cmp1_), "needed by relational operators");
    static_assert(sizeof(string_idx_) == sizeof(cmp1_), "needed by relational operators");
    static_assert(sizeof(element_offset_) == sizeof(cmp1_), "needed by relational operators");
  };
  union {
    // Note: To avoid uninitialized padding on 64-bit systems, we use `size_t` for `cmp2_`.
    // This allows a hashing function to treat an array of linker patches as raw memory.
    size_t cmp2_;               // Used for relational operators.
    // Literal offset of the insn loading PC (same as literal_offset if it's the same insn,
    // may be different if the PC-relative addressing needs multiple insns).
    uint32_t pc_insn_offset_;
    static_assert(sizeof(pc_insn_offset_) <= sizeof(cmp2_), "needed by relational operators");
  };

  friend bool operator==(const LinkerPatch& lhs, const LinkerPatch& rhs);
  friend bool operator<(const LinkerPatch& lhs, const LinkerPatch& rhs);
};
std::ostream& operator<<(std::ostream& os, const LinkerPatch::Type& type);

inline bool operator==(const LinkerPatch& lhs, const LinkerPatch& rhs) {
  return lhs.literal_offset_ == rhs.literal_offset_ &&
      lhs.patch_type_ == rhs.patch_type_ &&
      lhs.target_dex_file_ == rhs.target_dex_file_ &&
      lhs.cmp1_ == rhs.cmp1_ &&
      lhs.cmp2_ == rhs.cmp2_;
}

inline bool operator<(const LinkerPatch& lhs, const LinkerPatch& rhs) {
  return (lhs.literal_offset_ != rhs.literal_offset_) ? lhs.literal_offset_ < rhs.literal_offset_
      : (lhs.patch_type_ != rhs.patch_type_) ? lhs.patch_type_ < rhs.patch_type_
      : (lhs.target_dex_file_ != rhs.target_dex_file_) ? lhs.target_dex_file_ < rhs.target_dex_file_
      : (lhs.cmp1_ != rhs.cmp1_) ? lhs.cmp1_ < rhs.cmp1_
      : lhs.cmp2_ < rhs.cmp2_;
}
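
// Illustrative sketch (assumed usage, shown for clarity only): the relational operators above
// compare patches field by field via cmp1_/cmp2_, so a list of patches can be sorted and
// deduplicated with the standard algorithms.
//
//   std::vector<LinkerPatch> patches = ...;
//   std::sort(patches.begin(), patches.end());
//   patches.erase(std::unique(patches.begin(), patches.end()), patches.end());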

class CompiledMethod FINAL : public CompiledCode {
 public:
  // Constructs a CompiledMethod.
  // Note: Consider using the static allocation methods below that will allocate the CompiledMethod
  //       in the swap space.
  CompiledMethod(CompilerDriver* driver,
                 InstructionSet instruction_set,
                 const ArrayRef<const uint8_t>& quick_code,
                 const size_t frame_size_in_bytes,
                 const uint32_t core_spill_mask,
                 const uint32_t fp_spill_mask,
                 const ArrayRef<const SrcMapElem>& src_mapping_table,
                 const ArrayRef<const uint8_t>& vmap_table,
                 const ArrayRef<const uint8_t>& cfi_info,
                 const ArrayRef<const LinkerPatch>& patches);

  virtual ~CompiledMethod();

  static CompiledMethod* SwapAllocCompiledMethod(
      CompilerDriver* driver,
      InstructionSet instruction_set,
      const ArrayRef<const uint8_t>& quick_code,
      const size_t frame_size_in_bytes,
      const uint32_t core_spill_mask,
      const uint32_t fp_spill_mask,
      const ArrayRef<const SrcMapElem>& src_mapping_table,
      const ArrayRef<const uint8_t>& vmap_table,
      const ArrayRef<const uint8_t>& cfi_info,
      const ArrayRef<const LinkerPatch>& patches);

  static void ReleaseSwapAllocatedCompiledMethod(CompilerDriver* driver, CompiledMethod* m);

  size_t GetFrameSizeInBytes() const {
    return frame_size_in_bytes_;
  }

  uint32_t GetCoreSpillMask() const {
    return core_spill_mask_;
  }

  uint32_t GetFpSpillMask() const {
    return fp_spill_mask_;
  }

  ArrayRef<const SrcMapElem> GetSrcMappingTable() const {
    return GetArray(src_mapping_table_);
  }

  ArrayRef<const uint8_t> GetVmapTable() const {
    return GetArray(vmap_table_);
  }

  ArrayRef<const uint8_t> GetCFIInfo() const {
    return GetArray(cfi_info_);
  }

  ArrayRef<const LinkerPatch> GetPatches() const {
    return GetArray(patches_);
  }

 private:
  // For quick code, the size of the activation used by the code.
  const size_t frame_size_in_bytes_;
  // For quick code, a bit mask describing spilled GPR callee-save registers.
  const uint32_t core_spill_mask_;
  // For quick code, a bit mask describing spilled FPR callee-save registers.
  const uint32_t fp_spill_mask_;
  // For quick code, a set of pairs (PC, DEX) mapping from native PC offset to DEX offset.
  const LengthPrefixedArray<SrcMapElem>* const src_mapping_table_;
  // For quick code, a uleb128 encoded map from GPR/FPR register to dex register. Size prefixed.
  const LengthPrefixedArray<uint8_t>* const vmap_table_;
  // For quick code, an FDE entry for the debug_frame section.
  const LengthPrefixedArray<uint8_t>* const cfi_info_;
  // For quick code, linker patches needed by the method.
  const LengthPrefixedArray<LinkerPatch>* const patches_;
};

}  // namespace art

#endif  // ART_COMPILER_COMPILED_METHOD_H_