/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "linker/arm/relative_patcher_arm_base.h"

#include "base/stl_util.h"
#include "compiled_method-inl.h"
#include "debug/method_debug_info.h"
#include "dex/dex_file_types.h"
#include "linker/linker_patch.h"
#include "linker/output_stream.h"
#include "oat.h"
#include "oat_quick_method_header.h"

namespace art {
namespace linker {

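// Bookkeeping for one unique thunk: its compiled code, the offsets at which copies of
// the thunk have been reserved (and are later written), and the maximum offset at which
// the next copy can be placed while keeping all unprocessed references in range.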
class ArmBaseRelativePatcher::ThunkData {
 public:
  ThunkData(std::vector<uint8_t> code, uint32_t max_next_offset)
      : code_(std::move(code)),
        offsets_(),
        max_next_offset_(max_next_offset),
        pending_offset_(0u) {
    DCHECK(NeedsNextThunk());  // The data is constructed only when we expect to need the thunk.
  }

  ThunkData(ThunkData&& src) = default;

  size_t CodeSize() const {
    return code_.size();
  }

  ArrayRef<const uint8_t> GetCode() const {
    return ArrayRef<const uint8_t>(code_);
  }

  bool NeedsNextThunk() const {
    return max_next_offset_ != 0u;
  }

  uint32_t MaxNextOffset() const {
    DCHECK(NeedsNextThunk());
    return max_next_offset_;
  }

  void ClearMaxNextOffset() {
    DCHECK(NeedsNextThunk());
    max_next_offset_ = 0u;
  }

  void SetMaxNextOffset(uint32_t max_next_offset) {
    DCHECK(!NeedsNextThunk());
    max_next_offset_ = max_next_offset;
  }

  // Adjust the MaxNextOffset() down if needed to fit the code before the next thunk.
  // Returns true if it was adjusted, false if the old value was kept.
  bool MakeSpaceBefore(const ThunkData& next_thunk, size_t alignment) {
    DCHECK(NeedsNextThunk());
    DCHECK(next_thunk.NeedsNextThunk());
    DCHECK_ALIGNED_PARAM(MaxNextOffset(), alignment);
    DCHECK_ALIGNED_PARAM(next_thunk.MaxNextOffset(), alignment);
    if (next_thunk.MaxNextOffset() - CodeSize() < MaxNextOffset()) {
      max_next_offset_ = RoundDown(next_thunk.MaxNextOffset() - CodeSize(), alignment);
      return true;
    } else {
      return false;
    }
  }

  uint32_t ReserveOffset(size_t offset) {
    DCHECK(NeedsNextThunk());
    DCHECK_LE(offset, max_next_offset_);
    max_next_offset_ = 0u;  // The reserved offset should satisfy all pending references.
    offsets_.push_back(offset);
    return offset + CodeSize();
  }

  bool HasReservedOffset() const {
    return !offsets_.empty();
  }

  uint32_t LastReservedOffset() const {
    DCHECK(HasReservedOffset());
    return offsets_.back();
  }

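  // Reserved offsets are written out in order: offsets below `pending_offset_` have
  // already been written; the offset at `pending_offset_` (if any) is the next to write.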
  bool HasPendingOffset() const {
    return pending_offset_ != offsets_.size();
  }

  uint32_t GetPendingOffset() const {
    DCHECK(HasPendingOffset());
    return offsets_[pending_offset_];
  }

  void MarkPendingOffsetAsWritten() {
    DCHECK(HasPendingOffset());
    ++pending_offset_;
  }

  bool HasWrittenOffset() const {
    return pending_offset_ != 0u;
  }

  uint32_t LastWrittenOffset() const {
    DCHECK(HasWrittenOffset());
    return offsets_[pending_offset_ - 1u];
  }

  size_t IndexOfFirstThunkAtOrAfter(uint32_t offset) const {
    size_t number_of_thunks = NumberOfThunks();
    for (size_t i = 0; i != number_of_thunks; ++i) {
      if (GetThunkOffset(i) >= offset) {
        return i;
      }
    }
    return number_of_thunks;
  }

  size_t NumberOfThunks() const {
    return offsets_.size();
  }

  uint32_t GetThunkOffset(size_t index) const {
    DCHECK_LT(index, NumberOfThunks());
    return offsets_[index];
  }

 private:
  std::vector<uint8_t> code_;       // The code of the thunk.
  std::vector<uint32_t> offsets_;   // Offsets at which the thunk needs to be written.
  uint32_t max_next_offset_;        // The maximum offset at which the next thunk can be placed.
  uint32_t pending_offset_;         // The index of the next offset to write.
};

class ArmBaseRelativePatcher::PendingThunkComparator {
 public:
  bool operator()(const ThunkData* lhs, const ThunkData* rhs) const {
    DCHECK(lhs->HasPendingOffset());
    DCHECK(rhs->HasPendingOffset());
    // The top of the heap is defined to contain the highest element and we want to pick
    // the thunk with the smallest pending offset, so use the reverse ordering, i.e. ">".
    return lhs->GetPendingOffset() > rhs->GetPendingOffset();
  }
};

uint32_t ArmBaseRelativePatcher::ReserveSpace(uint32_t offset,
                                              const CompiledMethod* compiled_method,
                                              MethodReference method_ref) {
  return ReserveSpaceInternal(offset, compiled_method, method_ref, 0u);
}

uint32_t ArmBaseRelativePatcher::ReserveSpaceEnd(uint32_t offset) {
  // For multi-oat compilations (boot image), ReserveSpaceEnd() is called for each oat file.
  // Since we do not know here whether this is the last file or whether the next opportunity
  // to place a thunk will come soon enough, we need to reserve all needed thunks now. Code
  // for subsequent oat files can still call back to them.
  if (!unprocessed_method_call_patches_.empty()) {
    ResolveMethodCalls(offset, MethodReference(nullptr, dex::kDexNoIndex));
  }
  for (ThunkData* data : unreserved_thunks_) {
    uint32_t thunk_offset = CompiledCode::AlignCode(offset, instruction_set_);
    offset = data->ReserveOffset(thunk_offset);
  }
  unreserved_thunks_.clear();
  // We also need to delay initializing the `pending_thunks_` until the call to WriteThunks().
  // Check that the `pending_thunks_.capacity()` indicates that no WriteThunks() has taken place.
  DCHECK_EQ(pending_thunks_.capacity(), 0u);
  return offset;
}

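// Writes out any thunks whose reserved offset coincides with the current aligned output
// position. Returns the offset past the last byte written, or 0u if writing to `out` fails.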
uint32_t ArmBaseRelativePatcher::WriteThunks(OutputStream* out, uint32_t offset) {
  if (pending_thunks_.capacity() == 0u) {
    if (thunks_.empty()) {
      return offset;
    }
    // First call to WriteThunks(), prepare the thunks for writing.
    pending_thunks_.reserve(thunks_.size());
    for (auto& entry : thunks_) {
      ThunkData* data = &entry.second;
      if (data->HasPendingOffset()) {
        pending_thunks_.push_back(data);
      }
    }
    std::make_heap(pending_thunks_.begin(), pending_thunks_.end(), PendingThunkComparator());
  }
  uint32_t aligned_offset = CompiledMethod::AlignCode(offset, instruction_set_);
  while (!pending_thunks_.empty() &&
         pending_thunks_.front()->GetPendingOffset() == aligned_offset) {
    // Write alignment bytes and code.
    uint32_t aligned_code_delta = aligned_offset - offset;
    if (aligned_code_delta != 0u && UNLIKELY(!WriteCodeAlignment(out, aligned_code_delta))) {
      return 0u;
    }
    if (UNLIKELY(!WriteThunk(out, pending_thunks_.front()->GetCode()))) {
      return 0u;
    }
    offset = aligned_offset + pending_thunks_.front()->CodeSize();
    // Mark the thunk as written at the pending offset and update the `pending_thunks_` heap.
    std::pop_heap(pending_thunks_.begin(), pending_thunks_.end(), PendingThunkComparator());
    pending_thunks_.back()->MarkPendingOffsetAsWritten();
    if (pending_thunks_.back()->HasPendingOffset()) {
      std::push_heap(pending_thunks_.begin(), pending_thunks_.end(), PendingThunkComparator());
    } else {
      pending_thunks_.pop_back();
    }
    aligned_offset = CompiledMethod::AlignCode(offset, instruction_set_);
  }
  DCHECK(pending_thunks_.empty() || pending_thunks_.front()->GetPendingOffset() > aligned_offset);
  return offset;
}

std::vector<debug::MethodDebugInfo> ArmBaseRelativePatcher::GenerateThunkDebugInfo(
    uint32_t executable_offset) {
  // For multi-oat compilation (boot image), `thunks_` records thunks for all oat files.
  // To return debug info for the current oat file, we must ignore thunks before the
  // `executable_offset` as they are in the previous oat files and this function must be
  // called before reserving thunk positions for subsequent oat files.
  size_t number_of_thunks = 0u;
  for (auto&& entry : thunks_) {
    const ThunkData& data = entry.second;
    number_of_thunks += data.NumberOfThunks() - data.IndexOfFirstThunkAtOrAfter(executable_offset);
  }
  std::vector<debug::MethodDebugInfo> result;
  result.reserve(number_of_thunks);
  for (auto&& entry : thunks_) {
    const ThunkKey& key = entry.first;
    const ThunkData& data = entry.second;
    size_t start = data.IndexOfFirstThunkAtOrAfter(executable_offset);
    if (start == data.NumberOfThunks()) {
      continue;
    }
    // Get the base name to use for the first occurrence of the thunk.
    std::string base_name = GetThunkDebugName(key);
    for (size_t i = start, num = data.NumberOfThunks(); i != num; ++i) {
      debug::MethodDebugInfo info = {};
      if (i == 0u) {
        info.custom_name = base_name;
      } else {
        // Add a disambiguating tag for subsequent identical thunks. Since `thunks_` also
        // keeps records for thunks in previous oat files, names based on the thunk index
        // are unique across the whole multi-oat output.
        info.custom_name = base_name + "_" + std::to_string(i);
      }
      info.isa = instruction_set_;
      info.is_code_address_text_relative = true;
      info.code_address = data.GetThunkOffset(i) - executable_offset;
      info.code_size = data.CodeSize();
      result.push_back(std::move(info));
    }
  }
  return result;
}

ArmBaseRelativePatcher::ArmBaseRelativePatcher(RelativePatcherTargetProvider* provider,
                                               InstructionSet instruction_set)
    : provider_(provider),
      instruction_set_(instruction_set),
      thunks_(),
      unprocessed_method_call_patches_(),
      method_call_thunk_(nullptr),
      pending_thunks_() {
}

ArmBaseRelativePatcher::~ArmBaseRelativePatcher() {
  // All work done by member destructors.
}

uint32_t ArmBaseRelativePatcher::ReserveSpaceInternal(uint32_t offset,
                                                      const CompiledMethod* compiled_method,
                                                      MethodReference method_ref,
                                                      uint32_t max_extra_space) {
  // Adjust code size for extra space required by the subclass.
  uint32_t max_code_size = compiled_method->GetQuickCode().size() + max_extra_space;
  uint32_t code_offset;
  uint32_t next_aligned_offset;
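  // Keep reserving thunks as long as placing the current method's code at the next
  // aligned offset would push some pending thunk past its maximum placement offset.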
  while (true) {
    code_offset = compiled_method->AlignCode(offset + sizeof(OatQuickMethodHeader));
    next_aligned_offset = compiled_method->AlignCode(code_offset + max_code_size);
    if (unreserved_thunks_.empty() ||
        unreserved_thunks_.front()->MaxNextOffset() >= next_aligned_offset) {
      break;
    }
    ThunkData* thunk = unreserved_thunks_.front();
    if (thunk == method_call_thunk_) {
      ResolveMethodCalls(code_offset, method_ref);
      // This may have changed `method_call_thunk_` data, so re-check if we need to reserve.
      if (unreserved_thunks_.empty() ||
          unreserved_thunks_.front()->MaxNextOffset() >= next_aligned_offset) {
        break;
      }
      // We need to process the new `front()` whether it's still the `method_call_thunk_` or not.
      thunk = unreserved_thunks_.front();
    }
    unreserved_thunks_.pop_front();
    uint32_t thunk_offset = CompiledCode::AlignCode(offset, instruction_set_);
    offset = thunk->ReserveOffset(thunk_offset);
    if (thunk == method_call_thunk_) {
      // All remaining method call patches will be handled by this thunk.
      DCHECK(!unprocessed_method_call_patches_.empty());
      DCHECK_LE(thunk_offset - unprocessed_method_call_patches_.front().GetPatchOffset(),
                MaxPositiveDisplacement(GetMethodCallKey()));
      unprocessed_method_call_patches_.clear();
    }
  }

  // Process patches and check that adding thunks for the current method did not push any
  // thunks (previously existing or newly added) before `next_aligned_offset`. This is
  // essentially a check that we never compile a method that's too big. The calls or branches
  // from the method should be able to reach beyond the end of the method and over any pending
  // thunks. (The number of different thunks should be relatively low and their code short.)
  ProcessPatches(compiled_method, code_offset);
  CHECK(unreserved_thunks_.empty() ||
        unreserved_thunks_.front()->MaxNextOffset() >= next_aligned_offset);

  return offset;
}

uint32_t ArmBaseRelativePatcher::CalculateMethodCallDisplacement(uint32_t patch_offset,
                                                                 uint32_t target_offset) {
  DCHECK(method_call_thunk_ != nullptr);
  // Unsigned arithmetic with its well-defined overflow behavior is just fine here.
  uint32_t displacement = target_offset - patch_offset;
  uint32_t max_positive_displacement = MaxPositiveDisplacement(GetMethodCallKey());
  uint32_t max_negative_displacement = MaxNegativeDisplacement(GetMethodCallKey());
  // NOTE: With unsigned arithmetic we do mean to use && rather than || below.
  if (displacement > max_positive_displacement && displacement < -max_negative_displacement) {
    // Unwritten thunks have higher offsets; check if the pending thunk is within range.
    DCHECK(!method_call_thunk_->HasPendingOffset() ||
           method_call_thunk_->GetPendingOffset() > patch_offset);
    if (method_call_thunk_->HasPendingOffset() &&
        method_call_thunk_->GetPendingOffset() - patch_offset <= max_positive_displacement) {
      displacement = method_call_thunk_->GetPendingOffset() - patch_offset;
    } else {
      // We must have a previous thunk then.
      DCHECK(method_call_thunk_->HasWrittenOffset());
      DCHECK_LT(method_call_thunk_->LastWrittenOffset(), patch_offset);
      displacement = method_call_thunk_->LastWrittenOffset() - patch_offset;
      DCHECK_GE(displacement, -max_negative_displacement);
    }
  }
  return displacement;
}

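// Returns the offset of the `key` thunk to use as the branch target for the patch at
// `patch_offset`: the last written thunk if it is within the backward branch range,
// otherwise the next pending (not yet written) thunk, which must be in forward range.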
uint32_t ArmBaseRelativePatcher::GetThunkTargetOffset(const ThunkKey& key, uint32_t patch_offset) {
  auto it = thunks_.find(key);
  CHECK(it != thunks_.end());
  const ThunkData& data = it->second;
  if (data.HasWrittenOffset()) {
    uint32_t offset = data.LastWrittenOffset();
    DCHECK_LT(offset, patch_offset);
    if (patch_offset - offset <= MaxNegativeDisplacement(key)) {
      return offset;
    }
  }
  DCHECK(data.HasPendingOffset());
  uint32_t offset = data.GetPendingOffset();
  DCHECK_GT(offset, patch_offset);
  DCHECK_LE(offset - patch_offset, MaxPositiveDisplacement(key));
  return offset;
}

ArmBaseRelativePatcher::ThunkKey ArmBaseRelativePatcher::GetMethodCallKey() {
  return ThunkKey(ThunkType::kMethodCall);
}

ArmBaseRelativePatcher::ThunkKey ArmBaseRelativePatcher::GetBakerThunkKey(
    const LinkerPatch& patch) {
  DCHECK_EQ(patch.GetType(), LinkerPatch::Type::kBakerReadBarrierBranch);
  return ThunkKey(ThunkType::kBakerReadBarrier,
                  patch.GetBakerCustomValue1(),
                  patch.GetBakerCustomValue2());
}

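// Records the thunk requirements of the patches in `compiled_method`, now placed at
// `code_offset`: creates a thunk entry on first use; otherwise refreshes the existing
// entry's maximum placement offset when no already reserved thunk copy is in range.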
void ArmBaseRelativePatcher::ProcessPatches(const CompiledMethod* compiled_method,
                                            uint32_t code_offset) {
  for (const LinkerPatch& patch : compiled_method->GetPatches()) {
    uint32_t patch_offset = code_offset + patch.LiteralOffset();
    ThunkKey key(static_cast<ThunkType>(-1));
    ThunkData* old_data = nullptr;
    if (patch.GetType() == LinkerPatch::Type::kCallRelative) {
      key = GetMethodCallKey();
      unprocessed_method_call_patches_.emplace_back(patch_offset, patch.TargetMethod());
      if (method_call_thunk_ == nullptr) {
        uint32_t max_next_offset = CalculateMaxNextOffset(patch_offset, key);
        auto it = thunks_.Put(key, ThunkData(CompileThunk(key), max_next_offset));
        method_call_thunk_ = &it->second;
        AddUnreservedThunk(method_call_thunk_);
      } else {
        old_data = method_call_thunk_;
      }
    } else if (patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch) {
      key = GetBakerThunkKey(patch);
      auto lb = thunks_.lower_bound(key);
      if (lb == thunks_.end() || thunks_.key_comp()(key, lb->first)) {
        uint32_t max_next_offset = CalculateMaxNextOffset(patch_offset, key);
        auto it = thunks_.PutBefore(lb, key, ThunkData(CompileThunk(key), max_next_offset));
        AddUnreservedThunk(&it->second);
      } else {
        old_data = &lb->second;
      }
    }
    if (old_data != nullptr) {
      // Shared path where an old thunk may need an update.
      DCHECK(key.GetType() != static_cast<ThunkType>(-1));
      DCHECK(!old_data->HasReservedOffset() || old_data->LastReservedOffset() < patch_offset);
      if (old_data->NeedsNextThunk()) {
        // Patches for a method are ordered by literal offset, so if we still need to place
        // this thunk for a previous patch, that thunk shall be in range for this patch.
        DCHECK_LE(old_data->MaxNextOffset(), CalculateMaxNextOffset(patch_offset, key));
      } else {
        if (!old_data->HasReservedOffset() ||
            patch_offset - old_data->LastReservedOffset() > MaxNegativeDisplacement(key)) {
          old_data->SetMaxNextOffset(CalculateMaxNextOffset(patch_offset, key));
          AddUnreservedThunk(old_data);
        }
      }
    }
  }
}

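// Inserts `data` into `unreserved_thunks_`, kept sorted by MaxNextOffset(), then shrinks
// the maximum placement offsets of the new thunk and its predecessors as needed so that
// each thunk's code still fits before the following thunk's maximum placement offset.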
void ArmBaseRelativePatcher::AddUnreservedThunk(ThunkData* data) {
  DCHECK(data->NeedsNextThunk());
  size_t index = unreserved_thunks_.size();
  while (index != 0u && data->MaxNextOffset() < unreserved_thunks_[index - 1u]->MaxNextOffset()) {
    --index;
  }
  unreserved_thunks_.insert(unreserved_thunks_.begin() + index, data);
  // We may need to update the max next offset(s) if the thunk code would not fit.
  size_t alignment = GetInstructionSetAlignment(instruction_set_);
  if (index + 1u != unreserved_thunks_.size()) {
    // Note: Ignore the return value as we need to process previous thunks regardless.
    data->MakeSpaceBefore(*unreserved_thunks_[index + 1u], alignment);
  }
  // Make space for previous thunks. Once we find a pending thunk that does
  // not need an adjustment, we can stop.
  while (index != 0u && unreserved_thunks_[index - 1u]->MakeSpaceBefore(*data, alignment)) {
    --index;
    data = unreserved_thunks_[index];
  }
}

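// Processes pending method call patches: drops patches whose targets are reachable
// directly or through an already reserved thunk. If unresolved patches remain, tries to
// conservatively extend the method call thunk's maximum placement offset; otherwise the
// thunk is no longer needed and is removed from `unreserved_thunks_`.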
void ArmBaseRelativePatcher::ResolveMethodCalls(uint32_t quick_code_offset,
                                                MethodReference method_ref) {
  DCHECK(!unreserved_thunks_.empty());
  DCHECK(!unprocessed_method_call_patches_.empty());
  DCHECK(method_call_thunk_ != nullptr);
  uint32_t max_positive_displacement = MaxPositiveDisplacement(GetMethodCallKey());
  uint32_t max_negative_displacement = MaxNegativeDisplacement(GetMethodCallKey());
  // Process as many patches as possible, stop only on unresolved targets or calls too far back.
  while (!unprocessed_method_call_patches_.empty()) {
    MethodReference target_method = unprocessed_method_call_patches_.front().GetTargetMethod();
    uint32_t patch_offset = unprocessed_method_call_patches_.front().GetPatchOffset();
    DCHECK(!method_call_thunk_->HasReservedOffset() ||
           method_call_thunk_->LastReservedOffset() <= patch_offset);
    if (!method_call_thunk_->HasReservedOffset() ||
        patch_offset - method_call_thunk_->LastReservedOffset() > max_negative_displacement) {
      // No previous thunk in range, check if we can reach the target directly.
      if (target_method == method_ref) {
        DCHECK_GT(quick_code_offset, patch_offset);
        if (quick_code_offset - patch_offset > max_positive_displacement) {
          break;
        }
      } else {
        auto result = provider_->FindMethodOffset(target_method);
        if (!result.first) {
          break;
        }
        uint32_t target_offset = result.second - CompiledCode::CodeDelta(instruction_set_);
        if (target_offset >= patch_offset) {
          DCHECK_LE(target_offset - patch_offset, max_positive_displacement);
        } else if (patch_offset - target_offset > max_negative_displacement) {
          break;
        }
      }
    }
    unprocessed_method_call_patches_.pop_front();
  }
  if (!unprocessed_method_call_patches_.empty()) {
    // Try to adjust the max next offset in `method_call_thunk_`. Do this conservatively only if
    // the thunk shall be at the end of the `unreserved_thunks_` to avoid dealing with overlaps.
    uint32_t new_max_next_offset =
        unprocessed_method_call_patches_.front().GetPatchOffset() + max_positive_displacement;
    if (new_max_next_offset >
        unreserved_thunks_.back()->MaxNextOffset() + unreserved_thunks_.back()->CodeSize()) {
      method_call_thunk_->ClearMaxNextOffset();
      method_call_thunk_->SetMaxNextOffset(new_max_next_offset);
      if (method_call_thunk_ != unreserved_thunks_.back()) {
        RemoveElement(unreserved_thunks_, method_call_thunk_);
        unreserved_thunks_.push_back(method_call_thunk_);
      }
    }
  } else {
    // We have resolved all method calls, we do not need a new thunk anymore.
    method_call_thunk_->ClearMaxNextOffset();
    RemoveElement(unreserved_thunks_, method_call_thunk_);
  }
}

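// The next thunk must remain reachable from the patch with the maximum positive
// displacement; round down to the instruction set alignment since thunks are placed
// at aligned offsets.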
inline uint32_t ArmBaseRelativePatcher::CalculateMaxNextOffset(uint32_t patch_offset,
                                                               const ThunkKey& key) {
  return RoundDown(patch_offset + MaxPositiveDisplacement(key),
                   GetInstructionSetAlignment(instruction_set_));
}

}  // namespace linker
}  // namespace art