/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "linker/arm/relative_patcher_arm_base.h"

#include "compiled_method.h"
#include "oat.h"
#include "output_stream.h"

namespace art {
namespace linker {

uint32_t ArmBaseRelativePatcher::ReserveSpace(uint32_t offset,
                                              const CompiledMethod* compiled_method,
                                              MethodReference method_ref) {
  return ReserveSpaceInternal(offset, compiled_method, method_ref, 0u);
}

uint32_t ArmBaseRelativePatcher::ReserveSpaceEnd(uint32_t offset) {
  // NOTE: The final thunk can be reserved from InitCodeMethodVisitor::EndClass() while it
  // may be written early by WriteCodeMethodVisitor::VisitMethod() for a deduplicated chunk
  // of code. To avoid any alignment discrepancies for the final chunk, we always align the
  // offset after reserving or writing any chunk.
  uint32_t aligned_offset = CompiledMethod::AlignCode(offset, instruction_set_);
  bool needs_thunk = ReserveSpaceProcessPatches(aligned_offset, MethodReference(nullptr, 0u),
                                                aligned_offset);
  if (needs_thunk) {
    thunk_locations_.push_back(aligned_offset);
    offset = CompiledMethod::AlignCode(aligned_offset + thunk_code_.size(), instruction_set_);
  }
  return offset;
}

uint32_t ArmBaseRelativePatcher::WriteThunks(OutputStream* out, uint32_t offset) {
  if (current_thunk_to_write_ == thunk_locations_.size()) {
    return offset;
  }
  uint32_t aligned_offset = CompiledMethod::AlignCode(offset, instruction_set_);
  if (UNLIKELY(aligned_offset == thunk_locations_[current_thunk_to_write_])) {
    ++current_thunk_to_write_;
    uint32_t aligned_code_delta = aligned_offset - offset;
    if (aligned_code_delta != 0u && !WriteCodeAlignment(out, aligned_code_delta)) {
      return 0u;
    }
    if (UNLIKELY(!WriteRelCallThunk(out, ArrayRef<const uint8_t>(thunk_code_)))) {
      return 0u;
    }
    uint32_t thunk_end_offset = aligned_offset + thunk_code_.size();
    // Align after writing the thunk, see the note in ReserveSpaceEnd() above.
    offset = CompiledMethod::AlignCode(thunk_end_offset, instruction_set_);
    aligned_code_delta = offset - thunk_end_offset;
    if (aligned_code_delta != 0u && !WriteCodeAlignment(out, aligned_code_delta)) {
      return 0u;
    }
  }
  return offset;
}
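
// Worked example of the reserve/write alignment invariant above (illustrative
// numbers only; the real alignment comes from CompiledMethod::AlignCode() for
// the target ISA). Assume 8-byte code alignment, thunk_code_.size() == 12 and
// a reserved thunk location of 8. Calling WriteThunks(out, 4u) then proceeds:
//   AlignCode(4) == 8, matching thunk_locations_[current_thunk_to_write_], so
//   4 bytes of padding are written followed by the 12-byte thunk. That makes
//   thunk_end_offset == 20, and the final AlignCode(20) == 24 emits 4 more
//   padding bytes. The returned offset (24) is exactly what ReserveSpaceEnd()
//   computed when reserving the thunk, so reservation and writing agree.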

ArmBaseRelativePatcher::ArmBaseRelativePatcher(RelativePatcherTargetProvider* provider,
                                               InstructionSet instruction_set,
                                               std::vector<uint8_t> thunk_code,
                                               uint32_t max_positive_displacement,
                                               uint32_t max_negative_displacement)
    : provider_(provider), instruction_set_(instruction_set), thunk_code_(thunk_code),
      max_positive_displacement_(max_positive_displacement),
      max_negative_displacement_(max_negative_displacement),
      thunk_locations_(), current_thunk_to_write_(0u), unprocessed_patches_() {
}

uint32_t ArmBaseRelativePatcher::ReserveSpaceInternal(uint32_t offset,
                                                      const CompiledMethod* compiled_method,
                                                      MethodReference method_ref,
                                                      uint32_t max_extra_space) {
  DCHECK(compiled_method->GetQuickCode() != nullptr);
  uint32_t quick_code_size = compiled_method->GetQuickCode()->size();
  uint32_t quick_code_offset = compiled_method->AlignCode(offset) + sizeof(OatQuickMethodHeader);
  uint32_t next_aligned_offset = compiled_method->AlignCode(quick_code_offset + quick_code_size);
  // Adjust for extra space required by the subclass.
  next_aligned_offset = compiled_method->AlignCode(next_aligned_offset + max_extra_space);
  // TODO: ignore unprocessed patches targeting this method if they can reach quick_code_offset.
  // We need the MethodReference for that.
  if (!unprocessed_patches_.empty() &&
      next_aligned_offset - unprocessed_patches_.front().second > max_positive_displacement_) {
    bool needs_thunk = ReserveSpaceProcessPatches(quick_code_offset, method_ref,
                                                  next_aligned_offset);
    if (needs_thunk) {
      // A single thunk will cover all pending patches.
      unprocessed_patches_.clear();
      uint32_t thunk_location = compiled_method->AlignCode(offset);
      thunk_locations_.push_back(thunk_location);
      offset = CompiledMethod::AlignCode(thunk_location + thunk_code_.size(), instruction_set_);
    }
  }
  // Record this method's relative call patches for processing with later methods.
  for (const LinkerPatch& patch : compiled_method->GetPatches()) {
    if (patch.Type() == kLinkerPatchCallRelative) {
      unprocessed_patches_.emplace_back(patch.TargetMethod(),
                                        quick_code_offset + patch.LiteralOffset());
    }
  }
  return offset;
}

uint32_t ArmBaseRelativePatcher::CalculateDisplacement(uint32_t patch_offset,
                                                       uint32_t target_offset) {
  // Unsigned arithmetic with its well-defined overflow behavior is just fine here.
  uint32_t displacement = target_offset - patch_offset;
  // NOTE: With unsigned arithmetic we do mean to use && rather than || below.
  if (displacement > max_positive_displacement_ && displacement < -max_negative_displacement_) {
    // Unwritten thunks have higher offsets, check if it's within range.
    DCHECK(current_thunk_to_write_ == thunk_locations_.size() ||
           thunk_locations_[current_thunk_to_write_] > patch_offset);
    if (current_thunk_to_write_ != thunk_locations_.size() &&
        thunk_locations_[current_thunk_to_write_] - patch_offset < max_positive_displacement_) {
      displacement = thunk_locations_[current_thunk_to_write_] - patch_offset;
    } else {
      // We must have a previous thunk then.
      DCHECK_NE(current_thunk_to_write_, 0u);
      DCHECK_LT(thunk_locations_[current_thunk_to_write_ - 1], patch_offset);
      displacement = thunk_locations_[current_thunk_to_write_ - 1] - patch_offset;
      DCHECK(displacement >= -max_negative_displacement_);
    }
  }
  return displacement;
}
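
// Worked example for the range check in CalculateDisplacement() above
// (illustrative limits, not the real ISA constants): let
// max_positive_displacement_ == 0x00FFFFFCu and max_negative_displacement_ ==
// 0x01000000u, so -max_negative_displacement_ == 0xFF000000u. A backward call
// from patch_offset == 0x2000u to target_offset == 0x1000u wraps to
// displacement == 0xFFFFF000u: greater than max_positive_displacement_ but
// not less than 0xFF000000u, so the && condition is false and the in-range
// negative displacement is returned as-is. A genuinely out-of-range forward
// call, say displacement == 0x02000000u, satisfies both comparisons and is
// redirected to a thunk instead.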

bool ArmBaseRelativePatcher::ReserveSpaceProcessPatches(uint32_t quick_code_offset,
                                                        MethodReference method_ref,
                                                        uint32_t next_aligned_offset) {
  // Process as many patches as possible, stop only on unresolved targets or calls too far back.
  while (!unprocessed_patches_.empty()) {
    MethodReference patch_ref = unprocessed_patches_.front().first;
    uint32_t patch_offset = unprocessed_patches_.front().second;
    DCHECK(thunk_locations_.empty() || thunk_locations_.back() <= patch_offset);
    if (patch_ref.dex_file == method_ref.dex_file &&
        patch_ref.dex_method_index == method_ref.dex_method_index) {
      DCHECK_GT(quick_code_offset, patch_offset);
      if (quick_code_offset - patch_offset > max_positive_displacement_) {
        return true;
      }
    } else {
      auto result = provider_->FindMethodOffset(patch_ref);
      if (!result.first) {
        // If still unresolved, check if we have a thunk within range.
        if (thunk_locations_.empty() ||
            patch_offset - thunk_locations_.back() > max_negative_displacement_) {
          return next_aligned_offset - patch_offset > max_positive_displacement_;
        }
      } else {
        uint32_t target_offset = result.second - CompiledCode::CodeDelta(instruction_set_);
        if (target_offset >= patch_offset) {
          DCHECK_LE(target_offset - patch_offset, max_positive_displacement_);
        } else {
          // When calling back, check if we have a thunk that's closer than the actual target.
          if (!thunk_locations_.empty()) {
            target_offset = std::max(target_offset, thunk_locations_.back());
          }
          if (patch_offset - target_offset > max_negative_displacement_) {
            return true;
          }
        }
      }
    }
    unprocessed_patches_.pop_front();
  }
  return false;
}

}  // namespace linker
}  // namespace art
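
// A rough sketch of how a caller is expected to drive this class (hedged; the
// actual call sites live in the OAT writer and in the concrete ARM/Thumb2
// subclasses, not in this file):
//
//   uint32_t offset = start_offset;
//   for (each compiled method) {
//     offset = patcher->ReserveSpace(offset, compiled_method, method_ref);
//     offset = /* end of the method's aligned code, tracked by the caller */;
//   }
//   offset = patcher->ReserveSpaceEnd(offset);  // Possibly one final thunk.
//
// During writing, WriteThunks() is called before each chunk of code so that
// reserved thunks land at exactly the offsets recorded above, and the
// subclass's call patching uses CalculateDisplacement() to branch to either
// the real target or the nearest reachable thunk.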