// call_arm.cc - revision 0f45f22eb3c52f0ece4c56989180e79c6680d825
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Thumb2 ISA. */

#include "arm_lir.h"
#include "codegen_arm.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "gc/accounting/card_table.h"
#include "entrypoints/quick/quick_entrypoints.h"

namespace art {

/*
 * The sparse table in the literal pool is an array of <key,displacement>
 * pairs.  For each entry, we load the key/displacement pair with a single
 * ldmia.  This means that the register number of the temp we use for the
 * key must be lower than the reg for the displacement.
 *
 * The test loop will look something like:
 *
 *   adr   r_base, <table>
 *   ldr   r_val, [rARM_SP, v_reg_off]
 *   mov   r_idx, #table_size
 * lp:
 *   ldmia r_base!, {r_key, r_disp}
 *   sub   r_idx, #1
 *   cmp   r_val, r_key
 *   ifeq
 *   add   rARM_PC, r_disp   ; This is the branch from which we compute displacement
 *   cbnz  r_idx, lp
 */
void ArmMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
                                 RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpSparseSwitchTable(table);
  }
  // Add the table to the list - we'll process it later
  SwitchTable *tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint32_t size = table[1];
  tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
  switch_tables_.Insert(tab_rec);

  // Get the switch value
  rl_src = LoadValue(rl_src, kCoreReg);
  RegStorage r_base = AllocTemp();
  /* Allocate key and disp temps */
  RegStorage r_key = AllocTemp();
  RegStorage r_disp = AllocTemp();
  // Make sure r_key's register number is less than r_disp's number for ldmia
  if (r_key.GetReg() > r_disp.GetReg()) {
    RegStorage tmp = r_disp;
    r_disp = r_key;
    r_key = tmp;
  }
  // Materialize a pointer to the switch table
  NewLIR3(kThumb2Adr, r_base.GetReg(), 0, WrapPointer(tab_rec));
  // Set up r_idx
  RegStorage r_idx = AllocTemp();
  LoadConstant(r_idx, size);
  // Establish loop branch target
  LIR* target = NewLIR0(kPseudoTargetLabel);
  // Load next key/disp
  NewLIR2(kThumb2LdmiaWB, r_base.GetReg(), (1 << r_key.GetRegNum()) | (1 << r_disp.GetRegNum()));
  OpRegReg(kOpCmp, r_key, rl_src.reg);
  // Go if match.  NOTE: No instruction set switch here - must stay Thumb2
  LIR* it = OpIT(kCondEq, "");
  LIR* switch_branch = NewLIR1(kThumb2AddPCR, r_disp.GetReg());
  OpEndIT(it);
  tab_rec->anchor = switch_branch;
  // Needs to use setflags encoding here
  OpRegRegImm(kOpSub, r_idx, r_idx, 1);  // For value == 1, this should set flags.
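  // The DCHECK below verifies that the subtract above actually picked a
  // flag-setting (SUBS) encoding: the conditional loop-back branch that
  // follows consumes the condition codes it defines.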
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  OpCondBranch(kCondNe, target);
}

void ArmMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
                                 RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpPackedSwitchTable(table);
  }
  // Add the table to the list - we'll process it later
  SwitchTable *tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint32_t size = table[1];
  tab_rec->targets =
      static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
  switch_tables_.Insert(tab_rec);

  // Get the switch value
  rl_src = LoadValue(rl_src, kCoreReg);
  RegStorage table_base = AllocTemp();
  // Materialize a pointer to the switch table
  NewLIR3(kThumb2Adr, table_base.GetReg(), 0, WrapPointer(tab_rec));
  int low_key = s4FromSwitchData(&table[2]);
  RegStorage keyReg;
  // Remove the bias, if necessary
  if (low_key == 0) {
    keyReg = rl_src.reg;
  } else {
    keyReg = AllocTemp();
    OpRegRegImm(kOpSub, keyReg, rl_src.reg, low_key);
  }
  // Bounds check: the unsigned compare-and-branch catches both key < 0 and
  // key >= size, in which case we fall through to the code after the switch.
  OpRegImm(kOpCmp, keyReg, size-1);
  LIR* branch_over = OpCondBranch(kCondHi, NULL);

  // Load the displacement from the switch table
  RegStorage disp_reg = AllocTemp();
  LoadBaseIndexed(table_base, keyReg, disp_reg, 2, k32);

  // ..and go!  NOTE: No instruction set switch here - must stay Thumb2
  LIR* switch_branch = NewLIR1(kThumb2AddPCR, disp_reg.GetReg());
  tab_rec->anchor = switch_branch;

  /* branch_over target here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
}

/*
 * Array data table format:
 *  ushort ident = 0x0300   magic value
 *  ushort width            width of each element in the table
 *  uint   size             number of elements in the table
 *  ubyte  data[size*width] table of data values (may contain a single-byte
 *                          padding at the end)
 *
 * Total size is 4+(width * size + 1)/2 16-bit code units.
 */
void ArmMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  // Add the table to the list - we'll process it later
  FillArrayData *tab_rec =
      static_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint16_t width = tab_rec->table[1];
  uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
  tab_rec->size = (size * width) + 8;

  fill_array_data_.Insert(tab_rec);

  // Making a call - use explicit registers
  FlushAllRegs();   /* Everything to home location */
  LoadValueDirectFixed(rl_src, rs_r0);
  LoadWordDisp(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pHandleFillArrayData).Int32Value(),
               rs_rARM_LR);
  // Materialize a pointer to the fill data image
  NewLIR3(kThumb2Adr, rs_r1.GetReg(), 0, WrapPointer(tab_rec));
  ClobberCallerSave();
  LIR* call_inst = OpReg(kOpBlx, rs_rARM_LR);
  MarkSafepointPC(call_inst);
}

/*
 * Handle the unlocked -> thin locked transition inline, or else call out to the
 * quick entrypoint.  For more details see monitor.cc.
 */
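// Illustrative sketch of the ldrex/strex fast path emitted below, using the
// register roles fixed by the code (r0 = obj, r1 = lock word / strex status,
// r2 = this thread's thin-lock id):
//   ldr     r2, [rSELF, #thin_lock_id_offset]
//   ldrex   r1, [r0, #monitor_offset]      ; load the object's lock word
//   cmp     r1, #0                         ; only an unlocked (zero) word can be thin-locked
//   strexeq r1, r2, [r0, #monitor_offset]  ; try to install our id
//   ; a nonzero lock word or a failed strex falls back to pLockObject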
void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  // FIXME: need separate LoadValues for object references.
  LoadValueDirectFixed(rl_src, rs_r0);  // Get obj
  LockCallTemps();  // Prepare for explicit register usage
  constexpr bool kArchVariantHasGoodBranchPredictor = false;  // TODO: true if cortex-A15.
  if (kArchVariantHasGoodBranchPredictor) {
    LIR* null_check_branch = nullptr;
    if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) {
      null_check_branch = nullptr;  // No null check.
    } else {
      // If the null-check fails, it's handled by the slow-path to reduce exception-related
      // metadata.
      if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
        null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
      }
    }
    Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
    NewLIR3(kThumb2Ldrex, rs_r1.GetReg(), rs_r0.GetReg(),
            mirror::Object::MonitorOffset().Int32Value() >> 2);
    MarkPossibleNullPointerException(opt_flags);
    LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r1, 0, NULL);
    NewLIR4(kThumb2Strex, rs_r1.GetReg(), rs_r2.GetReg(), rs_r0.GetReg(),
            mirror::Object::MonitorOffset().Int32Value() >> 2);
    LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, NULL);

    LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
    not_unlocked_branch->target = slow_path_target;
    if (null_check_branch != nullptr) {
      null_check_branch->target = slow_path_target;
    }
    // TODO: move to a slow path.
    // Go expensive route - artLockObjectFromCode(obj);
    LoadWordDisp(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pLockObject).Int32Value(), rs_rARM_LR);
    ClobberCallerSave();
    LIR* call_inst = OpReg(kOpBlx, rs_rARM_LR);
    MarkSafepointPC(call_inst);

    LIR* success_target = NewLIR0(kPseudoTargetLabel);
    lock_success_branch->target = success_target;
    GenMemBarrier(kLoadAny);
  } else {
    // Explicit null-check, as the slow-path is entered via an IT block.
    GenNullCheck(rs_r0, opt_flags);
    Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
    NewLIR3(kThumb2Ldrex, rs_r1.GetReg(), rs_r0.GetReg(),
            mirror::Object::MonitorOffset().Int32Value() >> 2);
    MarkPossibleNullPointerException(opt_flags);
    OpRegImm(kOpCmp, rs_r1, 0);
    LIR* it = OpIT(kCondEq, "");
    NewLIR4(kThumb2Strex/*eq*/, rs_r1.GetReg(), rs_r2.GetReg(), rs_r0.GetReg(),
            mirror::Object::MonitorOffset().Int32Value() >> 2);
    OpEndIT(it);
    OpRegImm(kOpCmp, rs_r1, 0);
    it = OpIT(kCondNe, "T");
    // Go expensive route - artLockObjectFromCode(self, obj);
    LoadWordDisp/*ne*/(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pLockObject).Int32Value(),
                       rs_rARM_LR);
    ClobberCallerSave();
    LIR* call_inst = OpReg(kOpBlx/*ne*/, rs_rARM_LR);
    OpEndIT(it);
    MarkSafepointPC(call_inst);
    GenMemBarrier(kLoadAny);
  }
}

/*
 * Handle the thin locked -> unlocked transition inline, or else call out to the
 * quick entrypoint.  For more details see monitor.cc.  Note the code below doesn't
 * use ldrex/strex: the code holds the lock and can only give away ownership if
 * it's suspended.
 */
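// Illustrative sketch of the fast path emitted below (r0 = obj, r1 = lock word,
// r2 = this thread's thin-lock id, r3 = 0):
//   ldr   r1, [r0, #monitor_offset]  ; load the lock word
//   cmp   r1, r2                     ; thin-locked by us, with no recursion?
//   streq r3, [r0, #monitor_offset]  ; if so, a plain store of zero unlocks
//   ; anything else (inflated, recursive, or not ours) goes to pUnlockObject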
void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  LoadValueDirectFixed(rl_src, rs_r0);  // Get obj
  LockCallTemps();  // Prepare for explicit register usage
  LIR* null_check_branch = nullptr;
  Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
  constexpr bool kArchVariantHasGoodBranchPredictor = false;  // TODO: true if cortex-A15.
  if (kArchVariantHasGoodBranchPredictor) {
    if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) {
      null_check_branch = nullptr;  // No null check.
    } else {
      // If the null-check fails, it's handled by the slow-path to reduce exception-related
      // metadata.
      if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
        null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
      }
    }
    Load32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);
    MarkPossibleNullPointerException(opt_flags);
    LoadConstantNoClobber(rs_r3, 0);
    LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r1, rs_r2, NULL);
    GenMemBarrier(kAnyStore);
    Store32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r3);
    LIR* unlock_success_branch = OpUnconditionalBranch(NULL);

    LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
    slow_unlock_branch->target = slow_path_target;
    if (null_check_branch != nullptr) {
      null_check_branch->target = slow_path_target;
    }
    // TODO: move to a slow path.
    // Go expensive route - artUnlockObjectFromCode(obj);
    LoadWordDisp(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject).Int32Value(), rs_rARM_LR);
    ClobberCallerSave();
    LIR* call_inst = OpReg(kOpBlx, rs_rARM_LR);
    MarkSafepointPC(call_inst);

    LIR* success_target = NewLIR0(kPseudoTargetLabel);
    unlock_success_branch->target = success_target;
  } else {
    // Explicit null-check, as the slow-path is entered via an IT block.
    GenNullCheck(rs_r0, opt_flags);
    Load32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);  // Get lock
    MarkPossibleNullPointerException(opt_flags);
    Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
    LoadConstantNoClobber(rs_r3, 0);
    // Is lock unheld on lock or held by us (==thread_id) on unlock?
    OpRegReg(kOpCmp, rs_r1, rs_r2);

    LIR* it = OpIT(kCondEq, "EE");
    if (GenMemBarrier(kAnyStore)) {
      UpdateIT(it, "TEE");
    }
    Store32Disp/*eq*/(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r3);
    // Go expensive route - UnlockObjectFromCode(obj);
    LoadWordDisp/*ne*/(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject).Int32Value(),
                       rs_rARM_LR);
    ClobberCallerSave();
    LIR* call_inst = OpReg(kOpBlx/*ne*/, rs_rARM_LR);
    OpEndIT(it);
    MarkSafepointPC(call_inst);
  }
}

void ArmMir2Lir::GenMoveException(RegLocation rl_dest) {
  int ex_offset = Thread::ExceptionOffset<4>().Int32Value();
  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
  RegStorage reset_reg = AllocTempRef();
  LoadRefDisp(rs_rARM_SELF, ex_offset, rl_result.reg, kNotVolatile);
  LoadConstant(reset_reg, 0);
  StoreRefDisp(rs_rARM_SELF, ex_offset, reset_reg, kNotVolatile);
  FreeTemp(reset_reg);
  StoreValue(rl_dest, rl_result);
}

/*
 * Mark the garbage collection card.  Skip if the value we're storing is null.
 */
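// Sketch of the emitted sequence: the card index is tgt_addr >> kCardShift, and
// the card-table base register itself supplies the byte stored to dirty the
// card, which saves materializing a separate constant:
//   ldr  r_base, [rSELF, #card_table_offset]
//   lsr  r_no, tgt_addr, #kCardShift
//   strb r_base, [r_base, r_no]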
void ArmMir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
  RegStorage reg_card_base = AllocTemp();
  RegStorage reg_card_no = AllocTemp();
  LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
  LoadWordDisp(rs_rARM_SELF, Thread::CardTableOffset<4>().Int32Value(), reg_card_base);
  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
  FreeTemp(reg_card_base);
  FreeTemp(reg_card_no);
}

void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
  int spill_count = num_core_spills_ + num_fp_spills_;
  /*
   * On entry, r0, r1, r2 & r3 are live.  Let the register allocation
   * mechanism know so it doesn't try to use any of them when
   * expanding the frame or flushing.  This leaves the utility
   * code with a single temp: r12.  This should be enough.
   */
  LockTemp(rs_r0);
  LockTemp(rs_r1);
  LockTemp(rs_r2);
  LockTemp(rs_r3);

  /*
   * We can safely skip the stack overflow check if we're
   * a leaf *and* our frame size < fudge factor.
   */
  bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !IsLargeFrame(frame_size_, kArm);
  NewLIR0(kPseudoMethodEntry);
  const size_t kStackOverflowReservedUsableBytes = GetStackOverflowReservedBytes(kArm) -
      Thread::kStackOverflowSignalReservedBytes;
  bool large_frame = (static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes);
  if (!skip_overflow_check) {
    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
      if (!large_frame) {
        /* Load stack limit */
        LockTemp(rs_r12);
        Load32Disp(rs_rARM_SELF, Thread::StackEndOffset<4>().Int32Value(), rs_r12);
      }
    } else {
      // Implicit stack overflow check.
      // Generate a load from [sp, #-overflowsize].  If this is in the stack
      // redzone we will get a segmentation fault.
      //
      // Caveat coder: if someone changes the kStackOverflowReservedBytes value
      // we need to make sure that it's loadable in an immediate field of
      // a sub instruction.  Otherwise we will get a temp allocation and the
      // code size will increase.
      //
      // This is done before the callee save instructions to avoid any possibility
      // of these overflowing.  This uses r12 and that's never saved in a callee
      // save.
      OpRegRegImm(kOpSub, rs_r12, rs_rARM_SP, GetStackOverflowReservedBytes(kArm));
      Load32Disp(rs_r12, 0, rs_r12);
      MarkPossibleStackOverflowException();
    }
  }
  /* Spill core callee saves */
  NewLIR1(kThumb2Push, core_spill_mask_);
  /* Need to spill any FP regs? */
  if (num_fp_spills_) {
    /*
     * NOTE: fp spills are a little different from core spills in that
     * they are pushed as a contiguous block.  When promoting from
     * the fp set, we must allocate all singles from s16..highest-promoted.
     */
    NewLIR1(kThumb2VPushCS, num_fp_spills_);
  }

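  // Callee saves (core, then FP) are now on the stack; the code below carves out
  // the remainder of the frame, with an explicit overflow check where required.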
  const int spill_size = spill_count * 4;
  const int frame_size_without_spills = frame_size_ - spill_size;
  if (!skip_overflow_check) {
    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
      class StackOverflowSlowPath : public LIRSlowPath {
       public:
        StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, bool restore_lr, size_t sp_displace)
            : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, nullptr), restore_lr_(restore_lr),
              sp_displace_(sp_displace) {
        }
        void Compile() OVERRIDE {
          m2l_->ResetRegPool();
          m2l_->ResetDefTracking();
          GenerateTargetLabel(kPseudoThrowTarget);
          if (restore_lr_) {
            m2l_->LoadWordDisp(rs_rARM_SP, sp_displace_ - 4, rs_rARM_LR);
          }
          m2l_->OpRegImm(kOpAdd, rs_rARM_SP, sp_displace_);
          m2l_->ClobberCallerSave();
          ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowStackOverflow);
          // Load the entrypoint directly into the pc instead of doing a load + branch.  Assumes
          // codegen and target are in thumb2 mode.
          // NOTE: native pointer.
          m2l_->LoadWordDisp(rs_rARM_SELF, func_offset.Int32Value(), rs_rARM_PC);
        }

       private:
        const bool restore_lr_;
        const size_t sp_displace_;
      };
      if (large_frame) {
        // Note: may need a temp reg, and we only have r12 free at this point.
        OpRegRegImm(kOpSub, rs_rARM_LR, rs_rARM_SP, frame_size_without_spills);
        Load32Disp(rs_rARM_SELF, Thread::StackEndOffset<4>().Int32Value(), rs_r12);
        LIR* branch = OpCmpBranch(kCondUlt, rs_rARM_LR, rs_r12, nullptr);
        // Need to restore LR since we used it as a temp.
        AddSlowPath(new (arena_) StackOverflowSlowPath(this, branch, true, spill_size));
        OpRegCopy(rs_rARM_SP, rs_rARM_LR);  // Establish stack
      } else {
        /*
         * If the frame is small enough we are guaranteed to have enough space that remains to
         * handle signals on the user stack.  However, we may not have any free temp
         * registers at this point, so we'll temporarily add LR to the temp pool.
         */
        DCHECK(!GetRegInfo(rs_rARM_LR)->IsTemp());
        MarkTemp(rs_rARM_LR);
        FreeTemp(rs_rARM_LR);
        OpRegRegImm(kOpSub, rs_rARM_SP, rs_rARM_SP, frame_size_without_spills);
        Clobber(rs_rARM_LR);
        UnmarkTemp(rs_rARM_LR);
        LIR* branch = OpCmpBranch(kCondUlt, rs_rARM_SP, rs_r12, nullptr);
        AddSlowPath(new (arena_) StackOverflowSlowPath(this, branch, false, frame_size_));
      }
    } else {
      // Implicit stack overflow check has already been done.  Just make room on the
      // stack for the frame now.
      OpRegImm(kOpSub, rs_rARM_SP, frame_size_without_spills);
    }
  } else {
    OpRegImm(kOpSub, rs_rARM_SP, frame_size_without_spills);
  }

  FlushIns(ArgLocs, rl_method);

  FreeTemp(rs_r0);
  FreeTemp(rs_r1);
  FreeTemp(rs_r2);
  FreeTemp(rs_r3);
  FreeTemp(rs_r12);
}

void ArmMir2Lir::GenExitSequence() {
  int spill_count = num_core_spills_ + num_fp_spills_;
  /*
   * In the exit path, r0/r1 are live - make sure they aren't
   * allocated by the register utilities as temps.
   */
  LockTemp(rs_r0);
  LockTemp(rs_r1);

  NewLIR0(kPseudoMethodExit);
  OpRegImm(kOpAdd, rs_rARM_SP, frame_size_ - (spill_count * 4));
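  // SP now points at the register save area; restore FP saves first, then pop the
  // core saves - popping directly to PC when LR was spilled, so the pop doubles
  // as the return.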
  /* Need to restore any FP callee saves? */
  if (num_fp_spills_) {
    NewLIR1(kThumb2VPopCS, num_fp_spills_);
  }
  if (core_spill_mask_ & (1 << rs_rARM_LR.GetRegNum())) {
    /* Unspill rARM_LR to rARM_PC */
    core_spill_mask_ &= ~(1 << rs_rARM_LR.GetRegNum());
    core_spill_mask_ |= (1 << rs_rARM_PC.GetRegNum());
  }
  NewLIR1(kThumb2Pop, core_spill_mask_);
  if (!(core_spill_mask_ & (1 << rs_rARM_PC.GetRegNum()))) {
    /* We didn't pop to rARM_PC, so must do a bx rARM_LR */
    NewLIR1(kThumbBx, rs_rARM_LR.GetReg());
  }
}

void ArmMir2Lir::GenSpecialExitSequence() {
  NewLIR1(kThumbBx, rs_rARM_LR.GetReg());
}

}  // namespace art