// call_arm.cc — revision 848871b4d8481229c32e0d048a9856e5a9a17ef9
1/* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17/* This file contains codegen for the Thumb2 ISA. */ 18 19#include "arm_lir.h" 20#include "codegen_arm.h" 21#include "dex/quick/mir_to_lir-inl.h" 22#include "entrypoints/quick/quick_entrypoints.h" 23 24namespace art { 25 26 27/* Return the position of an ssa name within the argument list */ 28int ArmMir2Lir::InPosition(int s_reg) { 29 int v_reg = mir_graph_->SRegToVReg(s_reg); 30 return v_reg - cu_->num_regs; 31} 32 33/* 34 * Describe an argument. If it's already in an arg register, just leave it 35 * there. NOTE: all live arg registers must be locked prior to this call 36 * to avoid having them allocated as a temp by downstream utilities. 37 */ 38RegLocation ArmMir2Lir::ArgLoc(RegLocation loc) { 39 int arg_num = InPosition(loc.s_reg_low); 40 if (loc.wide) { 41 if (arg_num == 2) { 42 // Bad case - half in register, half in frame. Just punt 43 loc.location = kLocInvalid; 44 } else if (arg_num < 2) { 45 loc.low_reg = rARM_ARG1 + arg_num; 46 loc.high_reg = loc.low_reg + 1; 47 loc.location = kLocPhysReg; 48 } else { 49 loc.location = kLocDalvikFrame; 50 } 51 } else { 52 if (arg_num < 3) { 53 loc.low_reg = rARM_ARG1 + arg_num; 54 loc.location = kLocPhysReg; 55 } else { 56 loc.location = kLocDalvikFrame; 57 } 58 } 59 return loc; 60} 61 62/* 63 * Load an argument. If already in a register, just return. 
If in 64 * the frame, we can't use the normal LoadValue() because it assumed 65 * a proper frame - and we're frameless. 66 */ 67RegLocation ArmMir2Lir::LoadArg(RegLocation loc) { 68 if (loc.location == kLocDalvikFrame) { 69 int start = (InPosition(loc.s_reg_low) + 1) * sizeof(uint32_t); 70 loc.low_reg = AllocTemp(); 71 LoadWordDisp(rARM_SP, start, loc.low_reg); 72 if (loc.wide) { 73 loc.high_reg = AllocTemp(); 74 LoadWordDisp(rARM_SP, start + sizeof(uint32_t), loc.high_reg); 75 } 76 loc.location = kLocPhysReg; 77 } 78 return loc; 79} 80 81/* Lock any referenced arguments that arrive in registers */ 82void ArmMir2Lir::LockLiveArgs(MIR* mir) { 83 int first_in = cu_->num_regs; 84 const int num_arg_regs = 3; // TODO: generalize & move to RegUtil.cc 85 for (int i = 0; i < mir->ssa_rep->num_uses; i++) { 86 int v_reg = mir_graph_->SRegToVReg(mir->ssa_rep->uses[i]); 87 int InPosition = v_reg - first_in; 88 if (InPosition < num_arg_regs) { 89 LockTemp(rARM_ARG1 + InPosition); 90 } 91 } 92} 93 94/* Find the next MIR, which may be in a following basic block */ 95// TODO: should this be a utility in mir_graph? 96MIR* ArmMir2Lir::GetNextMir(BasicBlock** p_bb, MIR* mir) { 97 BasicBlock* bb = *p_bb; 98 MIR* orig_mir = mir; 99 while (bb != NULL) { 100 if (mir != NULL) { 101 mir = mir->next; 102 } 103 if (mir != NULL) { 104 return mir; 105 } else { 106 bb = bb->fall_through; 107 *p_bb = bb; 108 if (bb) { 109 mir = bb->first_mir_insn; 110 if (mir != NULL) { 111 return mir; 112 } 113 } 114 } 115 } 116 return orig_mir; 117} 118 119/* Used for the "verbose" listing */ 120// TODO: move to common code 121void ArmMir2Lir::GenPrintLabel(MIR* mir) { 122 /* Mark the beginning of a Dalvik instruction for line tracking */ 123 char* inst_str = cu_->verbose ? 
124 mir_graph_->GetDalvikDisassembly(mir) : NULL; 125 MarkBoundary(mir->offset, inst_str); 126} 127 128MIR* ArmMir2Lir::SpecialIGet(BasicBlock** bb, MIR* mir, 129 OpSize size, bool long_or_double, bool is_object) { 130 int field_offset; 131 bool is_volatile; 132 uint32_t field_idx = mir->dalvikInsn.vC; 133 bool fast_path = FastInstance(field_idx, field_offset, is_volatile, false); 134 if (!fast_path || !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) { 135 return NULL; 136 } 137 RegLocation rl_obj = mir_graph_->GetSrc(mir, 0); 138 LockLiveArgs(mir); 139 rl_obj = ArmMir2Lir::ArgLoc(rl_obj); 140 RegLocation rl_dest; 141 if (long_or_double) { 142 rl_dest = GetReturnWide(false); 143 } else { 144 rl_dest = GetReturn(false); 145 } 146 // Point of no return - no aborts after this 147 ArmMir2Lir::GenPrintLabel(mir); 148 rl_obj = LoadArg(rl_obj); 149 GenIGet(field_idx, mir->optimization_flags, size, rl_dest, rl_obj, long_or_double, is_object); 150 return GetNextMir(bb, mir); 151} 152 153MIR* ArmMir2Lir::SpecialIPut(BasicBlock** bb, MIR* mir, 154 OpSize size, bool long_or_double, bool is_object) { 155 int field_offset; 156 bool is_volatile; 157 uint32_t field_idx = mir->dalvikInsn.vC; 158 bool fast_path = FastInstance(field_idx, field_offset, is_volatile, false); 159 if (!fast_path || !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) { 160 return NULL; 161 } 162 RegLocation rl_src; 163 RegLocation rl_obj; 164 LockLiveArgs(mir); 165 if (long_or_double) { 166 rl_src = mir_graph_->GetSrcWide(mir, 0); 167 rl_obj = mir_graph_->GetSrc(mir, 2); 168 } else { 169 rl_src = mir_graph_->GetSrc(mir, 0); 170 rl_obj = mir_graph_->GetSrc(mir, 1); 171 } 172 rl_src = ArmMir2Lir::ArgLoc(rl_src); 173 rl_obj = ArmMir2Lir::ArgLoc(rl_obj); 174 // Reject if source is split across registers & frame 175 if (rl_obj.location == kLocInvalid) { 176 ResetRegPool(); 177 return NULL; 178 } 179 // Point of no return - no aborts after this 180 ArmMir2Lir::GenPrintLabel(mir); 181 rl_obj = LoadArg(rl_obj); 
182 rl_src = LoadArg(rl_src); 183 GenIPut(field_idx, mir->optimization_flags, size, rl_src, rl_obj, long_or_double, is_object); 184 return GetNextMir(bb, mir); 185} 186 187MIR* ArmMir2Lir::SpecialIdentity(MIR* mir) { 188 RegLocation rl_src; 189 RegLocation rl_dest; 190 bool wide = (mir->ssa_rep->num_uses == 2); 191 if (wide) { 192 rl_src = mir_graph_->GetSrcWide(mir, 0); 193 rl_dest = GetReturnWide(false); 194 } else { 195 rl_src = mir_graph_->GetSrc(mir, 0); 196 rl_dest = GetReturn(false); 197 } 198 LockLiveArgs(mir); 199 rl_src = ArmMir2Lir::ArgLoc(rl_src); 200 if (rl_src.location == kLocInvalid) { 201 ResetRegPool(); 202 return NULL; 203 } 204 // Point of no return - no aborts after this 205 ArmMir2Lir::GenPrintLabel(mir); 206 rl_src = LoadArg(rl_src); 207 if (wide) { 208 StoreValueWide(rl_dest, rl_src); 209 } else { 210 StoreValue(rl_dest, rl_src); 211 } 212 return mir; 213} 214 215/* 216 * Special-case code genration for simple non-throwing leaf methods. 217 */ 218void ArmMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, 219 SpecialCaseHandler special_case) { 220 current_dalvik_offset_ = mir->offset; 221 MIR* next_mir = NULL; 222 switch (special_case) { 223 case kNullMethod: 224 DCHECK(mir->dalvikInsn.opcode == Instruction::RETURN_VOID); 225 next_mir = mir; 226 break; 227 case kConstFunction: 228 ArmMir2Lir::GenPrintLabel(mir); 229 LoadConstant(rARM_RET0, mir->dalvikInsn.vB); 230 next_mir = GetNextMir(&bb, mir); 231 break; 232 case kIGet: 233 next_mir = SpecialIGet(&bb, mir, kWord, false, false); 234 break; 235 case kIGetBoolean: 236 case kIGetByte: 237 next_mir = SpecialIGet(&bb, mir, kUnsignedByte, false, false); 238 break; 239 case kIGetObject: 240 next_mir = SpecialIGet(&bb, mir, kWord, false, true); 241 break; 242 case kIGetChar: 243 next_mir = SpecialIGet(&bb, mir, kUnsignedHalf, false, false); 244 break; 245 case kIGetShort: 246 next_mir = SpecialIGet(&bb, mir, kSignedHalf, false, false); 247 break; 248 case kIGetWide: 249 next_mir = SpecialIGet(&bb, 
mir, kLong, true, false); 250 break; 251 case kIPut: 252 next_mir = SpecialIPut(&bb, mir, kWord, false, false); 253 break; 254 case kIPutBoolean: 255 case kIPutByte: 256 next_mir = SpecialIPut(&bb, mir, kUnsignedByte, false, false); 257 break; 258 case kIPutObject: 259 next_mir = SpecialIPut(&bb, mir, kWord, false, true); 260 break; 261 case kIPutChar: 262 next_mir = SpecialIPut(&bb, mir, kUnsignedHalf, false, false); 263 break; 264 case kIPutShort: 265 next_mir = SpecialIPut(&bb, mir, kSignedHalf, false, false); 266 break; 267 case kIPutWide: 268 next_mir = SpecialIPut(&bb, mir, kLong, true, false); 269 break; 270 case kIdentity: 271 next_mir = SpecialIdentity(mir); 272 break; 273 default: 274 return; 275 } 276 if (next_mir != NULL) { 277 current_dalvik_offset_ = next_mir->offset; 278 if (special_case != kIdentity) { 279 ArmMir2Lir::GenPrintLabel(next_mir); 280 } 281 NewLIR1(kThumbBx, rARM_LR); 282 core_spill_mask_ = 0; 283 num_core_spills_ = 0; 284 fp_spill_mask_ = 0; 285 num_fp_spills_ = 0; 286 frame_size_ = 0; 287 core_vmap_table_.clear(); 288 fp_vmap_table_.clear(); 289 } 290} 291 292/* 293 * The sparse table in the literal pool is an array of <key,displacement> 294 * pairs. For each set, we'll load them as a pair using ldmia. 295 * This means that the register number of the temp we use for the key 296 * must be lower than the reg for the displacement. 
 *
 * The test loop will look something like:
 *
 *   adr   rBase, <table>
 *   ldr   r_val, [rARM_SP, v_reg_off]
 *   mov   r_idx, #table_size
 * lp:
 *   ldmia rBase!, {r_key, r_disp}
 *   sub   r_idx, #1
 *   cmp   r_val, r_key
 *   ifeq
 *   add   rARM_PC, r_disp   ; This is the branch from which we compute displacement
 *   cbnz  r_idx, lp
 */
void ArmMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
                                 RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpSparseSwitchTable(table);
  }
  // Add the table to the list - we'll process it later
  SwitchTable *tab_rec =
      static_cast<SwitchTable*>(arena_->NewMem(sizeof(SwitchTable), true,
                                               ArenaAllocator::kAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  // table[1] is the entry count in the sparse-switch payload.
  int size = table[1];
  tab_rec->targets = static_cast<LIR**>(arena_->NewMem(size * sizeof(LIR*), true,
                                                       ArenaAllocator::kAllocLIR));
  switch_tables_.Insert(tab_rec);

  // Get the switch value
  rl_src = LoadValue(rl_src, kCoreReg);
  int rBase = AllocTemp();
  /* Allocate key and disp temps */
  int r_key = AllocTemp();
  int r_disp = AllocTemp();
  // Make sure r_key's register number is less than r_disp's number for ldmia:
  // ldmia fills registers in ascending numeric order, so the key (first
  // word of each pair) must land in the lower-numbered register.
  if (r_key > r_disp) {
    int tmp = r_disp;
    r_disp = r_key;
    r_key = tmp;
  }
  // Materialize a pointer to the switch table (fixed up later via tab_rec).
  NewLIR3(kThumb2Adr, rBase, 0, reinterpret_cast<uintptr_t>(tab_rec));
  // Set up r_idx as the remaining-entries loop counter.
  int r_idx = AllocTemp();
  LoadConstant(r_idx, size);
  // Establish loop branch target
  LIR* target = NewLIR0(kPseudoTargetLabel);
  // Load next key/disp pair, post-incrementing rBase.
  NewLIR2(kThumb2LdmiaWB, rBase, (1 << r_key) | (1 << r_disp));
  OpRegReg(kOpCmp, r_key, rl_src.low_reg);
  // Go if match.  NOTE: No instruction set switch here - must stay Thumb2
  OpIT(kCondEq, "");
  // The add-to-PC below is the anchor from which case displacements are
  // computed when the table is emitted.
  LIR* switch_branch = NewLIR1(kThumb2AddPCR, r_disp);
  tab_rec->anchor = switch_branch;
  // Needs to use setflags encoding here so the bne below sees the result.
  NewLIR3(kThumb2SubsRRI12, r_idx, r_idx, 1);
  OpCondBranch(kCondNe, target);
}


void ArmMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
                                 RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpPackedSwitchTable(table);
  }
  // Add the table to the list - we'll process it later
  SwitchTable *tab_rec =
      static_cast<SwitchTable*>(arena_->NewMem(sizeof(SwitchTable), true,
                                               ArenaAllocator::kAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  // table[1] is the entry count in the packed-switch payload.
  int size = table[1];
  tab_rec->targets =
      static_cast<LIR**>(arena_->NewMem(size * sizeof(LIR*), true, ArenaAllocator::kAllocLIR));
  switch_tables_.Insert(tab_rec);

  // Get the switch value
  rl_src = LoadValue(rl_src, kCoreReg);
  int table_base = AllocTemp();
  // Materialize a pointer to the switch table
  NewLIR3(kThumb2Adr, table_base, 0, reinterpret_cast<uintptr_t>(tab_rec));
  // first_key of the packed table, used to bias the switch value to 0-based.
  int low_key = s4FromSwitchData(&table[2]);
  int keyReg;
  // Remove the bias, if necessary
  if (low_key == 0) {
    keyReg = rl_src.low_reg;
  } else {
    keyReg = AllocTemp();
    OpRegRegImm(kOpSub, keyReg, rl_src.low_reg, low_key);
  }
  // Bounds check - if < 0 or >= size continue following switch.
  // kCondHi is an unsigned compare, so a negative biased key (a large
  // unsigned value) also takes branch_over.
  OpRegImm(kOpCmp, keyReg, size-1);
  LIR* branch_over = OpCondBranch(kCondHi, NULL);

  // Load the displacement from the switch table (scaled by 4 via shift 2).
  int disp_reg = AllocTemp();
  LoadBaseIndexed(table_base, keyReg, disp_reg, 2, kWord);

  // ..and go!  NOTE: No instruction set switch here - must stay Thumb2
  LIR* switch_branch = NewLIR1(kThumb2AddPCR, disp_reg);
  tab_rec->anchor = switch_branch;

  /* branch_over target here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
}

/*
 * Array data table format:
 *  ushort ident = 0x0300   magic value
 *  ushort width            width of each element in the table
 *  uint   size             number of elements in the table
 *  ubyte  data[size*width] table of data values (may contain a single-byte
 *                          padding at the end)
 *
 * Total size is 4+(width * size + 1)/2 16-bit code units.
 */
void ArmMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  // Add the table to the list - we'll process it later
  FillArrayData *tab_rec =
      static_cast<FillArrayData*>(arena_->NewMem(sizeof(FillArrayData), true,
                                                 ArenaAllocator::kAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint16_t width = tab_rec->table[1];
  // size is stored as two 16-bit halves, low half first.
  uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
  // +8 bytes covers the four 16-bit header words (ident/width/size).
  tab_rec->size = (size * width) + 8;

  fill_array_data_.Insert(tab_rec);

  // Making a call - use explicit registers
  FlushAllRegs();   /* Everything to home location */
  LoadValueDirectFixed(rl_src, r0);
  LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData).Int32Value(),
               rARM_LR);
  // Materialize a pointer to the fill data image
  NewLIR3(kThumb2Adr, r1, 0, reinterpret_cast<uintptr_t>(tab_rec));
  ClobberCalleeSave();
  LIR* call_inst = OpReg(kOpBlx, rARM_LR);
  MarkSafepointPC(call_inst);
}

/*
 * Handle simple case (thin lock) inline.  If it's complicated, bail
 * out to the heavyweight lock/unlock routines.
We'll use dedicated
 * registers here in order to be in the right position in case we
 * need to bail to oat[Lock/Unlock]Object(self, object)
 *
 * r0 -> self pointer [arg0 for oat[Lock/Unlock]Object
 * r1 -> object [arg1 for oat[Lock/Unlock]Object
 * r2 -> initial contents of object->lock, later result of strex
 * r3 -> self->thread_id
 * r12 -> allow to be used by utilities as general temp
 *
 * The result of the strex is 0 if we acquire the lock.
 *
 * See comments in monitor.cc for the layout of the lock word.
 * Of particular interest to this code is the test for the
 * simple case - which we handle inline.  For monitor enter, the
 * simple case is thin lock, held by no-one.  For monitor exit,
 * the simple case is thin lock, held by the unlocking thread with
 * a recurse count of 0.
 *
 * A minor complication is that there is a field in the lock word
 * unrelated to locking: the hash state.  This field must be ignored, but
 * preserved.
 *
 */
void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  DCHECK_EQ(LW_SHAPE_THIN, 0);
  LoadValueDirectFixed(rl_src, r0);  // Get obj
  LockCallTemps();  // Prepare for explicit register usage
  GenNullCheck(rl_src.s_reg_low, r0, opt_flags);
  LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
  NewLIR3(kThumb2Ldrex, r1, r0,
          mirror::Object::MonitorOffset().Int32Value() >> 2);  // Get object->lock
  // Align owner: shift thread id into the lock-owner field position.
  OpRegImm(kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
  // Merge the preserved hash-state bits of the old lock word into the
  // candidate new lock word (r2), then clear the lock-related bits of r1
  // so the "unheld" test below ignores hash state.
  NewLIR4(kThumb2Bfi, r2, r1, 0, LW_LOCK_OWNER_SHIFT - 1);
  NewLIR3(kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
  // Lock is unheld iff the lock-related bits are all zero.
  OpRegImm(kOpCmp, r1, 0);
  OpIT(kCondEq, "");
  // Conditionally attempt to claim the lock; r1 = strex result (0 == won).
  NewLIR4(kThumb2Strex, r1, r2, r0,
          mirror::Object::MonitorOffset().Int32Value() >> 2);
  OpRegImm(kOpCmp, r1, 0);
  // If held by someone else (or strex lost the race), take the slow path.
  OpIT(kCondNe, "T");
  // Go expensive route - artLockObjectFromCode(self, obj);
  LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObject).Int32Value(), rARM_LR);
  ClobberCalleeSave();
  LIR* call_inst = OpReg(kOpBlx, rARM_LR);
  MarkSafepointPC(call_inst);
  // Acquire semantics for the lock.
  GenMemBarrier(kLoadLoad);
}

/*
 * For monitor unlock, we don't have to use ldrex/strex.  Once
 * we've determined that the lock is thin and that we own it with
 * a zero recursion count, it's safe to punch it back to the
 * initial, unlock thin state with a store word.
 */
void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
  DCHECK_EQ(LW_SHAPE_THIN, 0);
  FlushAllRegs();
  LoadValueDirectFixed(rl_src, r0);  // Get obj
  LockCallTemps();  // Prepare for explicit register usage
  GenNullCheck(rl_src.s_reg_low, r0, opt_flags);
  LoadWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r1);  // Get lock
  LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
  // Is lock unheld on lock or held by us (==thread_id) on unlock?
  // r3 keeps only the hash-state bits - this becomes the stored (unlocked)
  // lock word if we own the thin lock.
  OpRegRegImm(kOpAnd, r3, r1,
              (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT));
  // Align owner: shift our thread id into the lock-owner field position.
  OpRegImm(kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
  // Clear hash-state bits from the loaded lock word before comparing.
  NewLIR3(kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
  // r1 == 0 iff thin lock held by us with zero recursion count.
  OpRegReg(kOpSub, r1, r2);
  OpIT(kCondEq, "EE");
  // Fast path: store the hash-only word back, releasing the lock.
  StoreWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r3);
  // Go expensive route - UnlockObjectFromCode(obj);
  LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObject).Int32Value(), rARM_LR);
  ClobberCalleeSave();
  LIR* call_inst = OpReg(kOpBlx, rARM_LR);
  MarkSafepointPC(call_inst);
  // Release semantics for the unlock.
  GenMemBarrier(kStoreLoad);
}

/*
 * Move the pending exception out of the Thread object into rl_dest and
 * clear the thread-local exception slot.
 */
void ArmMir2Lir::GenMoveException(RegLocation rl_dest) {
  int ex_offset = Thread::ExceptionOffset().Int32Value();
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int reset_reg = AllocTemp();
  LoadWordDisp(rARM_SELF, ex_offset, rl_result.low_reg);
  LoadConstant(reset_reg, 0);
  StoreWordDisp(rARM_SELF, ex_offset, reset_reg);
  FreeTemp(reset_reg);
  StoreValue(rl_dest, rl_result);
}

/*
 * Mark garbage collection card.  Skip if the value we're storing is null.
 */
void ArmMir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg) {
  int reg_card_base = AllocTemp();
  int reg_card_no = AllocTemp();
  LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
  LoadWordDisp(rARM_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
  // card_table[addr >> kCardShift] = <low byte of card_table base>.
  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0,
                   kUnsignedByte);
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
  FreeTemp(reg_card_base);
  FreeTemp(reg_card_no);
}

/*
 * Emit the method prologue: optional stack-overflow check, core/FP callee
 * spills, frame allocation, and flushing of incoming args to home locations.
 */
void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
  int spill_count = num_core_spills_ + num_fp_spills_;
  /*
   * On entry, r0, r1, r2 & r3 are live.  Let the register allocation
   * mechanism know so it doesn't try to use any of them when
   * expanding the frame or flushing.  This leaves the utility
   * code with a single temp: r12.  This should be enough.
   */
  LockTemp(r0);
  LockTemp(r1);
  LockTemp(r2);
  LockTemp(r3);

  /*
   * We can safely skip the stack overflow check if we're
   * a leaf *and* our frame size < fudge factor.
   */
  bool skip_overflow_check = (mir_graph_->MethodIsLeaf() &&
                              (static_cast<size_t>(frame_size_) <
                               Thread::kStackOverflowReservedBytes));
  NewLIR0(kPseudoMethodEntry);
  if (!skip_overflow_check) {
    /* Load stack limit */
    LoadWordDisp(rARM_SELF, Thread::StackEndOffset().Int32Value(), r12);
  }
  /* Spill core callee saves */
  NewLIR1(kThumb2Push, core_spill_mask_);
  /* Need to spill any FP regs? */
  if (num_fp_spills_) {
    /*
     * NOTE: fp spills are a little different from core spills in that
     * they are pushed as a contiguous block.  When promoting from
     * the fp set, we must allocate all singles from s16..highest-promoted
     */
    NewLIR1(kThumb2VPushCS, num_fp_spills_);
  }
  if (!skip_overflow_check) {
    // Compute the new SP into LR first so the check can throw before SP
    // is actually moved below the stack limit.
    OpRegRegImm(kOpSub, rARM_LR, rARM_SP, frame_size_ - (spill_count * 4));
    GenRegRegCheck(kCondCc, rARM_LR, r12, kThrowStackOverflow);
    OpRegCopy(rARM_SP, rARM_LR);  // Establish stack
  } else {
    OpRegImm(kOpSub, rARM_SP, frame_size_ - (spill_count * 4));
  }

  FlushIns(ArgLocs, rl_method);

  FreeTemp(r0);
  FreeTemp(r1);
  FreeTemp(r2);
  FreeTemp(r3);
}

/*
 * Emit the method epilogue: pop the frame, restore FP/core callee saves,
 * and return (popping directly into PC when LR was spilled).
 */
void ArmMir2Lir::GenExitSequence() {
  int spill_count = num_core_spills_ + num_fp_spills_;
  /*
   * In the exit path, r0/r1 are live - make sure they aren't
   * allocated by the register utilities as temps.
   */
  LockTemp(r0);
  LockTemp(r1);

  NewLIR0(kPseudoMethodExit);
  OpRegImm(kOpAdd, rARM_SP, frame_size_ - (spill_count * 4));
  /* Need to restore any FP callee saves? */
  if (num_fp_spills_) {
    NewLIR1(kThumb2VPopCS, num_fp_spills_);
  }
  if (core_spill_mask_ & (1 << rARM_LR)) {
    /* Unspill rARM_LR to rARM_PC - pop returns directly. */
    core_spill_mask_ &= ~(1 << rARM_LR);
    core_spill_mask_ |= (1 << rARM_PC);
  }
  NewLIR1(kThumb2Pop, core_spill_mask_);
  if (!(core_spill_mask_ & (1 << rARM_PC))) {
    /* We didn't pop to rARM_PC, so must do a bx rARM_LR */
    NewLIR1(kThumbBx, rARM_LR);
  }
}

}  // namespace art