call_mips.cc revision a0cd2d701f29e0bc6275f1b13c0edfd4ec391879
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Mips ISA */

#include "codegen_mips.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mips_lir.h"

namespace art {

bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir,
                                 const InlineMethod& special) {
  // TODO
  return false;
}

/*
 * The lack of pc-relative loads on Mips presents somewhat of a challenge
 * for our PIC switch table strategy.  To materialize the current location
 * we'll do a dummy JAL and reference our tables using rRA as the
 * base register.  Note that rRA will be used both as the base to
 * locate the switch table data and as the reference base for the switch
 * target offsets stored in the table.  We'll use a special pseudo-instruction
 * to represent the jal and trigger the construction of the
 * switch table offsets (which will happen after final assembly and all
 * labels are fixed).
 *
 * The test loop will look something like:
 *
 *   ori   r_end, rZERO, #table_size             ; size in bytes
 *   jal   BaseLabel                             ; stores "return address" (BaseLabel) in rRA
 *   nop                                         ; opportunistically fill
 * BaseLabel:
 *   addiu r_base, rRA, <table> - <BaseLabel>    ; table relative to BaseLabel
 *   addu  r_end, r_end, r_base                  ; end of table
 *   lw    r_val, [rSP, v_reg_off]               ; Test Value
 * loop:
 *   beq   r_base, r_end, done
 *   lw    r_key, 0(r_base)
 *   addu  r_base, 8
 *   bne   r_val, r_key, loop
 *   lw    r_disp, -4(r_base)
 *   addu  rRA, r_disp
 *   jr    rRA
 * done:
 *
 */
void MipsMir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset,
                                  RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpSparseSwitchTable(table);
  }
  // Add the table to the list - we'll process it later
  SwitchTable* tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  int elements = table[1];
  tab_rec->targets =
      static_cast<LIR**>(arena_->Alloc(elements * sizeof(LIR*), kArenaAllocLIR));
  switch_tables_.Insert(tab_rec);

  // The table is composed of 8-byte key/disp pairs
  int byte_size = elements * 8;

  int size_hi = byte_size >> 16;
  int size_lo = byte_size & 0xffff;

  RegStorage r_end = AllocTemp();
  if (size_hi) {
    NewLIR2(kMipsLui, r_end.GetReg(), size_hi);
  }
  // Must prevent code motion for the curr pc pair
  GenBarrier();  // Scheduling barrier
  NewLIR0(kMipsCurrPC);  // Really a jal to .+8
  // Now, fill the branch delay slot
  if (size_hi) {
    NewLIR3(kMipsOri, r_end.GetReg(), r_end.GetReg(), size_lo);
  } else {
    NewLIR3(kMipsOri, r_end.GetReg(), rZERO, size_lo);
  }
  GenBarrier();  // Scheduling barrier

  // Construct BaseLabel and set up table base register
  LIR* base_label = NewLIR0(kPseudoTargetLabel);
  // Remember base label so offsets can be computed later
  tab_rec->anchor = base_label;
  RegStorage r_base = AllocTemp();
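  // kMipsDelta is a pseudo-instruction: once final assembly has fixed all labels it is
  // resolved so that r_base = rRA + (<table> - BaseLabel), i.e. the address of the switch
  // table, matching the addiu shown in the pattern above (a wider sequence may be emitted
  // if the delta does not fit in a 16-bit immediate).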
  NewLIR4(kMipsDelta, r_base.GetReg(), 0, WrapPointer(base_label), WrapPointer(tab_rec));
  OpRegRegReg(kOpAdd, r_end, r_end, r_base);

  // Grab switch test value
  rl_src = LoadValue(rl_src, kCoreReg);

  // Test loop
  RegStorage r_key = AllocTemp();
  LIR* loop_label = NewLIR0(kPseudoTargetLabel);
  LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, NULL);
  Load32Disp(r_base, 0, r_key);
  OpRegImm(kOpAdd, r_base, 8);
  OpCmpBranch(kCondNe, rl_src.reg, r_key, loop_label);
  RegStorage r_disp = AllocTemp();
  Load32Disp(r_base, -4, r_disp);
  OpRegRegReg(kOpAdd, rs_rRA, rs_rRA, r_disp);
  OpReg(kOpBx, rs_rRA);

  // Loop exit
  LIR* exit_label = NewLIR0(kPseudoTargetLabel);
  exit_branch->target = exit_label;
}

/*
 * Code pattern will look something like:
 *
 *   lw    r_val
 *   jal   BaseLabel          ; stores "return address" (BaseLabel) in rRA
 *   nop                      ; opportunistically fill
 *   [subiu r_val, bias]      ; Remove bias if low_val != 0
 *   bound check -> done
 *   lw    r_disp, [rRA, r_val]
 *   addu  rRA, r_disp
 *   jr    rRA
 * done:
 */
void MipsMir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
                                  RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpPackedSwitchTable(table);
  }
  // Add the table to the list - we'll process it later
  SwitchTable* tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  int size = table[1];
  tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
                                                      kArenaAllocLIR));
  switch_tables_.Insert(tab_rec);

  // Get the switch value
  rl_src = LoadValue(rl_src, kCoreReg);

  // Prepare the bias.  If too big, handle 1st stage here
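  // Three cases: a zero bias needs no strip; a bias that fits in a 16-bit immediate is
  // subtracted directly in the delay slot below; a larger bias is materialized into r_key
  // now so that only the register-register subtract has to sit in the delay slot.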
  int low_key = s4FromSwitchData(&table[2]);
  bool large_bias = false;
  RegStorage r_key;
  if (low_key == 0) {
    r_key = rl_src.reg;
  } else if ((low_key & 0xffff) != low_key) {
    r_key = AllocTemp();
    LoadConstant(r_key, low_key);
    large_bias = true;
  } else {
    r_key = AllocTemp();
  }

  // Must prevent code motion for the curr pc pair
  GenBarrier();
  NewLIR0(kMipsCurrPC);  // Really a jal to .+8
  // Now, fill the branch delay slot with bias strip
  if (low_key == 0) {
    NewLIR0(kMipsNop);
  } else {
    if (large_bias) {
      OpRegRegReg(kOpSub, r_key, rl_src.reg, r_key);
    } else {
      OpRegRegImm(kOpSub, r_key, rl_src.reg, low_key);
    }
  }
  GenBarrier();  // Scheduling barrier

  // Construct BaseLabel and set up table base register
  LIR* base_label = NewLIR0(kPseudoTargetLabel);
  // Remember base label so offsets can be computed later
  tab_rec->anchor = base_label;

  // Bounds check - if < 0 or >= size continue following switch
  LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, NULL);

  // Materialize the table base pointer
  RegStorage r_base = AllocTemp();
  NewLIR4(kMipsDelta, r_base.GetReg(), 0, WrapPointer(base_label), WrapPointer(tab_rec));

  // Load the displacement from the switch table
  RegStorage r_disp = AllocTemp();
  LoadBaseIndexed(r_base, r_key, r_disp, 2, k32);

  // Add to rRA and go
  OpRegRegReg(kOpAdd, rs_rRA, rs_rRA, r_disp);
  OpReg(kOpBx, rs_rRA);

  /* branch_over target here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
}

/*
 * Array data table format:
 *  ushort ident = 0x0300   magic value
 *  ushort width            width of each element in the table
 *  uint   size             number of elements in the table
 *  ubyte  data[size*width] table of data values (may contain a single-byte
 *                          padding at the end)
 *
 * Total size is 4+(width * size + 1)/2 16-bit code units.
 */
void MipsMir2Lir::GenFillArrayData(DexOffset table_offset, RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  // Add the table to the list - we'll process it later
  FillArrayData* tab_rec =
      reinterpret_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData),
                                                     kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint16_t width = tab_rec->table[1];
  uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
  tab_rec->size = (size * width) + 8;

  fill_array_data_.Insert(tab_rec);

  // Making a call - use explicit registers
  FlushAllRegs();   /* Everything to home location */
  LockCallTemps();
  LoadValueDirectFixed(rl_src, rs_rMIPS_ARG0);

  // Must prevent code motion for the curr pc pair
  GenBarrier();
  NewLIR0(kMipsCurrPC);  // Really a jal to .+8
  // Now, fill the branch delay slot with the helper load
  RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pHandleFillArrayData));
  GenBarrier();  // Scheduling barrier

  // Construct BaseLabel and set up table base register
  LIR* base_label = NewLIR0(kPseudoTargetLabel);

  // Materialize a pointer to the fill data image
  NewLIR4(kMipsDelta, rMIPS_ARG1, 0, WrapPointer(base_label), WrapPointer(tab_rec));
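
  // At this point rMIPS_ARG0 holds the array reference and rMIPS_ARG1 the address of the
  // fill-data table embedded in the code image, matching the (array*, fill_data*) helper
  // signature noted below.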

  // And go...
  ClobberCallerSave();
  LIR* call_inst = OpReg(kOpBlx, r_tgt);  // ( array*, fill_data* )
  MarkSafepointPC(call_inst);
}

void MipsMir2Lir::GenMoveException(RegLocation rl_dest) {
  int ex_offset = Thread::ExceptionOffset<4>().Int32Value();
  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
  RegStorage reset_reg = AllocTempRef();
  LoadRefDisp(rs_rMIPS_SELF, ex_offset, rl_result.reg);
  LoadConstant(reset_reg, 0);
  StoreRefDisp(rs_rMIPS_SELF, ex_offset, reset_reg);
  FreeTemp(reset_reg);
  StoreValue(rl_dest, rl_result);
}

/*
 * Mark garbage collection card. Skip if the value we're storing is null.
 */
void MipsMir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
  RegStorage reg_card_base = AllocTemp();
  RegStorage reg_card_no = AllocTemp();
  LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
  // NOTE: native pointer.
  LoadWordDisp(rs_rMIPS_SELF, Thread::CardTableOffset<4>().Int32Value(), reg_card_base);
  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
  FreeTemp(reg_card_base);
  FreeTemp(reg_card_no);
}

void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
  int spill_count = num_core_spills_ + num_fp_spills_;
  /*
   * On entry, rMIPS_ARG0, rMIPS_ARG1, rMIPS_ARG2 & rMIPS_ARG3 are live.  Let the register
   * allocation mechanism know so it doesn't try to use any of them when
   * expanding the frame or flushing.  This leaves the utility
   * code with a single temp: r12.  This should be enough.
   */
  LockTemp(rs_rMIPS_ARG0);
  LockTemp(rs_rMIPS_ARG1);
  LockTemp(rs_rMIPS_ARG2);
  LockTemp(rs_rMIPS_ARG3);

  /*
   * We can safely skip the stack overflow check if we're
   * a leaf *and* our frame size < fudge factor.
   */
  bool skip_overflow_check = (mir_graph_->MethodIsLeaf() &&
      (static_cast<size_t>(frame_size_) < Thread::kStackOverflowReservedBytes));
  NewLIR0(kPseudoMethodEntry);
  RegStorage check_reg = AllocTemp();
  RegStorage new_sp = AllocTemp();
  if (!skip_overflow_check) {
    /* Load stack limit */
    Load32Disp(rs_rMIPS_SELF, Thread::StackEndOffset<4>().Int32Value(), check_reg);
  }
  /* Spill core callee saves */
  SpillCoreRegs();
  /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
  DCHECK_EQ(num_fp_spills_, 0);
  const int frame_sub = frame_size_ - spill_count * 4;
  if (!skip_overflow_check) {
    class StackOverflowSlowPath : public LIRSlowPath {
     public:
      StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
          : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, nullptr), sp_displace_(sp_displace) {
      }
      void Compile() OVERRIDE {
        m2l_->ResetRegPool();
        m2l_->ResetDefTracking();
        GenerateTargetLabel(kPseudoThrowTarget);
        // LR is offset 0 since we push in reverse order.
        m2l_->Load32Disp(rs_rMIPS_SP, 0, rs_rRA);
        m2l_->OpRegImm(kOpAdd, rs_rMIPS_SP, sp_displace_);
        m2l_->ClobberCallerSave();
        ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowStackOverflow);
        RegStorage r_tgt = m2l_->CallHelperSetup(func_offset);  // Doesn't clobber LR.
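        // UseLink is false so the jump does not clobber the rRA value just restored above;
        // MarkSafepointPC is false because the throw entrypoint does not return here.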
        m2l_->CallHelper(r_tgt, func_offset, false /* MarkSafepointPC */, false /* UseLink */);
      }

     private:
      const size_t sp_displace_;
    };
    OpRegRegImm(kOpSub, new_sp, rs_rMIPS_SP, frame_sub);
    LIR* branch = OpCmpBranch(kCondUlt, new_sp, check_reg, nullptr);
    AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_count * 4));
    // TODO: avoid copy for small frame sizes.
    OpRegCopy(rs_rMIPS_SP, new_sp);  // Establish stack
  } else {
    OpRegImm(kOpSub, rs_rMIPS_SP, frame_sub);
  }

  FlushIns(ArgLocs, rl_method);

  FreeTemp(rs_rMIPS_ARG0);
  FreeTemp(rs_rMIPS_ARG1);
  FreeTemp(rs_rMIPS_ARG2);
  FreeTemp(rs_rMIPS_ARG3);
}

void MipsMir2Lir::GenExitSequence() {
  /*
   * In the exit path, rMIPS_RET0/rMIPS_RET1 are live - make sure they aren't
   * allocated by the register utilities as temps.
   */
  LockTemp(rs_rMIPS_RET0);
  LockTemp(rs_rMIPS_RET1);

  NewLIR0(kPseudoMethodExit);
  UnSpillCoreRegs();
  OpReg(kOpBx, rs_rRA);
}

void MipsMir2Lir::GenSpecialExitSequence() {
  OpReg(kOpBx, rs_rRA);
}

}  // namespace art