call_x86.cc revision 63c051a540e6dfc806f656b88ac3a63e99395429
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the X86 ISA */

#include "codegen_x86.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "gc/accounting/card_table.h"
#include "x86_lir.h"

namespace art {

/*
 * The sparse table in the literal pool is an array of <key,displacement>
 * pairs.  A "large" sparse switch is lowered as a linear sequence of
 * compare-and-branch instructions, one per case; execution falls through
 * to the code after the switch when no key matches.
 */
void X86Mir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
  // Locate the sparse-switch payload inside the dex instruction stream.
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpSparseSwitchTable(table);
  }
  // Payload layout: [0] = ident, [1] = entry count, followed by 'entries'
  // int32 keys and then 'entries' int32 branch targets (relative to the
  // switch instruction's dalvik offset).
  int entries = table[1];
  const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
  const int32_t* targets = &keys[entries];
  rl_src = LoadValue(rl_src, kCoreReg);
  // One cmp/je per case, branching to the case block's label.
  for (int i = 0; i < entries; i++) {
    int key = keys[i];
    BasicBlock* case_block =
        mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
    OpCmpImmBranch(kCondEq, rl_src.reg, key, &block_label_list_[case_block->id]);
  }
}

/*
 * Code pattern will look something like:
 *
 * mov  r_val, ..
 * call 0
 * pop  r_start_of_method
 * sub  r_start_of_method, ..
 * mov  r_key_reg, r_val
 * sub  r_key_reg, low_key
 * cmp  r_key_reg, size-1    ; bound check
 * ja   done
 * mov  r_disp, [r_start_of_method + r_key_reg * 4 + table_offset]
 * add  r_start_of_method, r_disp
 * jmp  r_start_of_method
 * done:
 */
void X86Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
  // Locate the packed-switch payload inside the dex instruction stream.
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpPackedSwitchTable(table);
  }
  // Add the table to the list - we'll process it later (the displacement
  // table itself is materialized during assembly).
  SwitchTable* tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  int size = table[1];
  tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
                                                      kArenaAllocLIR));
  switch_tables_.Insert(tab_rec);

  // Get the switch value
  rl_src = LoadValue(rl_src, kCoreReg);
  // NewLIR0(kX86Bkpt);

  // Materialize a pointer to the switch table
  RegStorage start_of_method_reg;
  if (base_of_code_ != nullptr) {
    // We can use the saved value.
    RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
    if (rl_method.wide) {
      rl_method = LoadValueWide(rl_method, kCoreReg);
    } else {
      rl_method = LoadValue(rl_method, kCoreReg);
    }
    start_of_method_reg = rl_method.reg;
    store_method_addr_used_ = true;
  } else {
    // No saved copy - emit the pseudo-op that computes the method start.
    start_of_method_reg = AllocTempRef();
    NewLIR1(kX86StartOfMethod, start_of_method_reg.GetReg());
  }
  DCHECK_EQ(start_of_method_reg.Is64Bit(), cu_->target64);
  int low_key = s4FromSwitchData(&table[2]);
  RegStorage keyReg;
  // Remove the bias, if necessary
  if (low_key == 0) {
    keyReg = rl_src.reg;
  } else {
    keyReg = AllocTemp();
    OpRegRegImm(kOpSub, keyReg, rl_src.reg, low_key);
  }
  // Bounds check - if < 0 or >= size continue following switch.
  // A single unsigned compare (ja) catches both ends: negative biased keys
  // wrap to large unsigned values.
  OpRegImm(kOpCmp, keyReg, size - 1);
  LIR* branch_over = OpCondBranch(kCondHi, NULL);

  // Load the displacement from the switch table
  // (scale factor 2 => * 4, each table entry is an int32 displacement).
  RegStorage disp_reg = AllocTemp();
  NewLIR5(kX86PcRelLoadRA, disp_reg.GetReg(), start_of_method_reg.GetReg(), keyReg.GetReg(),
          2, WrapPointer(tab_rec));
  // Add displacement to start of method
  OpRegReg(kOpAdd, start_of_method_reg, cu_->target64 ? As64BitReg(disp_reg) : disp_reg);
  // ..and go!
  LIR* switch_branch = NewLIR1(kX86JmpR, start_of_method_reg.GetReg());
  // Anchor: displacements in the emitted table are relative to this jump.
  tab_rec->anchor = switch_branch;

  /* branch_over target here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
}

/*
 * Array data table format:
 *  ushort ident = 0x0300   magic value
 *  ushort width            width of each element in the table
 *  uint   size             number of elements in the table
 *  ubyte  data[size*width] table of data values (may contain a single-byte
 *                          padding at the end)
 *
 * Total size is 4+(width * size + 1)/2 16-bit code units.
 */
void X86Mir2Lir::GenFillArrayData(DexOffset table_offset, RegLocation rl_src) {
  // Locate the fill-array-data payload inside the dex instruction stream.
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  // Add the table to the list - we'll process it later
  FillArrayData* tab_rec =
      static_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint16_t width = tab_rec->table[1];
  // 'size' is stored as two little-endian 16-bit halves.
  uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
  // Total payload bytes: 8-byte header (ident/width/size) + element data.
  tab_rec->size = (size * width) + 8;

  fill_array_data_.Insert(tab_rec);

  // Making a call - use explicit registers
  FlushAllRegs();   /* Everything to home location */
  RegStorage array_ptr = TargetReg(kArg0, kRef);
  RegStorage payload = TargetPtrReg(kArg1);
  RegStorage method_start = TargetPtrReg(kArg2);

  LoadValueDirectFixed(rl_src, array_ptr);
  // Materialize a pointer to the fill data image
  if (base_of_code_ != nullptr) {
    // We can use the saved value.
    RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
    if (rl_method.wide) {
      LoadValueDirectWide(rl_method, method_start);
    } else {
      LoadValueDirect(rl_method, method_start);
    }
    store_method_addr_used_ = true;
  } else {
    NewLIR1(kX86StartOfMethod, method_start.GetReg());
  }
  // payload = (anchor-relative address of table, fixed up at assembly) +
  //           method start => absolute pointer to the payload.
  NewLIR2(kX86PcRelAdr, payload.GetReg(), WrapPointer(tab_rec));
  OpRegReg(kOpAdd, payload, method_start);
  // Hand off to the runtime helper; it performs the actual copy and the
  // null/bounds checking.
  CallRuntimeHelperRegReg(kQuickHandleFillArrayData, array_ptr, payload, true);
}

// Move the pending exception object out of Thread-local storage into
// rl_dest, clearing the thread's exception slot in the process.
void X86Mir2Lir::GenMoveException(RegLocation rl_dest) {
  // Offset of Thread::exception_ differs between 32- and 64-bit runtimes.
  int ex_offset = cu_->target64 ?
      Thread::ExceptionOffset<8>().Int32Value() :
      Thread::ExceptionOffset<4>().Int32Value();
  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
  // Load the exception reference from thread-local storage...
  NewLIR2(cu_->target64 ? kX86Mov64RT : kX86Mov32RT, rl_result.reg.GetReg(), ex_offset);
  // ...then clear the slot so the exception is considered handled.
  NewLIR2(cu_->target64 ? kX86Mov64TI : kX86Mov32TI, ex_offset, 0);
  StoreValue(rl_dest, rl_result);
}

/*
 * Mark garbage collection card. Skip if the value we're storing is null.
 */
void X86Mir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
  DCHECK_EQ(tgt_addr_reg.Is64Bit(), cu_->target64);
  DCHECK_EQ(val_reg.Is64Bit(), cu_->target64);
  RegStorage reg_card_base = AllocTempRef();
  RegStorage reg_card_no = AllocTempRef();
  // Storing null needs no card mark - branch over the whole sequence.
  LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
  int ct_offset = cu_->target64 ?
      Thread::CardTableOffset<8>().Int32Value() :
      Thread::CardTableOffset<4>().Int32Value();
  // card_base = thread-local card table biased base;
  // card_no = target address >> kCardShift; then
  // card_base[card_no] = <low byte of card_base> dirties the card.
  NewLIR2(cu_->target64 ? kX86Mov64RT : kX86Mov32RT, reg_card_base.GetReg(), ct_offset);
  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
  FreeTemp(reg_card_base);
  FreeTemp(reg_card_no);
}

// Emit the method prologue: optional stack overflow check (implicit probe
// or explicit compare + slow path), frame allocation, callee-save spills,
// argument flushing, and (if requested) caching of the method start address.
void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
  /*
   * On entry, rX86_ARG0, rX86_ARG1, rX86_ARG2 are live.  Let the register
   * allocation mechanism know so it doesn't try to use any of them when
   * expanding the frame or flushing.  This leaves the utility
   * code with no spare temps.
   */
  LockTemp(rs_rX86_ARG0);
  LockTemp(rs_rX86_ARG1);
  LockTemp(rs_rX86_ARG2);

  /*
   * We can safely skip the stack overflow check if we're
   * a leaf *and* our frame size < fudge factor.
   */
  InstructionSet isa = cu_->target64 ? kX86_64 : kX86;
  bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, isa);

  // If we doing an implicit stack overflow check, perform the load immediately
  // before the stack pointer is decremented and anything is saved.
  if (!skip_overflow_check &&
      cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
    // Implicit stack overflow check.
    // test eax,[esp + -overflow]
    // Faults (SIGSEGV) if the guard region is hit; the fault handler turns
    // that into a StackOverflowError.
    int overflow = GetStackOverflowReservedBytes(isa);
    NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rX86_SP.GetReg(), -overflow);
    MarkPossibleStackOverflowException();
  }

  /* Build frame, return address already on stack */
  stack_decrement_ = OpRegImm(kOpSub, rs_rX86_SP, frame_size_ -
                              GetInstructionSetPointerSize(cu_->instruction_set));

  NewLIR0(kPseudoMethodEntry);
  /* Spill core callee saves */
  SpillCoreRegs();
  SpillFPRegs();
  if (!skip_overflow_check) {
    // Slow path taken when SP dips below the thread's stack_end_ guard:
    // restores SP past this frame (the return address must stay valid for
    // the unwinder) and calls the throw helper, which does not return.
    class StackOverflowSlowPath : public LIRSlowPath {
     public:
      StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
          : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, nullptr), sp_displace_(sp_displace) {
      }
      void Compile() OVERRIDE {
        m2l_->ResetRegPool();
        m2l_->ResetDefTracking();
        GenerateTargetLabel(kPseudoThrowTarget);
        // Unwind this frame's decrement before calling the helper.
        m2l_->OpRegImm(kOpAdd, rs_rX86_SP, sp_displace_);
        m2l_->ClobberCallerSave();
        // NOTE(review): "Assumes codegen and target are in thumb2 mode" in
        // the original was inherited from the ARM backend and does not
        // apply to x86; UseLink=false because x86 returns via the stack.
        m2l_->CallHelper(RegStorage::InvalidReg(), kQuickThrowStackOverflow,
                         false /* MarkSafepointPC */, false /* UseLink */);
      }

     private:
      const size_t sp_displace_;
    };
    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
      // TODO: for large frames we should do something like:
      // spill ebp
      // lea ebp, [esp + frame_size]
      // cmp ebp, fs:[stack_end_]
      // jcc stack_overflow_exception
      // mov esp, ebp
      // in case a signal comes in that's not using an alternate signal stack and the large frame
      // may have moved us outside of the reserved area at the end of the stack.
      // cmp rs_rX86_SP, fs:[stack_end_]; jcc throw_slowpath
      if (cu_->target64) {
        OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<8>());
      } else {
        OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<4>());
      }
      LIR* branch = OpCondBranch(kCondUlt, nullptr);
      AddSlowPath(
          new(arena_)StackOverflowSlowPath(this, branch,
                                           frame_size_ -
                                           GetInstructionSetPointerSize(cu_->instruction_set)));
    }
  }

  FlushIns(ArgLocs, rl_method);

  if (base_of_code_ != nullptr) {
    RegStorage method_start = TargetPtrReg(kArg0);
    // We have been asked to save the address of the method start for later use.
    setup_method_address_[0] = NewLIR1(kX86StartOfMethod, method_start.GetReg());
    int displacement = SRegOffset(base_of_code_->s_reg_low);
    // Native pointer - must be natural word size.
    setup_method_address_[1] = StoreBaseDisp(rs_rX86_SP, displacement, method_start,
                                             cu_->target64 ? k64 : k32, kNotVolatile);
  }

  FreeTemp(rs_rX86_ARG0);
  FreeTemp(rs_rX86_ARG1);
  FreeTemp(rs_rX86_ARG2);
}

// Emit the method epilogue: restore callee saves, pop the frame (leaving
// the return address on the stack), and return.
void X86Mir2Lir::GenExitSequence() {
  /*
   * In the exit path, rX86_RET0/rX86_RET1 are live - make sure they aren't
   * allocated by the register utilities as temps.
   */
  LockTemp(rs_rX86_RET0);
  LockTemp(rs_rX86_RET1);

  NewLIR0(kPseudoMethodExit);
  UnSpillCoreRegs();
  UnSpillFPRegs();
  /* Remove frame except for return address */
  stack_increment_ = OpRegImm(kOpAdd, rs_rX86_SP, frame_size_ - GetInstructionSetPointerSize(cu_->instruction_set));
  NewLIR0(kX86Ret);
}

// Exit for "special" (frameless) compiled methods: nothing to tear down,
// just return.
void X86Mir2Lir::GenSpecialExitSequence() {
  NewLIR0(kX86Ret);
}

// Emit an implicit (fault-based) null check: a dummy load from [reg + 0]
// that SIGSEGVs on null, which the fault handler converts into an NPE.
// Skipped when null-check elimination has proven reg non-null.
void X86Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
    return;
  }
  // Implicit null pointer check.
  // test eax,[arg1+0]
  NewLIR3(kX86Test32RM, rs_rAX.GetReg(), reg.GetReg(), 0);
  MarkPossibleNullPointerException(opt_flags);
}

}  // namespace art