int_x86.cc revision 147eb41b53729ec8d5c188d1cac90964a51afb8a
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the X86 ISA */

#include "codegen_x86.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
#include "mirror/art_method.h"
#include "mirror/array.h"
#include "x86_lir.h"

namespace art {

/*
 * Compare two 64-bit values
 *    x = y     return  0
 *    x < y     return -1
 *    x > y     return  1
 */
void X86Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  if (cu_->target64) {
    rl_src1 = LoadValueWide(rl_src1, kCoreReg);
    rl_src2 = LoadValueWide(rl_src2, kCoreReg);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    RegStorage temp_reg = AllocTemp();
    OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
    NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondG);   // result = (src1 > src2) ? 1 : 0
    NewLIR2(kX86Set8R, temp_reg.GetReg(), kX86CondL);        // temp = (src1 >= src2) ? 0 : 1
    NewLIR2(kX86Sub8RR, rl_result.reg.GetReg(), temp_reg.GetReg());
    NewLIR2(kX86Movsx8qRR, rl_result.reg.GetReg(), rl_result.reg.GetReg());

    StoreValue(rl_dest, rl_result);
    FreeTemp(temp_reg);
    return;
  }

  FlushAllRegs();
  LockCallTemps();  // Prepare for explicit register usage
  RegStorage r_tmp1 = RegStorage::MakeRegPair(rs_r0, rs_r1);
  RegStorage r_tmp2 = RegStorage::MakeRegPair(rs_r2, rs_r3);
  LoadValueDirectWideFixed(rl_src1, r_tmp1);
  LoadValueDirectWideFixed(rl_src2, r_tmp2);
  // Compute (r1:r0) = (r1:r0) - (r3:r2)
  OpRegReg(kOpSub, rs_r0, rs_r2);  // r0 = r0 - r2
  OpRegReg(kOpSbc, rs_r1, rs_r3);  // r1 = r1 - r3 - CF
  NewLIR2(kX86Set8R, rs_r2.GetReg(), kX86CondL);  // r2 = (r1:r0) < (r3:r2) ? 1 : 0
  NewLIR2(kX86Movzx8RR, rs_r2.GetReg(), rs_r2.GetReg());
  OpReg(kOpNeg, rs_r2);  // r2 = -r2
  OpRegReg(kOpOr, rs_r0, rs_r1);   // r0 = high | low - sets ZF
  NewLIR2(kX86Set8R, rs_r0.GetReg(), kX86CondNz);  // r0 = (r1:r0) != (r3:r2) ? 1 : 0
  NewLIR2(kX86Movzx8RR, r0, r0);
  OpRegReg(kOpOr, rs_r0, rs_r2);   // r0 = r0 | r2
  RegLocation rl_result = LocCReturn();
  StoreValue(rl_dest, rl_result);
}
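/*
 * Illustrative sketch of the x86-64 path above (registers depend on allocation,
 * so names here are not literal):
 *     cmp   src1, src2
 *     setg  result8        // result = (src1 > src2) ? 1 : 0
 *     setl  temp8          // temp   = (src1 < src2) ? 1 : 0
 *     sub   result8, temp8
 *     movsx result, result8
 * e.g. src1 = 5, src2 = 9: setg -> 0, setl -> 1, result = 0 - 1 = -1.
 */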
X86ConditionCode X86ConditionEncoding(ConditionCode cond) {
  switch (cond) {
    case kCondEq: return kX86CondEq;
    case kCondNe: return kX86CondNe;
    case kCondCs: return kX86CondC;
    case kCondCc: return kX86CondNc;
    case kCondUlt: return kX86CondC;
    case kCondUge: return kX86CondNc;
    case kCondMi: return kX86CondS;
    case kCondPl: return kX86CondNs;
    case kCondVs: return kX86CondO;
    case kCondVc: return kX86CondNo;
    case kCondHi: return kX86CondA;
    case kCondLs: return kX86CondBe;
    case kCondGe: return kX86CondGe;
    case kCondLt: return kX86CondL;
    case kCondGt: return kX86CondG;
    case kCondLe: return kX86CondLe;
    case kCondAl:
    case kCondNv: LOG(FATAL) << "Should not reach here";
  }
  return kX86CondO;
}

LIR* X86Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
  NewLIR2(src1.Is64Bit() ? kX86Cmp64RR : kX86Cmp32RR, src1.GetReg(), src2.GetReg());
  X86ConditionCode cc = X86ConditionEncoding(cond);
  LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */, cc);
  branch->target = target;
  return branch;
}

LIR* X86Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg,
                                int check_value, LIR* target) {
  if ((check_value == 0) && (cond == kCondEq || cond == kCondNe)) {
    // TODO: when check_value == 0 and reg is rCX, use the jcxz/nz opcode
    NewLIR2(reg.Is64Bit() ? kX86Test64RR : kX86Test32RR, reg.GetReg(), reg.GetReg());
  } else {
    if (reg.Is64Bit()) {
      NewLIR2(IS_SIMM8(check_value) ? kX86Cmp64RI8 : kX86Cmp64RI, reg.GetReg(), check_value);
    } else {
      NewLIR2(IS_SIMM8(check_value) ? kX86Cmp32RI8 : kX86Cmp32RI, reg.GetReg(), check_value);
    }
  }
  X86ConditionCode cc = X86ConditionEncoding(cond);
  LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */, cc);
  branch->target = target;
  return branch;
}

LIR* X86Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
  // If src or dest is a pair, we'll be using low reg.
  if (r_dest.IsPair()) {
    r_dest = r_dest.GetLow();
  }
  if (r_src.IsPair()) {
    r_src = r_src.GetLow();
  }
  if (r_dest.IsFloat() || r_src.IsFloat())
    return OpFpRegCopy(r_dest, r_src);
  LIR* res = RawLIR(current_dalvik_offset_, r_dest.Is64Bit() ? kX86Mov64RR : kX86Mov32RR,
                    r_dest.GetReg(), r_src.GetReg());
  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

void X86Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
  if (r_dest != r_src) {
    LIR *res = OpRegCopyNoInsert(r_dest, r_src);
    AppendLIR(res);
  }
}

void X86Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
  if (r_dest != r_src) {
    bool dest_fp = r_dest.IsFloat();
    bool src_fp = r_src.IsFloat();
    if (dest_fp) {
      if (src_fp) {
        OpRegCopy(r_dest, r_src);
      } else {
        // TODO: Prevent this from happening in the code. The result is often
        // unused or could have been loaded more easily from memory.
        if (!r_src.IsPair()) {
          DCHECK(!r_dest.IsPair());
          NewLIR2(kX86MovqxrRR, r_dest.GetReg(), r_src.GetReg());
        } else {
          NewLIR2(kX86MovdxrRR, r_dest.GetReg(), r_src.GetLowReg());
          RegStorage r_tmp = AllocTempDouble();
          NewLIR2(kX86MovdxrRR, r_tmp.GetReg(), r_src.GetHighReg());
          NewLIR2(kX86PunpckldqRR, r_dest.GetReg(), r_tmp.GetReg());
          FreeTemp(r_tmp);
        }
      }
    } else {
      if (src_fp) {
        if (!r_dest.IsPair()) {
          DCHECK(!r_src.IsPair());
          NewLIR2(kX86MovqrxRR, r_dest.GetReg(), r_src.GetReg());
        } else {
          NewLIR2(kX86MovdrxRR, r_dest.GetLowReg(), r_src.GetReg());
          RegStorage temp_reg = AllocTempDouble();
          NewLIR2(kX86MovsdRR, temp_reg.GetReg(), r_src.GetReg());
          NewLIR2(kX86PsrlqRI, temp_reg.GetReg(), 32);
          NewLIR2(kX86MovdrxRR, r_dest.GetHighReg(), temp_reg.GetReg());
        }
      } else {
        DCHECK_EQ(r_dest.IsPair(), r_src.IsPair());
        if (!r_src.IsPair()) {
          // Just copy the register directly.
          OpRegCopy(r_dest, r_src);
        } else {
          // Handle overlap
          if (r_src.GetHighReg() == r_dest.GetLowReg() &&
              r_src.GetLowReg() == r_dest.GetHighReg()) {
            // Deal with cycles.
            RegStorage temp_reg = AllocTemp();
            OpRegCopy(temp_reg, r_dest.GetHigh());
            OpRegCopy(r_dest.GetHigh(), r_dest.GetLow());
            OpRegCopy(r_dest.GetLow(), temp_reg);
            FreeTemp(temp_reg);
          } else if (r_src.GetHighReg() == r_dest.GetLowReg()) {
            OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
            OpRegCopy(r_dest.GetLow(), r_src.GetLow());
          } else {
            OpRegCopy(r_dest.GetLow(), r_src.GetLow());
            OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
          }
        }
      }
    }
  }
}

void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
  RegLocation rl_result;
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  RegLocation rl_dest = mir_graph_->GetDest(mir);
  // Avoid using float regs here.
  RegisterClass src_reg_class = rl_src.ref ? kRefReg : kCoreReg;
  RegisterClass result_reg_class = rl_dest.ref ? kRefReg : kCoreReg;
  rl_src = LoadValue(rl_src, src_reg_class);
  ConditionCode ccode = mir->meta.ccode;

  // The kMirOpSelect has two variants, one for constants and one for moves.
  const bool is_constant_case = (mir->ssa_rep->num_uses == 1);

  if (is_constant_case) {
    int true_val = mir->dalvikInsn.vB;
    int false_val = mir->dalvikInsn.vC;
    rl_result = EvalLoc(rl_dest, result_reg_class, true);

    /*
     * For ccode == kCondEq:
     *
     * 1) When the true case is zero and result_reg is not same as src_reg:
     *     xor result_reg, result_reg
     *     cmp $0, src_reg
     *     mov t1, $false_case
     *     cmovnz result_reg, t1
     * 2) When the false case is zero and result_reg is not same as src_reg:
     *     xor result_reg, result_reg
     *     cmp $0, src_reg
     *     mov t1, $true_case
     *     cmovz result_reg, t1
     * 3) All other cases (we do compare first to set eflags):
     *     cmp $0, src_reg
     *     mov result_reg, $false_case
     *     mov t1, $true_case
     *     cmovz result_reg, t1
     */
    // FIXME: depending on how you use registers you could get a false != mismatch when dealing
    // with different views of the same underlying physical resource (i.e. solo32 vs. solo64).
    const bool result_reg_same_as_src =
        (rl_src.location == kLocPhysReg && rl_src.reg.GetRegNum() == rl_result.reg.GetRegNum());
    const bool true_zero_case = (true_val == 0 && false_val != 0 && !result_reg_same_as_src);
    const bool false_zero_case = (false_val == 0 && true_val != 0 && !result_reg_same_as_src);
    const bool catch_all_case = !(true_zero_case || false_zero_case);

    if (true_zero_case || false_zero_case) {
      OpRegReg(kOpXor, rl_result.reg, rl_result.reg);
    }

    if (true_zero_case || false_zero_case || catch_all_case) {
      OpRegImm(kOpCmp, rl_src.reg, 0);
    }

    if (catch_all_case) {
      OpRegImm(kOpMov, rl_result.reg, false_val);
    }

    if (true_zero_case || false_zero_case || catch_all_case) {
      ConditionCode cc = true_zero_case ? NegateComparison(ccode) : ccode;
      int immediateForTemp = true_zero_case ? false_val : true_val;
      RegStorage temp1_reg = AllocTypedTemp(false, result_reg_class);
      OpRegImm(kOpMov, temp1_reg, immediateForTemp);

      OpCondRegReg(kOpCmov, cc, rl_result.reg, temp1_reg);

      FreeTemp(temp1_reg);
    }
  } else {
    RegLocation rl_true = mir_graph_->GetSrc(mir, 1);
    RegLocation rl_false = mir_graph_->GetSrc(mir, 2);
    rl_true = LoadValue(rl_true, result_reg_class);
    rl_false = LoadValue(rl_false, result_reg_class);
    rl_result = EvalLoc(rl_dest, result_reg_class, true);

    /*
     * For ccode == kCondEq:
     *
     * 1) When true case is already in place:
     *     cmp $0, src_reg
     *     cmovnz result_reg, false_reg
     * 2) When false case is already in place:
     *     cmp $0, src_reg
     *     cmovz result_reg, true_reg
     * 3) When neither case is in place:
     *     cmp $0, src_reg
     *     mov result_reg, false_reg
     *     cmovz result_reg, true_reg
     */

    // kMirOpSelect is generated just for conditional cases when comparison is done with zero.
    OpRegImm(kOpCmp, rl_src.reg, 0);

    if (rl_result.reg.GetReg() == rl_true.reg.GetReg()) {
      OpCondRegReg(kOpCmov, NegateComparison(ccode), rl_result.reg, rl_false.reg);
    } else if (rl_result.reg.GetReg() == rl_false.reg.GetReg()) {
      OpCondRegReg(kOpCmov, ccode, rl_result.reg, rl_true.reg);
    } else {
      OpRegCopy(rl_result.reg, rl_false.reg);
      OpCondRegReg(kOpCmov, ccode, rl_result.reg, rl_true.reg);
    }
  }

  StoreValue(rl_dest, rl_result);
}

void X86Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
  LIR* taken = &block_label_list_[bb->taken];
  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
  ConditionCode ccode = mir->meta.ccode;

  if (rl_src1.is_const) {
    std::swap(rl_src1, rl_src2);
    ccode = FlipComparisonOrder(ccode);
  }
  if (rl_src2.is_const) {
    // Do special compare/branch against simple const operand
    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
    GenFusedLongCmpImmBranch(bb, rl_src1, val, ccode);
    return;
  }

  if (cu_->target64) {
    rl_src1 = LoadValueWide(rl_src1, kCoreReg);
    rl_src2 = LoadValueWide(rl_src2, kCoreReg);

    OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
    OpCondBranch(ccode, taken);
    return;
  }

  FlushAllRegs();
  LockCallTemps();  // Prepare for explicit register usage
  RegStorage r_tmp1 = RegStorage::MakeRegPair(rs_r0, rs_r1);
  RegStorage r_tmp2 = RegStorage::MakeRegPair(rs_r2, rs_r3);
  LoadValueDirectWideFixed(rl_src1, r_tmp1);
  LoadValueDirectWideFixed(rl_src2, r_tmp2);

  // Swap operands and condition code to prevent use of zero flag.
  if (ccode == kCondLe || ccode == kCondGt) {
    // Compute (r3:r2) = (r3:r2) - (r1:r0)
    OpRegReg(kOpSub, rs_r2, rs_r0);  // r2 = r2 - r0
    OpRegReg(kOpSbc, rs_r3, rs_r1);  // r3 = r3 - r1 - CF
  } else {
    // Compute (r1:r0) = (r1:r0) - (r3:r2)
    OpRegReg(kOpSub, rs_r0, rs_r2);  // r0 = r0 - r2
    OpRegReg(kOpSbc, rs_r1, rs_r3);  // r1 = r1 - r3 - CF
  }
  switch (ccode) {
    case kCondEq:
    case kCondNe:
      OpRegReg(kOpOr, rs_r0, rs_r1);  // r0 = r0 | r1
      break;
    case kCondLe:
      ccode = kCondGe;
      break;
    case kCondGt:
      ccode = kCondLt;
      break;
    case kCondLt:
    case kCondGe:
      break;
    default:
      LOG(FATAL) << "Unexpected ccode: " << ccode;
  }
  OpCondBranch(ccode, taken);
}
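/*
 * Background note on the kCondLe/kCondGt swap above (not original code): after the
 * sub/sbc pair, ZF reflects only the high word, while SF and OF are valid for the
 * full 64-bit signed difference (signed less-than is SF != OF). kCondLe and kCondGt
 * would also need ZF of the whole result, so "a <= b" is rewritten as "b - a >= 0"
 * and "a > b" as "b - a < 0", both of which only need the SF/OF-based Ge/Lt tests.
 * E.g. a = 3, b = 3, kCondLe: b - a = 0, SF == OF, so the kCondGe branch is taken.
 */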
void X86Mir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
                                          int64_t val, ConditionCode ccode) {
  int32_t val_lo = Low32Bits(val);
  int32_t val_hi = High32Bits(val);
  LIR* taken = &block_label_list_[bb->taken];
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  bool is_equality_test = ccode == kCondEq || ccode == kCondNe;

  if (cu_->target64) {
    if (is_equality_test && val == 0) {
      // We can simplify the comparison for ==, != against 0.
      NewLIR2(kX86Test64RR, rl_src1.reg.GetReg(), rl_src1.reg.GetReg());
    } else if (is_equality_test && val_hi == 0 && val_lo > 0) {
      OpRegImm(kOpCmp, rl_src1.reg, val_lo);
    } else {
      RegStorage tmp = AllocTypedTempWide(false, kCoreReg);
      LoadConstantWide(tmp, val);
      OpRegReg(kOpCmp, rl_src1.reg, tmp);
      FreeTemp(tmp);
    }
    OpCondBranch(ccode, taken);
    return;
  }

  if (is_equality_test && val != 0) {
    rl_src1 = ForceTempWide(rl_src1);
  }
  RegStorage low_reg = rl_src1.reg.GetLow();
  RegStorage high_reg = rl_src1.reg.GetHigh();

  if (is_equality_test) {
    // We can simplify the comparison for ==, != against 0.
    if (val == 0) {
      if (IsTemp(low_reg)) {
        OpRegReg(kOpOr, low_reg, high_reg);
        // We have now changed it; ignore the old values.
        Clobber(rl_src1.reg);
      } else {
        RegStorage t_reg = AllocTemp();
        OpRegRegReg(kOpOr, t_reg, low_reg, high_reg);
        FreeTemp(t_reg);
      }
      OpCondBranch(ccode, taken);
      return;
    }

    // Need to compute the actual value for ==, !=.
    OpRegImm(kOpSub, low_reg, val_lo);
    NewLIR2(kX86Sbb32RI, high_reg.GetReg(), val_hi);
    OpRegReg(kOpOr, high_reg, low_reg);
    Clobber(rl_src1.reg);
  } else if (ccode == kCondLe || ccode == kCondGt) {
    // Swap operands and condition code to prevent use of zero flag.
    RegStorage tmp = AllocTypedTempWide(false, kCoreReg);
    LoadConstantWide(tmp, val);
    OpRegReg(kOpSub, tmp.GetLow(), low_reg);
    OpRegReg(kOpSbc, tmp.GetHigh(), high_reg);
    ccode = (ccode == kCondLe) ? kCondGe : kCondLt;
    FreeTemp(tmp);
  } else {
    // We can use a compare for the low word to set CF.
    OpRegImm(kOpCmp, low_reg, val_lo);
    if (IsTemp(high_reg)) {
      NewLIR2(kX86Sbb32RI, high_reg.GetReg(), val_hi);
      // We have now changed it; ignore the old values.
      Clobber(rl_src1.reg);
    } else {
      // mov temp_reg, high_reg; sbb temp_reg, high_constant
      RegStorage t_reg = AllocTemp();
      OpRegCopy(t_reg, high_reg);
      NewLIR2(kX86Sbb32RI, t_reg.GetReg(), val_hi);
      FreeTemp(t_reg);
    }
  }

  OpCondBranch(ccode, taken);
}
void X86Mir2Lir::CalculateMagicAndShift(int divisor, int& magic, int& shift) {
  // It does not make sense to calculate magic and shift for zero divisor.
  DCHECK_NE(divisor, 0);

  /* According to H.S. Warren's Hacker's Delight Chapter 10 and
   * T. Granlund and P.L. Montgomery's "Division by invariant integers using multiplication",
   * the magic number M and shift S can be calculated in the following way:
   * Let nc be the most positive value of numerator(n) such that nc = kd - 1,
   * where divisor(d) >= 2.
   * Let nc be the most negative value of numerator(n) such that nc = kd + 1,
   * where divisor(d) <= -2.
   * Thus nc can be calculated like:
   * nc = 2^31 + 2^31 % d - 1, where d >= 2
   * nc = -2^31 + (2^31 + 1) % d, where d <= -2.
   *
   * So the shift p is the smallest p satisfying
   * 2^p > nc * (d - 2^p % d), where d >= 2
   * 2^p > nc * (d + 2^p % d), where d <= -2.
   *
   * The magic number M is calculated by
   * M = (2^p + d - 2^p % d) / d, where d >= 2
   * M = (2^p - d - 2^p % d) / d, where d <= -2.
   *
   * Notice that p is always bigger than or equal to 32, so we just return p - 32 as
   * the shift number S.
   */

  int32_t p = 31;
  const uint32_t two31 = 0x80000000U;

  // Initialize the computations.
  uint32_t abs_d = (divisor >= 0) ? divisor : -divisor;
  uint32_t tmp = two31 + (static_cast<uint32_t>(divisor) >> 31);
  uint32_t abs_nc = tmp - 1 - tmp % abs_d;
  uint32_t quotient1 = two31 / abs_nc;
  uint32_t remainder1 = two31 % abs_nc;
  uint32_t quotient2 = two31 / abs_d;
  uint32_t remainder2 = two31 % abs_d;

  /*
   * To avoid handling both positive and negative divisor, Hacker's Delight
   * introduces a method to handle these 2 cases together to avoid duplication.
   */
  uint32_t delta;
  do {
    p++;
    quotient1 = 2 * quotient1;
    remainder1 = 2 * remainder1;
    if (remainder1 >= abs_nc) {
      quotient1++;
      remainder1 = remainder1 - abs_nc;
    }
    quotient2 = 2 * quotient2;
    remainder2 = 2 * remainder2;
    if (remainder2 >= abs_d) {
      quotient2++;
      remainder2 = remainder2 - abs_d;
    }
    delta = abs_d - remainder2;
  } while (quotient1 < delta || (quotient1 == delta && remainder1 == 0));

  magic = (divisor > 0) ? (quotient2 + 1) : (-quotient2 - 1);
  shift = p - 32;
}
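/*
 * Worked example (a sanity check, not part of the original code): for divisor = 7
 * the loop settles on p = 34, since M = (2^34 + 7 - 2^34 % 7) / 7 = 0x92492493,
 * a value that is negative when read as a signed 32-bit int, with shift S = 2.
 * A division n/7 then becomes hi32(n * M), plus n (because M < 0 and d > 0),
 * arithmetically shifted right by 2, plus 1 if the intermediate result is
 * negative -- the exact step sequence GenDivRemLit implements below. This is the
 * same magic/shift pair mainstream compilers commonly emit for n/7.
 */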
RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div) {
  LOG(FATAL) << "Unexpected use of GenDivRemLit for x86";
  return rl_dest;
}

RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
                                     int imm, bool is_div) {
  // Use a multiply (and fixup) to perform an int div/rem by a constant.

  // We have to use fixed registers, so flush all the temps.
  FlushAllRegs();
  LockCallTemps();  // Prepare for explicit register usage.

  // Assume that the result will be in EDX.
  RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, rs_r2, INVALID_SREG, INVALID_SREG};

  // handle div/rem by 1 special case.
  if (imm == 1) {
    if (is_div) {
      // x / 1 == x.
      StoreValue(rl_result, rl_src);
    } else {
      // x % 1 == 0.
      LoadConstantNoClobber(rs_r0, 0);
      // For this case, return the result in EAX.
      rl_result.reg.SetReg(r0);
    }
  } else if (imm == -1) {  // handle 0x80000000 / -1 special case.
    if (is_div) {
      LIR *minint_branch = 0;
      LoadValueDirectFixed(rl_src, rs_r0);
      OpRegImm(kOpCmp, rs_r0, 0x80000000);
      minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);

      // for x != MIN_INT, x / -1 == -x.
      NewLIR1(kX86Neg32R, r0);

      LIR* branch_around = NewLIR1(kX86Jmp8, 0);
      // The target for cmp/jmp above.
      minint_branch->target = NewLIR0(kPseudoTargetLabel);
      // EAX already contains the right value (0x80000000).
      branch_around->target = NewLIR0(kPseudoTargetLabel);
    } else {
      // x % -1 == 0.
      LoadConstantNoClobber(rs_r0, 0);
    }
    // For this case, return the result in EAX.
    rl_result.reg.SetReg(r0);
  } else {
    CHECK(imm <= -2 || imm >= 2);
    // Use H.S. Warren's Hacker's Delight Chapter 10 and
    // T. Granlund and P.L. Montgomery's "Division by invariant integers using multiplication".
    int magic, shift;
    CalculateMagicAndShift(imm, magic, shift);

    /*
     * For imm >= 2,
     *     int(n/imm) = floor(n/imm) = floor(M*n/2^S), while n > 0
     *     int(n/imm) = ceil(n/imm) = floor(M*n/2^S) + 1, while n < 0.
     * For imm <= -2,
     *     int(n/imm) = ceil(n/imm) = floor(M*n/2^S) + 1, while n > 0
     *     int(n/imm) = floor(n/imm) = floor(M*n/2^S), while n < 0.
     * We implement this algorithm in the following way:
     * 1. multiply magic number m and numerator n, get the higher 32-bit result in EDX
     * 2. if imm > 0 and magic < 0, add numerator to EDX
     *    if imm < 0 and magic > 0, sub numerator from EDX
     * 3. if S != 0, SAR S bits for EDX
     * 4. add 1 to EDX if EDX < 0
     * 5. Thus, EDX is the quotient
     */

    // Numerator into EAX.
    RegStorage numerator_reg;
    if (!is_div || (imm > 0 && magic < 0) || (imm < 0 && magic > 0)) {
      // We will need the value later.
      if (rl_src.location == kLocPhysReg) {
        // We can use it directly.
        DCHECK(rl_src.reg.GetReg() != rs_r0.GetReg() && rl_src.reg.GetReg() != rs_r2.GetReg());
        numerator_reg = rl_src.reg;
      } else {
        numerator_reg = rs_r1;
        LoadValueDirectFixed(rl_src, numerator_reg);
      }
      OpRegCopy(rs_r0, numerator_reg);
    } else {
      // Only need this once.  Just put it into EAX.
      LoadValueDirectFixed(rl_src, rs_r0);
    }

    // EDX = magic.
    LoadConstantNoClobber(rs_r2, magic);

    // EDX:EAX = magic * dividend.
    NewLIR1(kX86Imul32DaR, rs_r2.GetReg());

    if (imm > 0 && magic < 0) {
      // Add numerator to EDX.
      DCHECK(numerator_reg.Valid());
      NewLIR2(kX86Add32RR, rs_r2.GetReg(), numerator_reg.GetReg());
    } else if (imm < 0 && magic > 0) {
      DCHECK(numerator_reg.Valid());
      NewLIR2(kX86Sub32RR, rs_r2.GetReg(), numerator_reg.GetReg());
    }

    // Do we need the shift?
    if (shift != 0) {
      // Shift EDX by 'shift' bits.
      NewLIR2(kX86Sar32RI, rs_r2.GetReg(), shift);
    }

    // Add 1 to EDX if EDX < 0.

    // Move EDX to EAX.
    OpRegCopy(rs_r0, rs_r2);

    // Move sign bit to bit 0, zeroing the rest.
    NewLIR2(kX86Shr32RI, rs_r2.GetReg(), 31);

    // EDX = EDX + EAX.
    NewLIR2(kX86Add32RR, rs_r2.GetReg(), rs_r0.GetReg());

    // Quotient is in EDX.
    if (!is_div) {
      // We need to compute the remainder.
      // Remainder is numerator - (quotient * imm).
      DCHECK(numerator_reg.Valid());
      OpRegCopy(rs_r0, numerator_reg);

      // EDX = quotient * imm.
      OpRegRegImm(kOpMul, rs_r2, rs_r2, imm);

      // EAX -= EDX.
      NewLIR2(kX86Sub32RR, rs_r0.GetReg(), rs_r2.GetReg());

      // For this case, return the result in EAX.
      rl_result.reg.SetReg(r0);
    }
  }

  return rl_result;
}
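/*
 * Putting the pieces together (illustrative, for x / 7 with M = 0x92492493, S = 2;
 * the numerator register is whatever the code above selected):
 *     mov  eax, n
 *     mov  edx, 0x92492493
 *     imul edx              // EDX:EAX = M * n
 *     add  edx, n           // M < 0 and imm > 0: correct the high half
 *     sar  edx, 2
 *     mov  eax, edx
 *     shr  edx, 31          // sign bit of the quotient-so-far
 *     add  edx, eax         // add 1 if the result was negative
 * leaving the quotient in EDX without executing a division instruction.
 */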
RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
                                  bool is_div) {
  LOG(FATAL) << "Unexpected use of GenDivRem for x86";
  return rl_dest;
}

RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                  RegLocation rl_src2, bool is_div, bool check_zero) {
  // We have to use fixed registers, so flush all the temps.
  FlushAllRegs();
  LockCallTemps();  // Prepare for explicit register usage.

  // Load LHS into EAX.
  LoadValueDirectFixed(rl_src1, rs_r0);

  // Load RHS into ECX.
  LoadValueDirectFixed(rl_src2, rs_r1);

  // Copy LHS sign bit into EDX.
  NewLIR0(kx86Cdq32Da);

  if (check_zero) {
    // Handle division by zero case.
    GenDivZeroCheck(rs_r1);
  }

  // Have to catch 0x80000000/-1 case, or we will get an exception!
  OpRegImm(kOpCmp, rs_r1, -1);
  LIR* minus_one_branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);

  // RHS is -1.
  OpRegImm(kOpCmp, rs_r0, 0x80000000);
  LIR* minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);

  // In 0x80000000/-1 case.
  if (!is_div) {
    // For DIV, EAX is already right. For REM, we need EDX 0.
    LoadConstantNoClobber(rs_r2, 0);
  }
  LIR* done = NewLIR1(kX86Jmp8, 0);

  // Expected case.
  minus_one_branch->target = NewLIR0(kPseudoTargetLabel);
  minint_branch->target = minus_one_branch->target;
  NewLIR1(kX86Idivmod32DaR, rs_r1.GetReg());
  done->target = NewLIR0(kPseudoTargetLabel);

  // Result is in EAX for div and EDX for rem.
  RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, rs_r0, INVALID_SREG, INVALID_SREG};
  if (!is_div) {
    rl_result.reg.SetReg(r2);
  }
  return rl_result;
}
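/*
 * Background for the guard above (a note, not original code): x86's idiv raises a
 * #DE exception when the quotient overflows, and 0x80000000 / -1 = 2^31 does not
 * fit in 32 bits. Java semantics instead require Integer.MIN_VALUE / -1 ==
 * Integer.MIN_VALUE and Integer.MIN_VALUE % -1 == 0, which is exactly what the
 * branch-around path leaves in EAX/EDX.
 */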
bool X86Mir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
  DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);

  if (is_long && cu_->instruction_set == kX86) {
    return false;
  }

  // Get the two arguments to the invoke and place them in GP registers.
  RegLocation rl_src1 = info->args[0];
  RegLocation rl_src2 = (is_long) ? info->args[2] : info->args[1];
  rl_src1 = (is_long) ? LoadValueWide(rl_src1, kCoreReg) : LoadValue(rl_src1, kCoreReg);
  rl_src2 = (is_long) ? LoadValueWide(rl_src2, kCoreReg) : LoadValue(rl_src2, kCoreReg);

  RegLocation rl_dest = (is_long) ? InlineTargetWide(info) : InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  /*
   * If the result register is the same as the second element, then we need to be careful.
   * The reason is that the first copy will inadvertently clobber the second element with
   * the first one, thus yielding the wrong result. Thus we do a swap in that case.
   */
  if (rl_result.reg.GetReg() == rl_src2.reg.GetReg()) {
    std::swap(rl_src1, rl_src2);
  }

  // Pick the first integer as min/max.
  OpRegCopy(rl_result.reg, rl_src1.reg);

  // If the integers are both in the same register, then there is nothing else to do
  // because they are equal and we have already moved one into the result.
  if (rl_src1.reg.GetReg() != rl_src2.reg.GetReg()) {
    // It is possible we didn't pick correctly so do the actual comparison now.
    OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);

    // Conditionally move the other integer into the destination register.
    ConditionCode condition_code = is_min ? kCondGt : kCondLt;
    OpCondRegReg(kOpCmov, condition_code, rl_result.reg, rl_src2.reg);
  }

  if (is_long) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    StoreValue(rl_dest, rl_result);
  }
  return true;
}
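/*
 * Branch-free min/max sketch (registers illustrative): for Math.min(a, b) the code
 * above amounts to
 *     mov   result, a
 *     cmp   a, b
 *     cmovg result, b      // if a > b, the minimum is b
 * e.g. a = 4, b = 2: 4 > 2, so the cmov replaces the initial 4 with 2.
 */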
bool X86Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  RegLocation rl_address;
  if (!cu_->target64) {
    rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[0]
    rl_address = LoadValue(rl_src_address, kCoreReg);
  } else {
    rl_address = LoadValueWide(rl_src_address, kCoreReg);
  }
  RegLocation rl_dest = size == k64 ? InlineTargetWide(info) : InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // Unaligned access is allowed on x86.
  LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
  if (size == k64) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool X86Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  RegLocation rl_address;
  if (!cu_->target64) {
    rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[0]
    rl_address = LoadValue(rl_src_address, kCoreReg);
  } else {
    rl_address = LoadValueWide(rl_src_address, kCoreReg);
  }
  RegLocation rl_src_value = info->args[2];  // [size] value
  RegLocation rl_value;
  if (size == k64) {
    // Unaligned access is allowed on x86.
    rl_value = LoadValueWide(rl_src_value, kCoreReg);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    // In 32-bit mode, only the EAX..EDX registers can be used with Mov8MR.
    if (!cu_->target64 && size == kSignedByte) {
      rl_src_value = UpdateLocTyped(rl_src_value, kCoreReg);
      if (rl_src_value.location == kLocPhysReg && !IsByteRegister(rl_src_value.reg)) {
        RegStorage temp = AllocateByteRegister();
        OpRegCopy(temp, rl_src_value.reg);
        rl_value.reg = temp;
      } else {
        rl_value = LoadValue(rl_src_value, kCoreReg);
      }
    } else {
      rl_value = LoadValue(rl_src_value, kCoreReg);
    }
  }
  StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
  return true;
}

void X86Mir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset) {
  NewLIR5(kX86Lea32RA, r_base.GetReg(), reg1.GetReg(), reg2.GetReg(), scale, offset);
}

void X86Mir2Lir::OpTlsCmp(ThreadOffset<4> offset, int val) {
  DCHECK_EQ(kX86, cu_->instruction_set);
  NewLIR2(kX86Cmp16TI8, offset.Int32Value(), val);
}

void X86Mir2Lir::OpTlsCmp(ThreadOffset<8> offset, int val) {
  DCHECK_EQ(kX86_64, cu_->instruction_set);
  NewLIR2(kX86Cmp16TI8, offset.Int32Value(), val);
}

static bool IsInReg(X86Mir2Lir* pMir2Lir, const RegLocation& rl, RegStorage reg) {
  return rl.reg.Valid() && rl.reg.GetReg() == reg.GetReg() && (pMir2Lir->IsLive(reg) || rl.home);
}

bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
  DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
  RegLocation rl_src_offset = info->args[2];  // long low
  if (!cu_->target64) {
    rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
  }
  RegLocation rl_src_expected = info->args[4];  // int, long or Object
  // If is_long, high half is in info->args[5]
  RegLocation rl_src_new_value = info->args[is_long ? 6 : 5];  // int, long or Object
  // If is_long, high half is in info->args[7]

  if (is_long && cu_->target64) {
    // RAX must hold expected for CMPXCHG. Neither rl_new_value, nor r_ptr may be in RAX.
    FlushReg(rs_r0q);
    Clobber(rs_r0q);
    LockTemp(rs_r0q);

    RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
    RegLocation rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
    RegLocation rl_offset = LoadValueWide(rl_src_offset, kCoreReg);
    LoadValueDirectWide(rl_src_expected, rs_r0q);
    NewLIR5(kX86LockCmpxchg64AR, rl_object.reg.GetReg(), rl_offset.reg.GetReg(), 0, 0,
            rl_new_value.reg.GetReg());

    // After a store we need to insert barrier in case of potential load. Since the
    // locked cmpxchg has full barrier semantics, only a scheduling barrier will be generated.
    GenMemBarrier(kAnyAny);

    FreeTemp(rs_r0q);
  } else if (is_long) {
    // TODO: avoid unnecessary loads of SI and DI when the values are in registers.
    // TODO: CFI support.
    FlushAllRegs();
    LockCallTemps();
    RegStorage r_tmp1 = RegStorage::MakeRegPair(rs_rAX, rs_rDX);
    RegStorage r_tmp2 = RegStorage::MakeRegPair(rs_rBX, rs_rCX);
    LoadValueDirectWideFixed(rl_src_expected, r_tmp1);
    LoadValueDirectWideFixed(rl_src_new_value, r_tmp2);
    // FIXME: needs 64-bit update.
    const bool obj_in_di = IsInReg(this, rl_src_obj, rs_rDI);
    const bool obj_in_si = IsInReg(this, rl_src_obj, rs_rSI);
    DCHECK(!obj_in_si || !obj_in_di);
    const bool off_in_di = IsInReg(this, rl_src_offset, rs_rDI);
    const bool off_in_si = IsInReg(this, rl_src_offset, rs_rSI);
    DCHECK(!off_in_si || !off_in_di);
    // If obj/offset is in a reg, use that reg. Otherwise, use the empty reg.
    RegStorage rs_obj = obj_in_di ? rs_rDI : obj_in_si ? rs_rSI : !off_in_di ? rs_rDI : rs_rSI;
    RegStorage rs_off = off_in_si ? rs_rSI : off_in_di ? rs_rDI : !obj_in_si ? rs_rSI : rs_rDI;
    bool push_di = (!obj_in_di && !off_in_di) && (rs_obj == rs_rDI || rs_off == rs_rDI);
    bool push_si = (!obj_in_si && !off_in_si) && (rs_obj == rs_rSI || rs_off == rs_rSI);
    if (push_di) {
      NewLIR1(kX86Push32R, rs_rDI.GetReg());
      MarkTemp(rs_rDI);
      LockTemp(rs_rDI);
    }
    if (push_si) {
      NewLIR1(kX86Push32R, rs_rSI.GetReg());
      MarkTemp(rs_rSI);
      LockTemp(rs_rSI);
    }
    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
    const size_t push_offset = (push_si ? 4u : 0u) + (push_di ? 4u : 0u);
    if (!obj_in_si && !obj_in_di) {
      LoadWordDisp(rs_rX86_SP, SRegOffset(rl_src_obj.s_reg_low) + push_offset, rs_obj);
      // Dalvik register annotation in LoadBaseIndexedDisp() used wrong offset. Fix it.
      DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
      int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - push_offset / 4u;
      AnnotateDalvikRegAccess(last_lir_insn_, reg_id, true, false);
    }
    if (!off_in_si && !off_in_di) {
      LoadWordDisp(rs_rX86_SP, SRegOffset(rl_src_offset.s_reg_low) + push_offset, rs_off);
      // Dalvik register annotation in LoadBaseIndexedDisp() used wrong offset. Fix it.
      DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
      int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - push_offset / 4u;
      AnnotateDalvikRegAccess(last_lir_insn_, reg_id, true, false);
    }
    NewLIR4(kX86LockCmpxchg64A, rs_obj.GetReg(), rs_off.GetReg(), 0, 0);

    // After a store we need to insert barrier to prevent reordering with either
    // earlier or later memory accesses. Since
    // locked cmpxchg has full barrier semantics, only a scheduling barrier will be generated,
    // and it will be associated with the cmpxchg instruction, preventing both.
    GenMemBarrier(kAnyAny);

    if (push_si) {
      FreeTemp(rs_rSI);
      UnmarkTemp(rs_rSI);
      NewLIR1(kX86Pop32R, rs_rSI.GetReg());
    }
    if (push_di) {
      FreeTemp(rs_rDI);
      UnmarkTemp(rs_rDI);
      NewLIR1(kX86Pop32R, rs_rDI.GetReg());
    }
    FreeCallTemps();
  } else {
    // EAX must hold expected for CMPXCHG. Neither rl_new_value, nor r_ptr may be in EAX.
    FlushReg(rs_r0);
    Clobber(rs_r0);
    LockTemp(rs_r0);

    RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
    RegLocation rl_new_value = LoadValue(rl_src_new_value);

    if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
      // Mark card for object assuming new value is stored.
      FreeTemp(rs_r0);  // Temporarily release EAX for MarkGCCard().
      MarkGCCard(rl_new_value.reg, rl_object.reg);
      LockTemp(rs_r0);
    }

    RegLocation rl_offset;
    if (cu_->target64) {
      rl_offset = LoadValueWide(rl_src_offset, kCoreReg);
    } else {
      rl_offset = LoadValue(rl_src_offset, kCoreReg);
    }
    LoadValueDirect(rl_src_expected, rs_r0);
    NewLIR5(kX86LockCmpxchgAR, rl_object.reg.GetReg(), rl_offset.reg.GetReg(), 0, 0,
            rl_new_value.reg.GetReg());

    // After a store we need to insert barrier to prevent reordering with either
    // earlier or later memory accesses. Since
    // locked cmpxchg has full barrier semantics, only a scheduling barrier will be generated,
    // and it will be associated with the cmpxchg instruction, preventing both.
    GenMemBarrier(kAnyAny);

    FreeTemp(rs_r0);
  }

  // Convert ZF to boolean
  RegLocation rl_dest = InlineTarget(info);  // boolean place for result
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage result_reg = rl_result.reg;

  // For 32-bit, SETcc only works with EAX..EDX.
  if (!IsByteRegister(result_reg)) {
    result_reg = AllocateByteRegister();
  }
  NewLIR2(kX86Set8R, result_reg.GetReg(), kX86CondZ);
  NewLIR2(kX86Movzx8RR, rl_result.reg.GetReg(), result_reg.GetReg());
  if (IsTemp(result_reg)) {
    FreeTemp(result_reg);
  }
  StoreValue(rl_dest, rl_result);
  return true;
}
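/*
 * Why ZF is the CAS result (background note, not original code): lock cmpxchg
 * compares EAX/RAX with the destination; if they match it stores the new value and
 * sets ZF, otherwise it loads the destination into EAX/RAX and clears ZF. So the
 * setz above directly materializes the "swap succeeded" boolean that
 * compareAndSwap must return.
 */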
LIR* X86Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
  CHECK(base_of_code_ != nullptr);

  // Address the start of the method
  RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
  if (rl_method.wide) {
    LoadValueDirectWideFixed(rl_method, reg);
  } else {
    LoadValueDirectFixed(rl_method, reg);
  }
  store_method_addr_used_ = true;

  // Load the proper value from the literal area.
  // We don't know the proper offset for the value, so pick one that will force
  // 4 byte offset.  We will fix this up in the assembler later to have the right
  // value.
  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
  LIR* res = RawLIR(current_dalvik_offset_, kX86Mov32RM, reg.GetReg(), reg.GetReg(), 256,
                    0, 0, target);
  res->target = target;
  res->flags.fixup = kFixupLoad;
  store_method_addr_used_ = true;
  return res;
}

LIR* X86Mir2Lir::OpVldm(RegStorage r_base, int count) {
  LOG(FATAL) << "Unexpected use of OpVldm for x86";
  return NULL;
}

LIR* X86Mir2Lir::OpVstm(RegStorage r_base, int count) {
  LOG(FATAL) << "Unexpected use of OpVstm for x86";
  return NULL;
}

void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                               RegLocation rl_result, int lit,
                                               int first_bit, int second_bit) {
  RegStorage t_reg = AllocTemp();
  OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
  FreeTemp(t_reg);
  if (first_bit != 0) {
    OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
  }
}
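/*
 * Worked example (illustrative): multiplying by lit = 10 = 0b1010 has first_bit = 1
 * and second_bit = 3, so the code above computes
 *     t      = src << 2       // src * 4
 *     result = src + t        // src * 5
 *     result = result << 1    // src * 10
 * trading the multiply for a shift-add-shift sequence.
 */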
void X86Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
  if (cu_->target64) {
    DCHECK(reg.Is64Bit());

    NewLIR2(kX86Cmp64RI8, reg.GetReg(), 0);
  } else {
    DCHECK(reg.IsPair());

    // We are not supposed to clobber the incoming storage, so allocate a temporary.
    RegStorage t_reg = AllocTemp();
    // Doing an OR is a quick way to check if both registers are zero. This will set the flags.
    OpRegRegReg(kOpOr, t_reg, reg.GetLow(), reg.GetHigh());
    // The temp is no longer needed so free it at this time.
    FreeTemp(t_reg);
  }

  // In case of zero, throw ArithmeticException.
  GenDivZeroCheck(kCondEq);
}

void X86Mir2Lir::GenArrayBoundsCheck(RegStorage index,
                                     RegStorage array_base,
                                     int len_offset) {
  class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch,
                             RegStorage index, RegStorage array_base, int32_t len_offset)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
          index_(index), array_base_(array_base), len_offset_(len_offset) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoThrowTarget);

      RegStorage new_index = index_;
      // Move index out of kArg1, either directly to kArg0, or to kArg2.
      // TODO: clean-up to check not a number but with type
      if (index_ == m2l_->TargetReg(kArg1, kNotWide)) {
        if (array_base_ == m2l_->TargetReg(kArg0, kRef)) {
          m2l_->OpRegCopy(m2l_->TargetReg(kArg2, kNotWide), index_);
          new_index = m2l_->TargetReg(kArg2, kNotWide);
        } else {
          m2l_->OpRegCopy(m2l_->TargetReg(kArg0, kNotWide), index_);
          new_index = m2l_->TargetReg(kArg0, kNotWide);
        }
      }
      // Load array length to kArg1.
      m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, kNotWide), array_base_, len_offset_);
      if (cu_->target64) {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
                                      new_index, m2l_->TargetReg(kArg1, kNotWide), true);
      } else {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
                                      new_index, m2l_->TargetReg(kArg1, kNotWide), true);
      }
    }

   private:
    const RegStorage index_;
    const RegStorage array_base_;
    const int32_t len_offset_;
  };

  OpRegMem(kOpCmp, index, array_base, len_offset);
  MarkPossibleNullPointerException(0);
  LIR* branch = OpCondBranch(kCondUge, nullptr);
  AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch,
                                                    index, array_base, len_offset));
}
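/*
 * Note on the kCondUge branch above (background, not original code): the bounds
 * check needs only one unsigned compare because a negative index, reinterpreted as
 * unsigned, is larger than any legal array length. E.g. index = -1 = 0xFFFFFFFFu is
 * >= length, so "index < 0 || index >= length" collapses into a single jae to the
 * slow path.
 */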
void X86Mir2Lir::GenArrayBoundsCheck(int32_t index,
                                     RegStorage array_base,
                                     int32_t len_offset) {
  class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
   public:
    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch,
                             int32_t index, RegStorage array_base, int32_t len_offset)
        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
          index_(index), array_base_(array_base), len_offset_(len_offset) {
    }

    void Compile() OVERRIDE {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoThrowTarget);

      // Load array length to kArg1.
      m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, kNotWide), array_base_, len_offset_);
      m2l_->LoadConstant(m2l_->TargetReg(kArg0, kNotWide), index_);
      if (cu_->target64) {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
                                      m2l_->TargetReg(kArg0, kNotWide),
                                      m2l_->TargetReg(kArg1, kNotWide), true);
      } else {
        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
                                      m2l_->TargetReg(kArg0, kNotWide),
                                      m2l_->TargetReg(kArg1, kNotWide), true);
      }
    }

   private:
    const int32_t index_;
    const RegStorage array_base_;
    const int32_t len_offset_;
  };

  NewLIR3(IS_SIMM8(index) ? kX86Cmp32MI8 : kX86Cmp32MI, array_base.GetReg(), len_offset, index);
  MarkPossibleNullPointerException(0);
  LIR* branch = OpCondBranch(kCondLs, nullptr);
  AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch,
                                                    index, array_base, len_offset));
}

// Test suspend flag, return target of taken suspend branch
LIR* X86Mir2Lir::OpTestSuspend(LIR* target) {
  if (cu_->target64) {
    OpTlsCmp(Thread::ThreadFlagsOffset<8>(), 0);
  } else {
    OpTlsCmp(Thread::ThreadFlagsOffset<4>(), 0);
  }
  return OpCondBranch((target == NULL) ? kCondNe : kCondEq, target);
}

// Decrement register and branch on condition
LIR* X86Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
  OpRegImm(kOpSub, reg, 1);
  return OpCondBranch(c_code, target);
}

bool X86Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                    RegLocation rl_src, RegLocation rl_dest, int lit) {
  LOG(FATAL) << "Unexpected use of smallLiteralDivRem in x86";
  return false;
}

bool X86Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  LOG(FATAL) << "Unexpected use of easyMultiply in x86";
  return false;
}

LIR* X86Mir2Lir::OpIT(ConditionCode cond, const char* guide) {
  LOG(FATAL) << "Unexpected use of OpIT in x86";
  return NULL;
}

void X86Mir2Lir::OpEndIT(LIR* it) {
  LOG(FATAL) << "Unexpected use of OpEndIT in x86";
}

void X86Mir2Lir::GenImulRegImm(RegStorage dest, RegStorage src, int val) {
  switch (val) {
    case 0:
      NewLIR2(kX86Xor32RR, dest.GetReg(), dest.GetReg());
      break;
    case 1:
      OpRegCopy(dest, src);
      break;
    default:
      OpRegRegImm(kOpMul, dest, src, val);
      break;
  }
}

void X86Mir2Lir::GenImulMemImm(RegStorage dest, int sreg, int displacement, int val) {
  // All memory accesses below reference dalvik regs.
  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);

  LIR *m;
  switch (val) {
    case 0:
      NewLIR2(kX86Xor32RR, dest.GetReg(), dest.GetReg());
      break;
    case 1:
      LoadBaseDisp(rs_rX86_SP, displacement, dest, k32, kNotVolatile);
      break;
    default:
      m = NewLIR4(IS_SIMM8(val) ? kX86Imul32RMI8 : kX86Imul32RMI, dest.GetReg(),
                  rs_rX86_SP.GetReg(), displacement, val);
      AnnotateDalvikRegAccess(m, displacement >> 2, true /* is_load */, true /* is_64bit */);
      break;
  }
}

void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  // All memory accesses below reference dalvik regs.
  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);

  if (cu_->target64) {
    if (rl_src1.is_const) {
      std::swap(rl_src1, rl_src2);
    }
    // Are we multiplying by a constant?
    if (rl_src2.is_const) {
      int64_t val = mir_graph_->ConstantValueWide(rl_src2);
      if (val == 0) {
        RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
        OpRegReg(kOpXor, rl_result.reg, rl_result.reg);
        StoreValueWide(rl_dest, rl_result);
        return;
      } else if (val == 1) {
        StoreValueWide(rl_dest, rl_src1);
        return;
      } else if (val == 2) {
        GenAddLong(Instruction::ADD_LONG, rl_dest, rl_src1, rl_src1);
        return;
      } else if (IsPowerOfTwo(val)) {
        int shift_amount = LowestSetBit(val);
        if (!BadOverlap(rl_src1, rl_dest)) {
          rl_src1 = LoadValueWide(rl_src1, kCoreReg);
          RegLocation rl_result = GenShiftImmOpLong(Instruction::SHL_LONG, rl_dest,
                                                    rl_src1, shift_amount);
          StoreValueWide(rl_dest, rl_result);
          return;
        }
      }
    }
    rl_src1 = LoadValueWide(rl_src1, kCoreReg);
    rl_src2 = LoadValueWide(rl_src2, kCoreReg);
    RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
    if (rl_result.reg.GetReg() == rl_src1.reg.GetReg() &&
        rl_result.reg.GetReg() == rl_src2.reg.GetReg()) {
      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
    } else if (rl_result.reg.GetReg() != rl_src1.reg.GetReg() &&
               rl_result.reg.GetReg() == rl_src2.reg.GetReg()) {
      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src1.reg.GetReg());
    } else if (rl_result.reg.GetReg() == rl_src1.reg.GetReg() &&
               rl_result.reg.GetReg() != rl_src2.reg.GetReg()) {
      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
    } else {
      OpRegCopy(rl_result.reg, rl_src1.reg);
      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
    }
    StoreValueWide(rl_dest, rl_result);
    return;
  }

  if (rl_src1.is_const) {
    std::swap(rl_src1, rl_src2);
  }
  // Are we multiplying by a constant?
  if (rl_src2.is_const) {
    // Do special handling for a multiply by a simple constant operand.
    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
    if (val == 0) {
      RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
      OpRegReg(kOpXor, rl_result.reg.GetLow(), rl_result.reg.GetLow());
      OpRegReg(kOpXor, rl_result.reg.GetHigh(), rl_result.reg.GetHigh());
      StoreValueWide(rl_dest, rl_result);
      return;
    } else if (val == 1) {
      StoreValueWide(rl_dest, rl_src1);
      return;
    } else if (val == 2) {
      GenAddLong(Instruction::ADD_LONG, rl_dest, rl_src1, rl_src1);
      return;
    } else if (IsPowerOfTwo(val)) {
      int shift_amount = LowestSetBit(val);
      if (!BadOverlap(rl_src1, rl_dest)) {
        rl_src1 = LoadValueWide(rl_src1, kCoreReg);
        RegLocation rl_result = GenShiftImmOpLong(Instruction::SHL_LONG, rl_dest,
                                                  rl_src1, shift_amount);
        StoreValueWide(rl_dest, rl_result);
        return;
      }
    }

    // Okay, just bite the bullet and do it.
    int32_t val_lo = Low32Bits(val);
    int32_t val_hi = High32Bits(val);
    FlushAllRegs();
    LockCallTemps();  // Prepare for explicit register usage.
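    /*
     * Background identity for the 32-bit paths below (not original code): writing the
     * operands as (2^32*1H + 1L) and (2^32*2H + 2L),
     *     product mod 2^64 = 2^32*(1H*2L + 2H*1L) + 1L*2L,
     * so only one widening unsigned multiply (1L*2L via mul) is needed; the two cross
     * terms are plain 32-bit imuls whose sum is added into the high word.
     */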
    rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
    bool src1_in_reg = rl_src1.location == kLocPhysReg;
    int displacement = SRegOffset(rl_src1.s_reg_low);

    // ECX <- 1H * 2L
    // EAX <- 1L * 2H
    if (src1_in_reg) {
      GenImulRegImm(rs_r1, rl_src1.reg.GetHigh(), val_lo);
      GenImulRegImm(rs_r0, rl_src1.reg.GetLow(), val_hi);
    } else {
      GenImulMemImm(rs_r1, GetSRegHi(rl_src1.s_reg_low), displacement + HIWORD_OFFSET, val_lo);
      GenImulMemImm(rs_r0, rl_src1.s_reg_low, displacement + LOWORD_OFFSET, val_hi);
    }

    // ECX <- ECX + EAX (2H * 1L) + (1H * 2L)
    NewLIR2(kX86Add32RR, rs_r1.GetReg(), rs_r0.GetReg());

    // EAX <- 2L
    LoadConstantNoClobber(rs_r0, val_lo);

    // EDX:EAX <- 2L * 1L (double precision)
    if (src1_in_reg) {
      NewLIR1(kX86Mul32DaR, rl_src1.reg.GetLowReg());
    } else {
      LIR *m = NewLIR2(kX86Mul32DaM, rs_rX86_SP.GetReg(), displacement + LOWORD_OFFSET);
      AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
                              true /* is_load */, true /* is_64bit */);
    }

    // EDX <- EDX + ECX (add high words)
    NewLIR2(kX86Add32RR, rs_r2.GetReg(), rs_r1.GetReg());

    // Result is EDX:EAX
    RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
                             RegStorage::MakeRegPair(rs_r0, rs_r2), INVALID_SREG, INVALID_SREG};
    StoreValueWide(rl_dest, rl_result);
    return;
  }

  // Nope.  Do it the hard way
  // Check for V*V.  We can eliminate a multiply in that case, as 2L*1H == 2H*1L.
  bool is_square = mir_graph_->SRegToVReg(rl_src1.s_reg_low) ==
                   mir_graph_->SRegToVReg(rl_src2.s_reg_low);

  FlushAllRegs();
  LockCallTemps();  // Prepare for explicit register usage.
  rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
  rl_src2 = UpdateLocWideTyped(rl_src2, kCoreReg);

  // At this point, the VRs are in their home locations.
  bool src1_in_reg = rl_src1.location == kLocPhysReg;
  bool src2_in_reg = rl_src2.location == kLocPhysReg;

  // ECX <- 1H
  if (src1_in_reg) {
    NewLIR2(kX86Mov32RR, rs_r1.GetReg(), rl_src1.reg.GetHighReg());
  } else {
    LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, rs_r1, k32,
                 kNotVolatile);
  }

  if (is_square) {
    // Take advantage of the fact that the values are the same.
    // ECX <- ECX * 2L  (1H * 2L)
    if (src2_in_reg) {
      NewLIR2(kX86Imul32RR, rs_r1.GetReg(), rl_src2.reg.GetLowReg());
    } else {
      int displacement = SRegOffset(rl_src2.s_reg_low);
      LIR *m = NewLIR3(kX86Imul32RM, rs_r1.GetReg(), rs_rX86_SP.GetReg(),
                       displacement + LOWORD_OFFSET);
      AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
                              true /* is_load */, true /* is_64bit */);
    }

    // ECX <- 2*ECX  (2H * 1L) + (1H * 2L)
    NewLIR2(kX86Add32RR, rs_r1.GetReg(), rs_r1.GetReg());
  } else {
    // EAX <- 2H
    if (src2_in_reg) {
      NewLIR2(kX86Mov32RR, rs_r0.GetReg(), rl_src2.reg.GetHighReg());
    } else {
      LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, rs_r0, k32,
                   kNotVolatile);
    }

    // EAX <- EAX * 1L  (2H * 1L)
    if (src1_in_reg) {
      NewLIR2(kX86Imul32RR, rs_r0.GetReg(), rl_src1.reg.GetLowReg());
    } else {
      int displacement = SRegOffset(rl_src1.s_reg_low);
      LIR *m = NewLIR3(kX86Imul32RM, rs_r0.GetReg(), rs_rX86_SP.GetReg(),
                       displacement + LOWORD_OFFSET);
      AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
                              true /* is_load */, true /* is_64bit */);
    }

    // ECX <- ECX * 2L  (1H * 2L)
    if (src2_in_reg) {
      NewLIR2(kX86Imul32RR, rs_r1.GetReg(), rl_src2.reg.GetLowReg());
    } else {
      int displacement = SRegOffset(rl_src2.s_reg_low);
      LIR *m = NewLIR3(kX86Imul32RM, rs_r1.GetReg(), rs_rX86_SP.GetReg(),
                       displacement + LOWORD_OFFSET);
      AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
                              true /* is_load */, true /* is_64bit */);
    }

    // ECX <- ECX + EAX  (2H * 1L) + (1H * 2L)
    NewLIR2(kX86Add32RR, rs_r1.GetReg(), rs_r0.GetReg());
  }

  // EAX <- 2L
  if (src2_in_reg) {
    NewLIR2(kX86Mov32RR, rs_r0.GetReg(), rl_src2.reg.GetLowReg());
  } else {
    LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, rs_r0, k32,
                 kNotVolatile);
  }

  // EDX:EAX <- 2L * 1L (double precision)
  if (src1_in_reg) {
    NewLIR1(kX86Mul32DaR, rl_src1.reg.GetLowReg());
  } else {
    int displacement = SRegOffset(rl_src1.s_reg_low);
    LIR *m = NewLIR2(kX86Mul32DaM, rs_rX86_SP.GetReg(), displacement + LOWORD_OFFSET);
    AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
                            true /* is_load */, true /* is_64bit */);
  }

  // EDX <- EDX + ECX (add high words)
  NewLIR2(kX86Add32RR, rs_r2.GetReg(), rs_r1.GetReg());

  // Result is EDX:EAX
  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
                           RegStorage::MakeRegPair(rs_r0, rs_r2), INVALID_SREG, INVALID_SREG};
  StoreValueWide(rl_dest, rl_result);
}

void X86Mir2Lir::GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src,
                                   Instruction::Code op) {
  DCHECK_EQ(rl_dest.location, kLocPhysReg);
  X86OpCode x86op = GetOpcode(op, rl_dest, rl_src, false);
  if (rl_src.location == kLocPhysReg) {
    // Both operands are in registers.
    // But we must ensure that rl_src is in pair
    if (cu_->target64) {
      NewLIR2(x86op, rl_dest.reg.GetReg(), rl_src.reg.GetReg());
    } else {
      rl_src = LoadValueWide(rl_src, kCoreReg);
      if (rl_dest.reg.GetLowReg() == rl_src.reg.GetHighReg()) {
        // The registers are the same, so we would clobber it before the use.
        RegStorage temp_reg = AllocTemp();
        OpRegCopy(temp_reg, rl_dest.reg);
        rl_src.reg.SetHighReg(temp_reg.GetReg());
      }
      NewLIR2(x86op, rl_dest.reg.GetLowReg(), rl_src.reg.GetLowReg());

      x86op = GetOpcode(op, rl_dest, rl_src, true);
      NewLIR2(x86op, rl_dest.reg.GetHighReg(), rl_src.reg.GetHighReg());
      FreeTemp(rl_src.reg);  // ???
    }
    return;
  }

  // RHS is in memory.
  DCHECK((rl_src.location == kLocDalvikFrame) ||
         (rl_src.location == kLocCompilerTemp));
  int r_base = rs_rX86_SP.GetReg();
  int displacement = SRegOffset(rl_src.s_reg_low);

  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
  LIR *lir = NewLIR3(x86op, cu_->target64 ? rl_dest.reg.GetReg() : rl_dest.reg.GetLowReg(),
                     r_base, displacement + LOWORD_OFFSET);
  AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
                          true /* is_load */, true /* is64bit */);
  if (!cu_->target64) {
    x86op = GetOpcode(op, rl_dest, rl_src, true);
    lir = NewLIR3(x86op, rl_dest.reg.GetHighReg(), r_base, displacement + HIWORD_OFFSET);
    AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
                            true /* is_load */, true /* is64bit */);
  }
}

void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op) {
  rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
  if (rl_dest.location == kLocPhysReg) {
    // Ensure we are in a register pair
    RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);

    rl_src = UpdateLocWideTyped(rl_src, kCoreReg);
    GenLongRegOrMemOp(rl_result, rl_src, op);
    StoreFinalValueWide(rl_dest, rl_result);
    return;
  }

  // It wasn't in registers, so it better be in memory.
  DCHECK((rl_dest.location == kLocDalvikFrame) ||
         (rl_dest.location == kLocCompilerTemp));
  rl_src = LoadValueWide(rl_src, kCoreReg);

  // Operate directly into memory.
  X86OpCode x86op = GetOpcode(op, rl_dest, rl_src, false);
  int r_base = rs_rX86_SP.GetReg();
  int displacement = SRegOffset(rl_dest.s_reg_low);

  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
  LIR *lir = NewLIR3(x86op, r_base, displacement + LOWORD_OFFSET,
                     cu_->target64 ? rl_src.reg.GetReg() : rl_src.reg.GetLowReg());
  AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
                          true /* is_load */, true /* is64bit */);
  AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
                          false /* is_load */, true /* is64bit */);
  if (!cu_->target64) {
    x86op = GetOpcode(op, rl_dest, rl_src, true);
    lir = NewLIR3(x86op, r_base, displacement + HIWORD_OFFSET, rl_src.reg.GetHighReg());
    AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
                            true /* is_load */, true /* is64bit */);
    AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
  }
  FreeTemp(rl_src.reg);
}

void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2, Instruction::Code op,
                              bool is_commutative) {
  // Is this really a 2 operand operation?
  switch (op) {
    case Instruction::ADD_LONG_2ADDR:
    case Instruction::SUB_LONG_2ADDR:
    case Instruction::AND_LONG_2ADDR:
    case Instruction::OR_LONG_2ADDR:
    case Instruction::XOR_LONG_2ADDR:
      if (GenerateTwoOperandInstructions()) {
        GenLongArith(rl_dest, rl_src2, op);
        return;
      }
      break;

    default:
      break;
  }

  if (rl_dest.location == kLocPhysReg) {
    RegLocation rl_result = LoadValueWide(rl_src1, kCoreReg);

    // We are about to clobber the LHS, so it needs to be a temp.
    rl_result = ForceTempWide(rl_result);

    // Perform the operation using the RHS.
    rl_src2 = UpdateLocWideTyped(rl_src2, kCoreReg);
    GenLongRegOrMemOp(rl_result, rl_src2, op);

    // And now record that the result is in the temp.
    StoreFinalValueWide(rl_dest, rl_result);
    return;
  }

  // It wasn't in registers, so it better be in memory.
  DCHECK((rl_dest.location == kLocDalvikFrame) ||
         (rl_dest.location == kLocCompilerTemp));
  rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
  rl_src2 = UpdateLocWideTyped(rl_src2, kCoreReg);

  // Get one of the source operands into temporary register.
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  if (cu_->target64) {
    if (IsTemp(rl_src1.reg)) {
      GenLongRegOrMemOp(rl_src1, rl_src2, op);
    } else if (is_commutative) {
      rl_src2 = LoadValueWide(rl_src2, kCoreReg);
      // We need at least one of them to be a temporary.
      if (!IsTemp(rl_src2.reg)) {
        rl_src1 = ForceTempWide(rl_src1);
        GenLongRegOrMemOp(rl_src1, rl_src2, op);
      } else {
        GenLongRegOrMemOp(rl_src2, rl_src1, op);
        StoreFinalValueWide(rl_dest, rl_src2);
        return;
      }
    } else {
      // Need LHS to be the temp.
      rl_src1 = ForceTempWide(rl_src1);
      GenLongRegOrMemOp(rl_src1, rl_src2, op);
    }
  } else {
    if (IsTemp(rl_src1.reg.GetLow()) && IsTemp(rl_src1.reg.GetHigh())) {
      GenLongRegOrMemOp(rl_src1, rl_src2, op);
    } else if (is_commutative) {
      rl_src2 = LoadValueWide(rl_src2, kCoreReg);
      // We need at least one of them to be a temporary.
      if (!(IsTemp(rl_src2.reg.GetLow()) && IsTemp(rl_src2.reg.GetHigh()))) {
        rl_src1 = ForceTempWide(rl_src1);
        GenLongRegOrMemOp(rl_src1, rl_src2, op);
      } else {
        GenLongRegOrMemOp(rl_src2, rl_src1, op);
        StoreFinalValueWide(rl_dest, rl_src2);
        return;
      }
    } else {
      // Need LHS to be the temp.
      rl_src1 = ForceTempWide(rl_src1);
      GenLongRegOrMemOp(rl_src1, rl_src2, op);
    }
  }

  StoreFinalValueWide(rl_dest, rl_src1);
}

void X86Mir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
  GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
}

void X86Mir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
  GenLongArith(rl_dest, rl_src1, rl_src2, opcode, false);
}

void X86Mir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
  GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
}

void X86Mir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest,
                           RegLocation rl_src1, RegLocation rl_src2) {
  GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
}

void X86Mir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
  GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
}

void X86Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
  if (cu_->target64) {
    rl_src = LoadValueWide(rl_src, kCoreReg);
    RegLocation rl_result;
    rl_result = EvalLocWide(rl_dest, kCoreReg, true);
    OpRegCopy(rl_result.reg, rl_src.reg);
    OpReg(kOpNot, rl_result.reg);
    StoreValueWide(rl_dest, rl_result);
  } else {
    LOG(FATAL) << "Unexpected use of GenNotLong()";
  }
}

void X86Mir2Lir::GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
                               RegLocation rl_src2, bool is_div) {
  if (!cu_->target64) {
    LOG(FATAL) << "Unexpected use of GenDivRemLong()";
    return;
  }

  // We have to use fixed registers, so flush all the temps.
  FlushAllRegs();
  LockCallTemps();  // Prepare for explicit register usage.

  // Load LHS into RAX.
  LoadValueDirectWideFixed(rl_src1, rs_r0q);

  // Load RHS into RCX.
  LoadValueDirectWideFixed(rl_src2, rs_r1q);

  // Copy LHS sign bit into RDX.
  NewLIR0(kX86Cqo64Da);

  // Handle division by zero case.
  GenDivZeroCheckWide(rs_r1q);

  // Have to catch 0x8000000000000000/-1 case, or we will get an exception!
  NewLIR2(kX86Cmp64RI8, rs_r1q.GetReg(), -1);
  LIR* minus_one_branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);

  // RHS is -1.
  LoadConstantWide(rs_r6q, 0x8000000000000000);
  NewLIR2(kX86Cmp64RR, rs_r0q.GetReg(), rs_r6q.GetReg());
  LIR* minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);

  // In the 0x8000000000000000/-1 case.
  if (!is_div) {
    // For DIV, RAX is already right. For REM, we need RDX to be 0.
    NewLIR2(kX86Xor64RR, rs_r2q.GetReg(), rs_r2q.GetReg());
  }
  LIR* done = NewLIR1(kX86Jmp8, 0);

  // Expected case.
  minus_one_branch->target = NewLIR0(kPseudoTargetLabel);
  minint_branch->target = minus_one_branch->target;
  NewLIR1(kX86Idivmod64DaR, rs_r1q.GetReg());
  done->target = NewLIR0(kPseudoTargetLabel);

  // Result is in RAX for div and RDX for rem.
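  // (x86-64 IDIV convention: the dividend is RDX:RAX -- CQO above sign-extends
  // RAX into RDX -- and IDIV leaves the quotient in RAX, the remainder in RDX.)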
  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, rs_r0q, INVALID_SREG, INVALID_SREG};
  if (!is_div) {
    rl_result.reg.SetReg(r2q);
  }

  StoreValueWide(rl_dest, rl_result);
}

void X86Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_result;
  if (cu_->target64) {
    rl_result = EvalLocWide(rl_dest, kCoreReg, true);
    OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
  } else {
    rl_result = ForceTempWide(rl_src);
    if (((rl_dest.location == kLocPhysReg) && (rl_src.location == kLocPhysReg)) &&
        ((rl_dest.reg.GetLowReg() == rl_src.reg.GetHighReg()))) {
      // The registers are the same, so we would clobber it before the use.
      RegStorage temp_reg = AllocTemp();
      OpRegCopy(temp_reg, rl_result.reg);
      rl_result.reg.SetHighReg(temp_reg.GetReg());
    }
    OpRegReg(kOpNeg, rl_result.reg.GetLow(), rl_result.reg.GetLow());    // rLow = -rLow
    OpRegImm(kOpAdc, rl_result.reg.GetHigh(), 0);                        // rHigh = rHigh + CF
    OpRegReg(kOpNeg, rl_result.reg.GetHigh(), rl_result.reg.GetHigh());  // rHigh = -rHigh
  }
  StoreValueWide(rl_dest, rl_result);
}

void X86Mir2Lir::OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<4> thread_offset) {
  DCHECK_EQ(kX86, cu_->instruction_set);
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpCmp: opcode = kX86Cmp32RT;  break;
    case kOpMov: opcode = kX86Mov32RT;  break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  NewLIR2(opcode, r_dest.GetReg(), thread_offset.Int32Value());
}

void X86Mir2Lir::OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<8> thread_offset) {
  DCHECK_EQ(kX86_64, cu_->instruction_set);
  X86OpCode opcode = kX86Bkpt;
  if (cu_->target64 && r_dest.Is64BitSolo()) {
    switch (op) {
      case kOpCmp: opcode = kX86Cmp64RT;  break;
      case kOpMov: opcode = kX86Mov64RT;  break;
      default:
        LOG(FATAL) << "Bad opcode(OpRegThreadMem 64): " << op;
        break;
    }
  } else {
    switch (op) {
      case kOpCmp: opcode = kX86Cmp32RT;  break;
      case kOpMov: opcode = kX86Mov32RT;  break;
      default:
        LOG(FATAL) << "Bad opcode: " << op;
        break;
    }
  }
  NewLIR2(opcode, r_dest.GetReg(), thread_offset.Int32Value());
}

/*
 * Generate array load
 */
void X86Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_dest, int scale) {
  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  RegLocation rl_result;
  rl_array = LoadValue(rl_array, kRefReg);

  int data_offset;
  if (size == k64 || size == kDouble) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  bool constant_index = rl_index.is_const;
  int32_t constant_index_value = 0;
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  } else {
    constant_index_value = mir_graph_->ConstantValue(rl_index);
    // If index is constant, just fold it into the data offset.
    data_offset += constant_index_value << scale;
    // Treat as a non-indexed access below (no index register needed).
    rl_index.reg = RegStorage::InvalidReg();
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
    if (constant_index) {
      GenArrayBoundsCheck(constant_index_value, rl_array.reg, len_offset);
    } else {
      GenArrayBoundsCheck(rl_index.reg, rl_array.reg, len_offset);
    }
  }
  rl_result = EvalLoc(rl_dest, reg_class, true);
  LoadBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, rl_result.reg, size);
  if ((size == k64) || (size == kDouble)) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Generate array store
 */
void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) {
  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;

  if (size == k64 || size == kDouble) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  rl_array = LoadValue(rl_array, kRefReg);
  bool constant_index = rl_index.is_const;
  int32_t constant_index_value = 0;
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  } else {
    // If index is constant, just fold it into the data offset.
    constant_index_value = mir_graph_->ConstantValue(rl_index);
    data_offset += constant_index_value << scale;
    // Treat as a non-indexed access below (no index register needed).
    rl_index.reg = RegStorage::InvalidReg();
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
    if (constant_index) {
      GenArrayBoundsCheck(constant_index_value, rl_array.reg, len_offset);
    } else {
      GenArrayBoundsCheck(rl_index.reg, rl_array.reg, len_offset);
    }
  }
  if ((size == k64) || (size == kDouble)) {
    rl_src = LoadValueWide(rl_src, reg_class);
  } else {
    rl_src = LoadValue(rl_src, reg_class);
  }
  // If the src reg can't be byte accessed, move it to a temp first.
  if ((size == kSignedByte || size == kUnsignedByte) && !IsByteRegister(rl_src.reg)) {
    RegStorage temp = AllocTemp();
    OpRegCopy(temp, rl_src.reg);
    StoreBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, temp, size);
  } else {
    StoreBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, rl_src.reg, size);
  }
  if (card_mark) {
    // Free rl_index if it's a temp. Ensures there are 2 free regs for card mark.
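    // (MarkGCCard dirties the card-table entry covering the array so the GC
    // rescans it for the stored reference; computing the card address needs
    // scratch registers, hence freeing the index temp first.)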
    if (!constant_index) {
      FreeTemp(rl_index.reg);
    }
    MarkGCCard(rl_src.reg, rl_array.reg);
  }
}

RegLocation X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                          RegLocation rl_src, int shift_amount) {
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  if (cu_->target64) {
    OpKind op = static_cast<OpKind>(0);  /* Make gcc happy */
    switch (opcode) {
      case Instruction::SHL_LONG:
      case Instruction::SHL_LONG_2ADDR:
        op = kOpLsl;
        break;
      case Instruction::SHR_LONG:
      case Instruction::SHR_LONG_2ADDR:
        op = kOpAsr;
        break;
      case Instruction::USHR_LONG:
      case Instruction::USHR_LONG_2ADDR:
        op = kOpLsr;
        break;
      default:
        LOG(FATAL) << "Unexpected case";
    }
    OpRegRegImm(op, rl_result.reg, rl_src.reg, shift_amount);
  } else {
    switch (opcode) {
      case Instruction::SHL_LONG:
      case Instruction::SHL_LONG_2ADDR:
        DCHECK_NE(shift_amount, 1);  // Prevent a double store from happening.
        if (shift_amount == 32) {
          OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg.GetLow());
          LoadConstant(rl_result.reg.GetLow(), 0);
        } else if (shift_amount > 31) {
          OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg.GetLow());
          NewLIR2(kX86Sal32RI, rl_result.reg.GetHighReg(), shift_amount - 32);
          LoadConstant(rl_result.reg.GetLow(), 0);
        } else {
          OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetLow());
          OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg.GetHigh());
          NewLIR3(kX86Shld32RRI, rl_result.reg.GetHighReg(), rl_result.reg.GetLowReg(),
                  shift_amount);
          NewLIR2(kX86Sal32RI, rl_result.reg.GetLowReg(), shift_amount);
        }
        break;
      case Instruction::SHR_LONG:
      case Instruction::SHR_LONG_2ADDR:
        if (shift_amount == 32) {
          OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
          OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg.GetHigh());
          NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), 31);
        } else if (shift_amount > 31) {
          OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
          OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg.GetHigh());
          NewLIR2(kX86Sar32RI, rl_result.reg.GetLowReg(), shift_amount - 32);
          NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), 31);
        } else {
          OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetLow());
          OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg.GetHigh());
          NewLIR3(kX86Shrd32RRI, rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg(),
                  shift_amount);
          NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), shift_amount);
        }
        break;
      case Instruction::USHR_LONG:
      case Instruction::USHR_LONG_2ADDR:
        if (shift_amount == 32) {
          OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
          LoadConstant(rl_result.reg.GetHigh(), 0);
        } else if (shift_amount > 31) {
          OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
          NewLIR2(kX86Shr32RI, rl_result.reg.GetLowReg(), shift_amount - 32);
          LoadConstant(rl_result.reg.GetHigh(), 0);
        } else {
          OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetLow());
          OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg.GetHigh());
          NewLIR3(kX86Shrd32RRI, rl_result.reg.GetLowReg(), rl_result.reg.GetHighReg(),
                  shift_amount);
          NewLIR2(kX86Shr32RI, rl_result.reg.GetHighReg(), shift_amount);
        }
        break;
      default:
        LOG(FATAL) << "Unexpected case";
    }
  }
  return rl_result;
}

void X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                   RegLocation rl_src, RegLocation rl_shift) {
  // Per spec, we only care about low 6 bits of shift amount.
  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
  if (shift_amount == 0) {
    rl_src = LoadValueWide(rl_src, kCoreReg);
    StoreValueWide(rl_dest, rl_src);
    return;
  } else if (shift_amount == 1 &&
             (opcode == Instruction::SHL_LONG || opcode == Instruction::SHL_LONG_2ADDR)) {
    // Need to handle this here to avoid calling StoreValueWide twice.
    GenAddLong(Instruction::ADD_LONG, rl_dest, rl_src, rl_src);
    return;
  }
  if (BadOverlap(rl_src, rl_dest)) {
    GenShiftOpLong(opcode, rl_dest, rl_src, rl_shift);
    return;
  }
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_result = GenShiftImmOpLong(opcode, rl_dest, rl_src, shift_amount);
  StoreValueWide(rl_dest, rl_result);
}

void X86Mir2Lir::GenArithImmOpLong(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
  bool isConstSuccess = false;
  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::AND_LONG:
    case Instruction::OR_LONG:
    case Instruction::XOR_LONG:
      if (rl_src2.is_const) {
        isConstSuccess = GenLongLongImm(rl_dest, rl_src1, rl_src2, opcode);
      } else {
        DCHECK(rl_src1.is_const);
        isConstSuccess = GenLongLongImm(rl_dest, rl_src2, rl_src1, opcode);
      }
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if (rl_src2.is_const) {
        isConstSuccess = GenLongLongImm(rl_dest, rl_src1, rl_src2, opcode);
      } else {
        GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
        isConstSuccess = true;
      }
      break;
    case Instruction::ADD_LONG_2ADDR:
    case Instruction::OR_LONG_2ADDR:
    case Instruction::XOR_LONG_2ADDR:
    case Instruction::AND_LONG_2ADDR:
      if (rl_src2.is_const) {
        if (GenerateTwoOperandInstructions()) {
          isConstSuccess = GenLongImm(rl_dest, rl_src2, opcode);
        } else {
          isConstSuccess = GenLongLongImm(rl_dest, rl_src1, rl_src2, opcode);
        }
      } else {
        DCHECK(rl_src1.is_const);
        isConstSuccess = GenLongLongImm(rl_dest, rl_src2, rl_src1, opcode);
      }
      break;
    default:
      isConstSuccess = false;
      break;
  }

  if (!isConstSuccess) {
    // Default - bail to non-const handler.
    GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
  }
}

bool X86Mir2Lir::IsNoOp(Instruction::Code op, int32_t value) {
  switch (op) {
    case Instruction::AND_LONG_2ADDR:
    case Instruction::AND_LONG:
      return value == -1;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      return value == 0;
    default:
      return false;
  }
}

X86OpCode X86Mir2Lir::GetOpcode(Instruction::Code op, RegLocation dest, RegLocation rhs,
                                bool is_high_op) {
  bool rhs_in_mem = rhs.location != kLocPhysReg;
  bool dest_in_mem = dest.location != kLocPhysReg;
  bool is64Bit = cu_->target64;
  DCHECK(!rhs_in_mem || !dest_in_mem);
  switch (op) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      if (dest_in_mem) {
        return is64Bit ? kX86Add64MR : is_high_op ? kX86Adc32MR : kX86Add32MR;
      } else if (rhs_in_mem) {
        return is64Bit ? kX86Add64RM : is_high_op ? kX86Adc32RM : kX86Add32RM;
      }
      return is64Bit ? kX86Add64RR : is_high_op ? kX86Adc32RR : kX86Add32RR;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if (dest_in_mem) {
        return is64Bit ? kX86Sub64MR : is_high_op ? kX86Sbb32MR : kX86Sub32MR;
      } else if (rhs_in_mem) {
        return is64Bit ? kX86Sub64RM : is_high_op ? kX86Sbb32RM : kX86Sub32RM;
      }
      return is64Bit ? kX86Sub64RR : is_high_op ? kX86Sbb32RR : kX86Sub32RR;
    case Instruction::AND_LONG_2ADDR:
    case Instruction::AND_LONG:
      if (dest_in_mem) {
        return is64Bit ? kX86And64MR : kX86And32MR;
      }
      if (is64Bit) {
        return rhs_in_mem ? kX86And64RM : kX86And64RR;
      }
      return rhs_in_mem ? kX86And32RM : kX86And32RR;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if (dest_in_mem) {
        return is64Bit ? kX86Or64MR : kX86Or32MR;
      }
      if (is64Bit) {
        return rhs_in_mem ? kX86Or64RM : kX86Or64RR;
      }
      return rhs_in_mem ? kX86Or32RM : kX86Or32RR;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      if (dest_in_mem) {
        return is64Bit ? kX86Xor64MR : kX86Xor32MR;
      }
      if (is64Bit) {
        return rhs_in_mem ? kX86Xor64RM : kX86Xor64RR;
      }
      return rhs_in_mem ? kX86Xor32RM : kX86Xor32RR;
    default:
      LOG(FATAL) << "Unexpected opcode: " << op;
      return kX86Add32RR;
  }
}

X86OpCode X86Mir2Lir::GetOpcode(Instruction::Code op, RegLocation loc, bool is_high_op,
                                int32_t value) {
  bool in_mem = loc.location != kLocPhysReg;
  bool is64Bit = cu_->target64;
  bool byte_imm = IS_SIMM8(value);
  DCHECK(in_mem || !loc.reg.IsFloat());
  switch (op) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      if (byte_imm) {
        if (in_mem) {
          return is64Bit ? kX86Add64MI8 : is_high_op ? kX86Adc32MI8 : kX86Add32MI8;
        }
        return is64Bit ? kX86Add64RI8 : is_high_op ? kX86Adc32RI8 : kX86Add32RI8;
      }
      if (in_mem) {
        return is64Bit ? kX86Add64MI : is_high_op ? kX86Adc32MI : kX86Add32MI;
      }
      return is64Bit ? kX86Add64RI : is_high_op ? kX86Adc32RI : kX86Add32RI;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if (byte_imm) {
        if (in_mem) {
          return is64Bit ? kX86Sub64MI8 : is_high_op ? kX86Sbb32MI8 : kX86Sub32MI8;
        }
        return is64Bit ? kX86Sub64RI8 : is_high_op ? kX86Sbb32RI8 : kX86Sub32RI8;
      }
      if (in_mem) {
        return is64Bit ? kX86Sub64MI : is_high_op ? kX86Sbb32MI : kX86Sub32MI;
      }
      return is64Bit ? kX86Sub64RI : is_high_op ? kX86Sbb32RI : kX86Sub32RI;
    case Instruction::AND_LONG_2ADDR:
    case Instruction::AND_LONG:
      if (byte_imm) {
        if (is64Bit) {
          return in_mem ? kX86And64MI8 : kX86And64RI8;
        }
        return in_mem ? kX86And32MI8 : kX86And32RI8;
      }
      if (is64Bit) {
        return in_mem ? kX86And64MI : kX86And64RI;
      }
      return in_mem ? kX86And32MI : kX86And32RI;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if (byte_imm) {
        if (is64Bit) {
          return in_mem ? kX86Or64MI8 : kX86Or64RI8;
        }
        return in_mem ? kX86Or32MI8 : kX86Or32RI8;
      }
      if (is64Bit) {
        return in_mem ? kX86Or64MI : kX86Or64RI;
      }
      return in_mem ? kX86Or32MI : kX86Or32RI;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      if (byte_imm) {
        if (is64Bit) {
          return in_mem ? kX86Xor64MI8 : kX86Xor64RI8;
        }
        return in_mem ? kX86Xor32MI8 : kX86Xor32RI8;
      }
      if (is64Bit) {
        return in_mem ? kX86Xor64MI : kX86Xor64RI;
      }
      return in_mem ? kX86Xor32MI : kX86Xor32RI;
    default:
      LOG(FATAL) << "Unexpected opcode: " << op;
      return kX86Add32MI;
  }
}

bool X86Mir2Lir::GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op) {
  DCHECK(rl_src.is_const);
  int64_t val = mir_graph_->ConstantValueWide(rl_src);

  if (cu_->target64) {
    // We can operate with an immediate only if it fits in 32 bits.
    if (val != (static_cast<int64_t>(static_cast<int32_t>(val)))) {
      return false;
    }

    rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);

    if ((rl_dest.location == kLocDalvikFrame) ||
        (rl_dest.location == kLocCompilerTemp)) {
      int r_base = rs_rX86_SP.GetReg();
      int displacement = SRegOffset(rl_dest.s_reg_low);

      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
      X86OpCode x86op = GetOpcode(op, rl_dest, false, val);
      LIR *lir = NewLIR3(x86op, r_base, displacement + LOWORD_OFFSET, val);
      AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
                              true /* is_load */, true /* is64bit */);
      AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
                              false /* is_load */, true /* is64bit */);
      return true;
    }

    RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
    DCHECK_EQ(rl_result.location, kLocPhysReg);
    DCHECK(!rl_result.reg.IsFloat());

    X86OpCode x86op = GetOpcode(op, rl_result, false, val);
    NewLIR2(x86op, rl_result.reg.GetReg(), val);

    StoreValueWide(rl_dest, rl_result);
    return true;
  }

  int32_t val_lo = Low32Bits(val);
  int32_t val_hi = High32Bits(val);
  rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);

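  // On 32-bit targets the constant is applied as two independent 32-bit
  // halves, and IsNoOp skips any half that cannot change the value. E.g.
  // when ORing in the constant 0x0000000500000000, val_lo is 0 and OR with
  // 0 is a no-op, so only the high word is touched.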
  // Can we just do this into memory?
  if ((rl_dest.location == kLocDalvikFrame) ||
      (rl_dest.location == kLocCompilerTemp)) {
    int r_base = rs_rX86_SP.GetReg();
    int displacement = SRegOffset(rl_dest.s_reg_low);

    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
    if (!IsNoOp(op, val_lo)) {
      X86OpCode x86op = GetOpcode(op, rl_dest, false, val_lo);
      LIR *lir = NewLIR3(x86op, r_base, displacement + LOWORD_OFFSET, val_lo);
      AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
                              true /* is_load */, true /* is64bit */);
      AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
                              false /* is_load */, true /* is64bit */);
    }
    if (!IsNoOp(op, val_hi)) {
      X86OpCode x86op = GetOpcode(op, rl_dest, true, val_hi);
      LIR *lir = NewLIR3(x86op, r_base, displacement + HIWORD_OFFSET, val_hi);
      AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
                              true /* is_load */, true /* is64bit */);
      AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
                              false /* is_load */, true /* is64bit */);
    }
    return true;
  }

  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  DCHECK_EQ(rl_result.location, kLocPhysReg);
  DCHECK(!rl_result.reg.IsFloat());

  if (!IsNoOp(op, val_lo)) {
    X86OpCode x86op = GetOpcode(op, rl_result, false, val_lo);
    NewLIR2(x86op, rl_result.reg.GetLowReg(), val_lo);
  }
  if (!IsNoOp(op, val_hi)) {
    X86OpCode x86op = GetOpcode(op, rl_result, true, val_hi);
    NewLIR2(x86op, rl_result.reg.GetHighReg(), val_hi);
  }
  StoreValueWide(rl_dest, rl_result);
  return true;
}

bool X86Mir2Lir::GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1,
                                RegLocation rl_src2, Instruction::Code op) {
  DCHECK(rl_src2.is_const);
  int64_t val = mir_graph_->ConstantValueWide(rl_src2);

  if (cu_->target64) {
    // We can operate with an immediate only if it fits in 32 bits.
    if (val != (static_cast<int64_t>(static_cast<int32_t>(val)))) {
      return false;
    }
    if (rl_dest.location == kLocPhysReg &&
        rl_src1.location == kLocPhysReg && !rl_dest.reg.IsFloat()) {
      X86OpCode x86op = GetOpcode(op, rl_dest, false, val);
      OpRegCopy(rl_dest.reg, rl_src1.reg);
      NewLIR2(x86op, rl_dest.reg.GetReg(), val);
      StoreFinalValueWide(rl_dest, rl_dest);
      return true;
    }

    rl_src1 = LoadValueWide(rl_src1, kCoreReg);
    // We need the values to be in a temporary.
    RegLocation rl_result = ForceTempWide(rl_src1);

    X86OpCode x86op = GetOpcode(op, rl_result, false, val);
    NewLIR2(x86op, rl_result.reg.GetReg(), val);

    StoreFinalValueWide(rl_dest, rl_result);
    return true;
  }

  int32_t val_lo = Low32Bits(val);
  int32_t val_hi = High32Bits(val);
  rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
  rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);

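  // The two-operand immediate forms overwrite their left operand, so the
  // in-place fast path below is only safe when rl_dest and rl_src1 alias
  // the exact same register pair.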
  // Can we do this directly into the destination registers?
  if (rl_dest.location == kLocPhysReg && rl_src1.location == kLocPhysReg &&
      rl_dest.reg.GetLowReg() == rl_src1.reg.GetLowReg() &&
      rl_dest.reg.GetHighReg() == rl_src1.reg.GetHighReg() && !rl_dest.reg.IsFloat()) {
    if (!IsNoOp(op, val_lo)) {
      X86OpCode x86op = GetOpcode(op, rl_dest, false, val_lo);
      NewLIR2(x86op, rl_dest.reg.GetLowReg(), val_lo);
    }
    if (!IsNoOp(op, val_hi)) {
      X86OpCode x86op = GetOpcode(op, rl_dest, true, val_hi);
      NewLIR2(x86op, rl_dest.reg.GetHighReg(), val_hi);
    }

    StoreFinalValueWide(rl_dest, rl_dest);
    return true;
  }

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  DCHECK_EQ(rl_src1.location, kLocPhysReg);

  // We need the values to be in a temporary.
  RegLocation rl_result = ForceTempWide(rl_src1);
  if (!IsNoOp(op, val_lo)) {
    X86OpCode x86op = GetOpcode(op, rl_result, false, val_lo);
    NewLIR2(x86op, rl_result.reg.GetLowReg(), val_lo);
  }
  if (!IsNoOp(op, val_hi)) {
    X86OpCode x86op = GetOpcode(op, rl_result, true, val_hi);
    NewLIR2(x86op, rl_result.reg.GetHighReg(), val_hi);
  }

  StoreFinalValueWide(rl_dest, rl_result);
  return true;
}

// For final classes there are no sub-classes to check and so we can answer the instance-of
// question with simple comparisons. Use compares to memory and SETEQ to optimize for x86.
void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
                                    RegLocation rl_dest, RegLocation rl_src) {
  RegLocation object = LoadValue(rl_src, kRefReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage result_reg = rl_result.reg;

  // For 32-bit, SETcc only works with EAX..EDX.
  RegStorage object_32reg = object.reg.Is64Bit() ? As32BitReg(object.reg) : object.reg;
  if (result_reg.GetRegNum() == object_32reg.GetRegNum() || !IsByteRegister(result_reg)) {
    result_reg = AllocateByteRegister();
  }

  // Assume that there is no match.
  LoadConstant(result_reg, 0);
  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);

  // We will use this register to compare to memory below.
  // References are 32 bit in memory, and 64 bit in registers (in 64 bit mode).
  // For this reason, force allocation of a 32 bit register to use, so that the
  // compare to memory will be done using a 32 bit comparison.
  // The LoadRefDisp(s) below will work normally, even in 64 bit mode.
  RegStorage check_class = AllocTemp();

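  // offset_of_type below addresses the dex cache's resolved-types array:
  // DataOffset() skips the array header, and each entry is one 32-bit
  // HeapReference<Class>, so entry type_idx sits
  // sizeof(HeapReference<Class>) * type_idx bytes past the data start.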
  // If Method* is already in a register, we can save a copy.
  RegLocation rl_method = mir_graph_->GetMethodLoc();
  int32_t offset_of_type = mirror::Array::DataOffset(
      sizeof(mirror::HeapReference<mirror::Class*>)).Int32Value() +
      (sizeof(mirror::HeapReference<mirror::Class*>) * type_idx);

  if (rl_method.location == kLocPhysReg) {
    if (use_declaring_class) {
      LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                  check_class, kNotVolatile);
    } else {
      LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                  check_class, kNotVolatile);
      LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile);
    }
  } else {
    LoadCurrMethodDirect(check_class);
    if (use_declaring_class) {
      LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                  check_class, kNotVolatile);
    } else {
      LoadRefDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                  check_class, kNotVolatile);
      LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile);
    }
  }

  // Compare the computed class to the class in the object.
  DCHECK_EQ(object.location, kLocPhysReg);
  OpRegMem(kOpCmp, check_class, object.reg, mirror::Object::ClassOffset().Int32Value());

  // Set the low byte of the result to 0 or 1 from the compare condition code.
  NewLIR2(kX86Set8R, result_reg.GetReg(), kX86CondEq);

  LIR* target = NewLIR0(kPseudoTargetLabel);
  null_branchover->target = target;
  FreeTemp(check_class);
  if (IsTemp(result_reg)) {
    OpRegCopy(rl_result.reg, result_reg);
    FreeTemp(result_reg);
  }
  StoreValue(rl_dest, rl_result);
}

void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                            bool type_known_abstract, bool use_declaring_class,
                                            bool can_assume_type_is_in_dex_cache,
                                            uint32_t type_idx, RegLocation rl_dest,
                                            RegLocation rl_src) {
  FlushAllRegs();
  // May generate a call - use explicit registers.
  LockCallTemps();
  RegStorage method_reg = TargetReg(kArg1, kRef);  // kArg1 gets current Method*.
  LoadCurrMethodDirect(method_reg);
  RegStorage class_reg = TargetReg(kArg2, kRef);  // kArg2 will hold the Class*.
  RegStorage ref_reg = TargetReg(kArg0, kRef);  // kArg0 will hold the ref.
  // Reference must end up in kArg0.
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // Caller function returns Class* in kArg0.
    if (cu_->target64) {
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
                           type_idx, true);
    } else {
      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                           type_idx, true);
    }
    OpRegCopy(class_reg, TargetReg(kRet0, kRef));
    LoadValueDirectFixed(rl_src, ref_reg);
  } else if (use_declaring_class) {
    LoadValueDirectFixed(rl_src, ref_reg);
    LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                class_reg, kNotVolatile);
  } else {
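    // The dex cache may not hold the resolved type yet. In that case the
    // entry loaded below is null and we call out to pInitializeType, which
    // resolves the type and returns the Class* in kRet0.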
    // Load dex cache entry into class_reg (kArg2).
    LoadValueDirectFixed(rl_src, ref_reg);
    LoadRefDisp(method_reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                class_reg, kNotVolatile);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::HeapReference<mirror::Class*>)).Int32Value() +
        (sizeof(mirror::HeapReference<mirror::Class*>) * type_idx);
    LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
    if (!can_assume_type_is_in_dex_cache) {
      // Need to test presence of type in dex cache at runtime.
      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
      // Type is not resolved. Call out to helper, which will return resolved type in kRet0/kArg0.
      if (cu_->target64) {
        CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx, true);
      } else {
        CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true);
      }
      OpRegCopy(class_reg, TargetReg(kRet0, kRef));  // Align usage with fast path.
      LoadValueDirectFixed(rl_src, ref_reg);  /* Reload Ref. */
      // Rejoin code paths
      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
      hop_branch->target = hop_target;
    }
  }
  /* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result. */
  RegLocation rl_result = GetReturn(kRefReg);

  // On x86-64 kArg0 is not EAX, so we have to copy ref from kArg0 to EAX.
  if (cu_->target64) {
    OpRegCopy(rl_result.reg, ref_reg);
  }

  // For 32-bit, SETcc only works with EAX..EDX.
  DCHECK_LT(rl_result.reg.GetRegNum(), 4);

  // Is the class NULL?
  LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, NULL);

  RegStorage ref_class_reg = TargetReg(kArg1, kRef);  // kArg1 will hold the ref's Class*.
  /* Load object->klass_. */
  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
  LoadRefDisp(ref_reg, mirror::Object::ClassOffset().Int32Value(), ref_class_reg,
              kNotVolatile);
  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class. */
  LIR* branchover = nullptr;
  if (type_known_final) {
    // Ensure top 3 bytes of result are 0.
    LoadConstant(rl_result.reg, 0);
    OpRegReg(kOpCmp, ref_class_reg, class_reg);
    // Set the low byte of the result to 0 or 1 from the compare condition code.
    NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondEq);
  } else {
    if (!type_known_abstract) {
      LoadConstant(rl_result.reg, 1);  // Assume the test succeeds.
      branchover = OpCmpBranch(kCondEq, ref_class_reg, class_reg, NULL);
    }
    OpRegCopy(TargetReg(kArg0, kRef), class_reg);
    if (cu_->target64) {
      OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial));
    } else {
      OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
    }
  }
  // TODO: only clobber when type isn't final?
  ClobberCallerSave();
  /* Branch targets here. */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  StoreValue(rl_dest, rl_result);
  branch1->target = target;
  if (branchover != nullptr) {
    branchover->target = target;
  }
}

void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                               RegLocation rl_lhs, RegLocation rl_rhs) {
  OpKind op = kOpBkpt;
  bool is_div_rem = false;
  bool unary = false;
  bool shift_op = false;
  bool is_two_addr = false;
  RegLocation rl_result;
  switch (opcode) {
    case Instruction::NEG_INT:
      op = kOpNeg;
      unary = true;
      break;
    case Instruction::NOT_INT:
      op = kOpMvn;
      unary = true;
      break;
    case Instruction::ADD_INT_2ADDR:
      is_two_addr = true;
      // Fallthrough
    case Instruction::ADD_INT:
      op = kOpAdd;
      break;
    case Instruction::SUB_INT_2ADDR:
      is_two_addr = true;
      // Fallthrough
    case Instruction::SUB_INT:
      op = kOpSub;
      break;
    case Instruction::MUL_INT_2ADDR:
      is_two_addr = true;
      // Fallthrough
    case Instruction::MUL_INT:
      op = kOpMul;
      break;
    case Instruction::DIV_INT_2ADDR:
      is_two_addr = true;
      // Fallthrough
    case Instruction::DIV_INT:
      op = kOpDiv;
      is_div_rem = true;
      break;
    /* NOTE: returns in kArg1 */
    case Instruction::REM_INT_2ADDR:
      is_two_addr = true;
      // Fallthrough
    case Instruction::REM_INT:
      op = kOpRem;
      is_div_rem = true;
      break;
    case Instruction::AND_INT_2ADDR:
      is_two_addr = true;
      // Fallthrough
    case Instruction::AND_INT:
      op = kOpAnd;
      break;
    case Instruction::OR_INT_2ADDR:
      is_two_addr = true;
      // Fallthrough
    case Instruction::OR_INT:
      op = kOpOr;
      break;
    case Instruction::XOR_INT_2ADDR:
      is_two_addr = true;
      // Fallthrough
    case Instruction::XOR_INT:
      op = kOpXor;
      break;
    case Instruction::SHL_INT_2ADDR:
      is_two_addr = true;
      // Fallthrough
    case Instruction::SHL_INT:
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT_2ADDR:
      is_two_addr = true;
      // Fallthrough
    case Instruction::SHR_INT:
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT_2ADDR:
      is_two_addr = true;
      // Fallthrough
    case Instruction::USHR_INT:
      shift_op = true;
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Invalid word arith op: " << opcode;
  }

  // Can we convert to a two address instruction?
  if (!is_two_addr &&
      (mir_graph_->SRegToVReg(rl_dest.s_reg_low) ==
       mir_graph_->SRegToVReg(rl_lhs.s_reg_low))) {
    is_two_addr = true;
  }

  if (!GenerateTwoOperandInstructions()) {
    is_two_addr = false;
  }

  // Get the div/rem stuff out of the way.
  if (is_div_rem) {
    rl_result = GenDivRem(rl_dest, rl_lhs, rl_rhs, op == kOpDiv, true);
    StoreValue(rl_dest, rl_result);
    return;
  }

  // If we generate any memory access below, it will reference a dalvik reg.
  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);

  if (unary) {
    rl_lhs = LoadValue(rl_lhs, kCoreReg);
    rl_result = UpdateLocTyped(rl_dest, kCoreReg);
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegReg(op, rl_result.reg, rl_lhs.reg);
  } else {
    if (shift_op) {
      // X86 doesn't require masking and must use ECX.
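      // (The CL-count forms of SHL/SHR/SAR mask the shift count in hardware,
      // to 5 bits for 32-bit operands, so no explicit "and 0x1F" of the
      // count is needed before the shift.)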
      RegStorage t_reg = TargetReg(kCount, kNotWide);  // rCX
      LoadValueDirectFixed(rl_rhs, t_reg);
      if (is_two_addr) {
        // Can we do this directly into memory?
        rl_result = UpdateLocTyped(rl_dest, kCoreReg);
        rl_rhs = LoadValue(rl_rhs, kCoreReg);
        if (rl_result.location != kLocPhysReg) {
          // Okay, we can do this into memory
          OpMemReg(op, rl_result, t_reg.GetReg());
          FreeTemp(t_reg);
          return;
        } else if (!rl_result.reg.IsFloat()) {
          // Can do this directly into the result register
          OpRegReg(op, rl_result.reg, t_reg);
          FreeTemp(t_reg);
          StoreFinalValue(rl_dest, rl_result);
          return;
        }
      }
      // Three address form, or we can't do directly.
      rl_lhs = LoadValue(rl_lhs, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      OpRegRegReg(op, rl_result.reg, rl_lhs.reg, t_reg);
      FreeTemp(t_reg);
    } else {
      // Multiply is 3 operand only (sort of).
      if (is_two_addr && op != kOpMul) {
        // Can we do this directly into memory?
        rl_result = UpdateLocTyped(rl_dest, kCoreReg);
        if (rl_result.location == kLocPhysReg) {
          // Ensure res is in a core reg
          rl_result = EvalLoc(rl_dest, kCoreReg, true);
          // Can we do this from memory directly?
          rl_rhs = UpdateLocTyped(rl_rhs, kCoreReg);
          if (rl_rhs.location != kLocPhysReg) {
            OpRegMem(op, rl_result.reg, rl_rhs);
            StoreFinalValue(rl_dest, rl_result);
            return;
          } else if (!rl_rhs.reg.IsFloat()) {
            OpRegReg(op, rl_result.reg, rl_rhs.reg);
            StoreFinalValue(rl_dest, rl_result);
            return;
          }
        }
        rl_rhs = LoadValue(rl_rhs, kCoreReg);
        // rl_rhs and rl_dest may refer to the same VR; in that case rl_dest is
        // in a register after the LoadValue while rl_result has not been
        // updated yet, so refresh it.
        rl_result = UpdateLocTyped(rl_dest, kCoreReg);
        if (rl_result.location != kLocPhysReg) {
          // Okay, we can do this into memory.
          OpMemReg(op, rl_result, rl_rhs.reg.GetReg());
          return;
        } else if (!rl_result.reg.IsFloat()) {
          // Can do this directly into the result register.
          OpRegReg(op, rl_result.reg, rl_rhs.reg);
          StoreFinalValue(rl_dest, rl_result);
          return;
        } else {
          rl_lhs = LoadValue(rl_lhs, kCoreReg);
          rl_result = EvalLoc(rl_dest, kCoreReg, true);
          OpRegRegReg(op, rl_result.reg, rl_lhs.reg, rl_rhs.reg);
        }
      } else {
        // Try to use reg/memory instructions.
        rl_lhs = UpdateLocTyped(rl_lhs, kCoreReg);
        rl_rhs = UpdateLocTyped(rl_rhs, kCoreReg);
        // We can't optimize with FP registers.
        if (!IsOperationSafeWithoutTemps(rl_lhs, rl_rhs)) {
          // Something is difficult, so fall back to the standard case.
          rl_lhs = LoadValue(rl_lhs, kCoreReg);
          rl_rhs = LoadValue(rl_rhs, kCoreReg);
          rl_result = EvalLoc(rl_dest, kCoreReg, true);
          OpRegRegReg(op, rl_result.reg, rl_lhs.reg, rl_rhs.reg);
        } else {
          // We can optimize by moving to result and using memory operands.
          if (rl_rhs.location != kLocPhysReg) {
            // Force LHS into result.
            // We must be careful with evaluation order here: if rl_dest and
            // rl_lhs point to the same VR, we should load first; if they are
            // different, we should find a register for dest first.
            if (mir_graph_->SRegToVReg(rl_dest.s_reg_low) ==
                mir_graph_->SRegToVReg(rl_lhs.s_reg_low)) {
              rl_lhs = LoadValue(rl_lhs, kCoreReg);
              rl_result = EvalLoc(rl_dest, kCoreReg, true);
              // No-op if these are the same.
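              // (When rl_dest and rl_lhs share a VR, EvalLoc will usually
              // return the register the LHS was just loaded into, so the
              // copy is elided.)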
              OpRegCopy(rl_result.reg, rl_lhs.reg);
            } else {
              rl_result = EvalLoc(rl_dest, kCoreReg, true);
              LoadValueDirect(rl_lhs, rl_result.reg);
            }
            OpRegMem(op, rl_result.reg, rl_rhs);
          } else if (rl_lhs.location != kLocPhysReg) {
            // RHS is in a register; LHS is in memory.
            if (op != kOpSub) {
              // Force RHS into result and operate on memory.
              rl_result = EvalLoc(rl_dest, kCoreReg, true);
              OpRegCopy(rl_result.reg, rl_rhs.reg);
              OpRegMem(op, rl_result.reg, rl_lhs);
            } else {
              // Subtraction isn't commutative.
              rl_lhs = LoadValue(rl_lhs, kCoreReg);
              rl_rhs = LoadValue(rl_rhs, kCoreReg);
              rl_result = EvalLoc(rl_dest, kCoreReg, true);
              OpRegRegReg(op, rl_result.reg, rl_lhs.reg, rl_rhs.reg);
            }
          } else {
            // Both are in registers.
            rl_lhs = LoadValue(rl_lhs, kCoreReg);
            rl_rhs = LoadValue(rl_rhs, kCoreReg);
            rl_result = EvalLoc(rl_dest, kCoreReg, true);
            OpRegRegReg(op, rl_result.reg, rl_lhs.reg, rl_rhs.reg);
          }
        }
      }
    }
  }
  StoreValue(rl_dest, rl_result);
}

bool X86Mir2Lir::IsOperationSafeWithoutTemps(RegLocation rl_lhs, RegLocation rl_rhs) {
  // If we have non-core registers, then we can't do good things.
  if (rl_lhs.location == kLocPhysReg && rl_lhs.reg.IsFloat()) {
    return false;
  }
  if (rl_rhs.location == kLocPhysReg && rl_rhs.reg.IsFloat()) {
    return false;
  }

  // Everything will be fine :-).
  return true;
}

void X86Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  if (!cu_->target64) {
    Mir2Lir::GenIntToLong(rl_dest, rl_src);
    return;
  }
  rl_src = UpdateLocTyped(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (rl_src.location == kLocPhysReg) {
    NewLIR2(kX86MovsxdRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
  } else {
    int displacement = SRegOffset(rl_src.s_reg_low);
    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
    LIR *m = NewLIR3(kX86MovsxdRM, rl_result.reg.GetReg(), rs_rX86_SP.GetReg(),
                     displacement + LOWORD_OFFSET);
    AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
                            true /* is_load */, true /* is_64bit */);
  }
  StoreValueWide(rl_dest, rl_result);
}

void X86Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                RegLocation rl_src1, RegLocation rl_shift) {
  if (!cu_->target64) {
    Mir2Lir::GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
    return;
  }

  bool is_two_addr = false;
  OpKind op = kOpBkpt;
  RegLocation rl_result;

  switch (opcode) {
    case Instruction::SHL_LONG_2ADDR:
      is_two_addr = true;
      // Fallthrough
    case Instruction::SHL_LONG:
      op = kOpLsl;
      break;
    case Instruction::SHR_LONG_2ADDR:
      is_two_addr = true;
      // Fallthrough
    case Instruction::SHR_LONG:
      op = kOpAsr;
      break;
    case Instruction::USHR_LONG_2ADDR:
      is_two_addr = true;
      // Fallthrough
    case Instruction::USHR_LONG:
      op = kOpLsr;
      break;
    default:
      op = kOpBkpt;
  }

  // X86 doesn't require masking and must use ECX.
  RegStorage t_reg = TargetReg(kCount, kNotWide);  // rCX
  LoadValueDirectFixed(rl_shift, t_reg);
  if (is_two_addr) {
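    // (This path is 64-bit only, note the early return above, so the whole
    // long shift can be applied to a single 8-byte frame slot or register.)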
    // Can we do this directly into memory?
    rl_result = UpdateLocWideTyped(rl_dest, kCoreReg);
    if (rl_result.location != kLocPhysReg) {
      // Okay, we can do this into memory
      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
      OpMemReg(op, rl_result, t_reg.GetReg());
    } else if (!rl_result.reg.IsFloat()) {
      // Can do this directly into the result register
      OpRegReg(op, rl_result.reg, t_reg);
      StoreFinalValueWide(rl_dest, rl_result);
    }
  } else {
    // Three address form, or we can't do directly.
    rl_src1 = LoadValueWide(rl_src1, kCoreReg);
    rl_result = EvalLocWide(rl_dest, kCoreReg, true);
    OpRegRegReg(op, rl_result.reg, rl_src1.reg, t_reg);
    StoreFinalValueWide(rl_dest, rl_result);
  }

  FreeTemp(t_reg);
}

}  // namespace art