utility_x86.cc revision 2689fbad6b5ec1ae8f8c8791a80c6fd3cf24144d
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "codegen_x86.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/dataflow_iterator-inl.h"
#include "x86_lir.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"

namespace art {

/* This file contains codegen for the X86 ISA */

LIR* X86Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
  int opcode;
  /* must be both DOUBLE or both not DOUBLE */
  DCHECK(r_dest.IsFloat() || r_src.IsFloat());
  DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
  if (r_dest.IsDouble()) {
    opcode = kX86MovsdRR;
  } else {
    if (r_dest.IsSingle()) {
      if (r_src.IsSingle()) {
        opcode = kX86MovssRR;
      } else {  // Fpr <- Gpr
        opcode = kX86MovdxrRR;
      }
    } else {  // Gpr <- Fpr
      DCHECK(r_src.IsSingle()) << "Raw: 0x" << std::hex << r_src.GetRawBits();
      opcode = kX86MovdrxRR;
    }
  }
  DCHECK_NE((EncodingMap[opcode].flags & IS_BINARY_OP), 0ULL);
  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
  if (r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) {
  return true;
}

bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) {
  return false;
}

bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) {
  return true;
}

bool X86Mir2Lir::InexpensiveConstantDouble(int64_t value) {
  return value == 0;
}

/*
 * Load an immediate using a shortcut if possible; otherwise
 * grab from the per-translation literal pool.  If the target is
 * a high register, build the constant into a low register and copy.
 *
 * No additional register clobbering operation is performed. Use this version when
 * 1) r_dest is freshly returned from AllocTemp or
 * 2) the codegen is under fixed register usage
 */
LIR* X86Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
  RegStorage r_dest_save = r_dest;
  if (r_dest.IsFloat()) {
    if (value == 0) {
      return NewLIR2(kX86XorpsRR, r_dest.GetReg(), r_dest.GetReg());
    }
    r_dest = AllocTemp();
  }

  LIR *res;
  if (value == 0) {
    res = NewLIR2(kX86Xor32RR, r_dest.GetReg(), r_dest.GetReg());
  } else {
    // Note, there is no byte immediate form of a 32 bit immediate move.
    // 64-bit immediate is not supported by LIR structure
    res = NewLIR2(kX86Mov32RI, r_dest.GetReg(), value);
  }

  if (r_dest_save.IsFloat()) {
    NewLIR2(kX86MovdxrRR, r_dest_save.GetReg(), r_dest.GetReg());
    FreeTemp(r_dest);
  }

  return res;
}

LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target) {
  LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly */);
  res->target = target;
  return res;
}

LIR* X86Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
  LIR* branch = NewLIR2(kX86Jcc8, 0 /* offset to be patched */,
                        X86ConditionEncoding(cc));
  branch->target = target;
  return branch;
}

LIR* X86Mir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpNeg: opcode = r_dest_src.Is64Bit() ? kX86Neg64R : kX86Neg32R; break;
    case kOpNot: opcode = r_dest_src.Is64Bit() ? kX86Not64R : kX86Not32R; break;
    case kOpRev: opcode = kX86Bswap32R; break;
    case kOpBlx: opcode = kX86CallR; break;
    default:
      LOG(FATAL) << "Bad case in OpReg " << op;
  }
  return NewLIR1(opcode, r_dest_src.GetReg());
}

LIR* X86Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
  X86OpCode opcode = kX86Bkpt;
  bool byte_imm = IS_SIMM8(value);
  DCHECK(!r_dest_src1.IsFloat());
  if (r_dest_src1.Is64Bit()) {
    switch (op) {
      case kOpAdd: opcode = byte_imm ? kX86Add64RI8 : kX86Add64RI; break;
      case kOpSub: opcode = byte_imm ? kX86Sub64RI8 : kX86Sub64RI; break;
      case kOpLsl: opcode = kX86Sal64RI; break;
      case kOpLsr: opcode = kX86Shr64RI; break;
      case kOpAsr: opcode = kX86Sar64RI; break;
      case kOpCmp: opcode = byte_imm ? kX86Cmp64RI8 : kX86Cmp64RI; break;
      default:
        LOG(FATAL) << "Bad case in OpRegImm (64-bit) " << op;
    }
  } else {
    switch (op) {
      case kOpLsl: opcode = kX86Sal32RI; break;
      case kOpLsr: opcode = kX86Shr32RI; break;
      case kOpAsr: opcode = kX86Sar32RI; break;
      case kOpAdd: opcode = byte_imm ? kX86Add32RI8 : kX86Add32RI; break;
      case kOpOr:  opcode = byte_imm ? kX86Or32RI8 : kX86Or32RI; break;
      case kOpAdc: opcode = byte_imm ? kX86Adc32RI8 : kX86Adc32RI; break;
      // case kOpSbb: opcode = kX86Sbb32RI; break;
      case kOpAnd: opcode = byte_imm ? kX86And32RI8 : kX86And32RI; break;
      case kOpSub: opcode = byte_imm ? kX86Sub32RI8 : kX86Sub32RI; break;
      case kOpXor: opcode = byte_imm ? kX86Xor32RI8 : kX86Xor32RI; break;
      case kOpCmp: opcode = byte_imm ? kX86Cmp32RI8 : kX86Cmp32RI; break;
      case kOpMov:
        /*
         * Moving the constant zero into a register can be specialized as an xor of the register.
         * However, that sets eflags while the move does not. For that reason, always do
         * the move here; callers that are flexible should call LoadConstantNoClobber instead.
         */
        opcode = kX86Mov32RI;
        break;
      case kOpMul:
        opcode = byte_imm ?
                   kX86Imul32RRI8 : kX86Imul32RRI;
        return NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), value);
      case kOp2Byte:
        opcode = kX86Mov32RI;
        value = static_cast<int8_t>(value);
        break;
      case kOp2Short:
        opcode = kX86Mov32RI;
        value = static_cast<int16_t>(value);
        break;
      case kOp2Char:
        opcode = kX86Mov32RI;
        value = static_cast<uint16_t>(value);
        break;
      case kOpNeg:
        opcode = kX86Mov32RI;
        value = -value;
        break;
      default:
        LOG(FATAL) << "Bad case in OpRegImm " << op;
    }
  }
  return NewLIR2(opcode, r_dest_src1.GetReg(), value);
}

LIR* X86Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
  bool is64Bit = r_dest_src1.Is64Bit();
  X86OpCode opcode = kX86Nop;
  bool src2_must_be_cx = false;
  switch (op) {
    // X86 unary opcodes
    case kOpMvn:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpNot, r_dest_src1);
    case kOpNeg:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpNeg, r_dest_src1);
    case kOpRev:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpRev, r_dest_src1);
    case kOpRevsh:
      OpRegCopy(r_dest_src1, r_src2);
      OpReg(kOpRev, r_dest_src1);
      return OpRegImm(kOpAsr, r_dest_src1, 16);
    // X86 binary opcodes
    case kOpSub: opcode = is64Bit ? kX86Sub64RR : kX86Sub32RR; break;
    case kOpSbc: opcode = is64Bit ? kX86Sbb64RR : kX86Sbb32RR; break;
    case kOpLsl: opcode = is64Bit ? kX86Sal64RC : kX86Sal32RC; src2_must_be_cx = true; break;
    case kOpLsr: opcode = is64Bit ? kX86Shr64RC : kX86Shr32RC; src2_must_be_cx = true; break;
    case kOpAsr: opcode = is64Bit ? kX86Sar64RC : kX86Sar32RC; src2_must_be_cx = true; break;
    case kOpMov: opcode = is64Bit ? kX86Mov64RR : kX86Mov32RR; break;
    case kOpCmp: opcode = is64Bit ? kX86Cmp64RR : kX86Cmp32RR; break;
    case kOpAdd: opcode = is64Bit ? kX86Add64RR : kX86Add32RR; break;
    case kOpAdc: opcode = is64Bit ? kX86Adc64RR : kX86Adc32RR; break;
    case kOpAnd: opcode = is64Bit ? kX86And64RR : kX86And32RR; break;
    case kOpOr:  opcode = is64Bit ? kX86Or64RR : kX86Or32RR; break;
    case kOpXor: opcode = is64Bit ? kX86Xor64RR : kX86Xor32RR; break;
    case kOp2Byte:
      // TODO: there are several instances of this check.  A utility function perhaps?
      // TODO: Similar to Arm's reg < 8 check.  Perhaps add attribute checks to RegStorage?
      // Use shifts instead of a byte operand if the source can't be byte accessed.
      if (r_src2.GetRegNum() >= rs_rX86_SP.GetRegNum()) {
        NewLIR2(is64Bit ? kX86Mov64RR : kX86Mov32RR, r_dest_src1.GetReg(), r_src2.GetReg());
        NewLIR2(is64Bit ? kX86Sal64RI : kX86Sal32RI, r_dest_src1.GetReg(), is64Bit ? 56 : 24);
        return NewLIR2(is64Bit ? kX86Sar64RI : kX86Sar32RI, r_dest_src1.GetReg(),
                       is64Bit ? 56 : 24);
      } else {
        opcode = is64Bit ? kX86Bkpt : kX86Movsx8RR;
      }
      break;
    case kOp2Short: opcode = is64Bit ? kX86Bkpt : kX86Movsx16RR; break;
    case kOp2Char: opcode = is64Bit ? kX86Bkpt : kX86Movzx16RR; break;
    case kOpMul: opcode = is64Bit ? kX86Bkpt : kX86Imul32RR; break;
    default:
      LOG(FATAL) << "Bad case in OpRegReg " << op;
      break;
  }
  CHECK(!src2_must_be_cx || r_src2.GetReg() == rs_rCX.GetReg());
  return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
}

LIR* X86Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
  DCHECK(!r_base.IsFloat());
  X86OpCode opcode = kX86Nop;
  int dest = r_dest.IsPair() ?
             r_dest.GetLowReg() : r_dest.GetReg();
  switch (move_type) {
    case kMov8GP:
      CHECK(!r_dest.IsFloat());
      opcode = kX86Mov8RM;
      break;
    case kMov16GP:
      CHECK(!r_dest.IsFloat());
      opcode = kX86Mov16RM;
      break;
    case kMov32GP:
      CHECK(!r_dest.IsFloat());
      opcode = kX86Mov32RM;
      break;
    case kMov32FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovssRM;
      break;
    case kMov64FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovsdRM;
      break;
    case kMovU128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovupsRM;
      break;
    case kMovA128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovapsRM;
      break;
    case kMovLo128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovlpsRM;
      break;
    case kMovHi128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovhpsRM;
      break;
    case kMov64GP:
    case kMovLo64FP:
    case kMovHi64FP:
    default:
      LOG(FATAL) << "Bad case in OpMovRegMem";
      break;
  }

  return NewLIR3(opcode, dest, r_base.GetReg(), offset);
}

LIR* X86Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
  DCHECK(!r_base.IsFloat());
  int src = r_src.IsPair() ? r_src.GetLowReg() : r_src.GetReg();

  X86OpCode opcode = kX86Nop;
  switch (move_type) {
    case kMov8GP:
      CHECK(!r_src.IsFloat());
      opcode = kX86Mov8MR;
      break;
    case kMov16GP:
      CHECK(!r_src.IsFloat());
      opcode = kX86Mov16MR;
      break;
    case kMov32GP:
      CHECK(!r_src.IsFloat());
      opcode = kX86Mov32MR;
      break;
    case kMov32FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovssMR;
      break;
    case kMov64FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovsdMR;
      break;
    case kMovU128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovupsMR;
      break;
    case kMovA128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovapsMR;
      break;
    case kMovLo128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovlpsMR;
      break;
    case kMovHi128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovhpsMR;
      break;
    case kMov64GP:
    case kMovLo64FP:
    case kMovHi64FP:
    default:
      LOG(FATAL) << "Bad case in OpMovMemReg";
      break;
  }

  return NewLIR3(opcode, r_base.GetReg(), offset, src);
}

LIR* X86Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
  // The only conditional reg to reg operation supported is Cmov
  DCHECK_EQ(op, kOpCmov);
  return NewLIR3(kX86Cmov32RRC, r_dest.GetReg(), r_src.GetReg(), X86ConditionEncoding(cc));
}

LIR* X86Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
  bool is64Bit = r_dest.Is64Bit();
  X86OpCode opcode = kX86Nop;
  switch (op) {
    // X86 binary opcodes
    case kOpSub: opcode = is64Bit ? kX86Sub64RM : kX86Sub32RM; break;
    case kOpMov: opcode = is64Bit ? kX86Mov64RM : kX86Mov32RM; break;
    case kOpCmp: opcode = is64Bit ? kX86Cmp64RM : kX86Cmp32RM; break;
    case kOpAdd: opcode = is64Bit ? kX86Add64RM : kX86Add32RM; break;
    case kOpAnd: opcode = is64Bit ? kX86And64RM : kX86And32RM; break;
    case kOpOr:  opcode = is64Bit ? kX86Or64RM : kX86Or32RM; break;
    case kOpXor: opcode = is64Bit ?
                 kX86Xor64RM : kX86Xor32RM; break;
    case kOp2Byte: opcode = kX86Movsx8RM; break;
    case kOp2Short: opcode = kX86Movsx16RM; break;
    case kOp2Char: opcode = kX86Movzx16RM; break;
    case kOpMul:
    default:
      LOG(FATAL) << "Bad case in OpRegMem " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), offset);
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    DCHECK(r_base == rs_rX86_SP);
    AnnotateDalvikRegAccess(l, offset >> 2, true /* is_load */, false /* is_64bit */);
  }
  return l;
}

LIR* X86Mir2Lir::OpMemReg(OpKind op, RegLocation rl_dest, int r_value) {
  DCHECK_NE(rl_dest.location, kLocPhysReg);
  int displacement = SRegOffset(rl_dest.s_reg_low);
  bool is64Bit = rl_dest.wide != 0;
  X86OpCode opcode = kX86Nop;
  switch (op) {
    case kOpSub: opcode = is64Bit ? kX86Sub64MR : kX86Sub32MR; break;
    case kOpMov: opcode = is64Bit ? kX86Mov64MR : kX86Mov32MR; break;
    case kOpCmp: opcode = is64Bit ? kX86Cmp64MR : kX86Cmp32MR; break;
    case kOpAdd: opcode = is64Bit ? kX86Add64MR : kX86Add32MR; break;
    case kOpAnd: opcode = is64Bit ? kX86And64MR : kX86And32MR; break;
    case kOpOr:  opcode = is64Bit ? kX86Or64MR : kX86Or32MR; break;
    case kOpXor: opcode = is64Bit ? kX86Xor64MR : kX86Xor32MR; break;
    case kOpLsl: opcode = is64Bit ? kX86Sal64MC : kX86Sal32MC; break;
    case kOpLsr: opcode = is64Bit ? kX86Shr64MC : kX86Shr32MC; break;
    case kOpAsr: opcode = is64Bit ? kX86Sar64MC : kX86Sar32MC; break;
    default:
      LOG(FATAL) << "Bad case in OpMemReg " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, rs_rX86_SP.GetReg(), displacement, r_value);
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
    AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is64Bit /* is_64bit */);
  }
  return l;
}

LIR* X86Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegLocation rl_value) {
  DCHECK_NE(rl_value.location, kLocPhysReg);
  bool is64Bit = r_dest.Is64Bit();
  int displacement = SRegOffset(rl_value.s_reg_low);
  X86OpCode opcode = kX86Nop;
  switch (op) {
    case kOpSub: opcode = is64Bit ? kX86Sub64RM : kX86Sub32RM; break;
    case kOpMov: opcode = is64Bit ? kX86Mov64RM : kX86Mov32RM; break;
    case kOpCmp: opcode = is64Bit ? kX86Cmp64RM : kX86Cmp32RM; break;
    case kOpAdd: opcode = is64Bit ? kX86Add64RM : kX86Add32RM; break;
    case kOpAnd: opcode = is64Bit ? kX86And64RM : kX86And32RM; break;
    case kOpOr:  opcode = is64Bit ? kX86Or64RM : kX86Or32RM; break;
    case kOpXor: opcode = is64Bit ? kX86Xor64RM : kX86Xor32RM; break;
    case kOpMul: opcode = is64Bit ? kX86Bkpt : kX86Imul32RM; break;
    default:
      LOG(FATAL) << "Bad case in OpRegMem " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, r_dest.GetReg(), rs_rX86_SP.GetReg(), displacement);
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
  }
  return l;
}

LIR* X86Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
                             RegStorage r_src2) {
  bool is64Bit = r_dest.Is64Bit();
  if (r_dest != r_src1 && r_dest != r_src2) {
    if (op == kOpAdd) {  // lea special case, except can't encode rbp as base
      if (r_src1 == r_src2) {
        OpRegCopy(r_dest, r_src1);
        return OpRegImm(kOpLsl, r_dest, 1);
      } else if (r_src1 != rs_rBP) {
        return NewLIR5(is64Bit ?
                       kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
                       r_src1.GetReg() /* base */, r_src2.GetReg() /* index */,
                       0 /* scale */, 0 /* disp */);
      } else {
        return NewLIR5(is64Bit ? kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
                       r_src2.GetReg() /* base */, r_src1.GetReg() /* index */,
                       0 /* scale */, 0 /* disp */);
      }
    } else {
      OpRegCopy(r_dest, r_src1);
      return OpRegReg(op, r_dest, r_src2);
    }
  } else if (r_dest == r_src1) {
    return OpRegReg(op, r_dest, r_src2);
  } else {  // r_dest == r_src2
    switch (op) {
      case kOpSub:  // non-commutative
        OpReg(kOpNeg, r_dest);
        op = kOpAdd;
        break;
      case kOpSbc:
      case kOpLsl: case kOpLsr: case kOpAsr: case kOpRor: {
        RegStorage t_reg = AllocTemp();
        OpRegCopy(t_reg, r_src1);
        OpRegReg(op, t_reg, r_src2);
        LIR* res = OpRegCopyNoInsert(r_dest, t_reg);
        AppendLIR(res);
        FreeTemp(t_reg);
        return res;
      }
      case kOpAdd:  // commutative
      case kOpOr:
      case kOpAdc:
      case kOpAnd:
      case kOpXor:
        break;
      default:
        LOG(FATAL) << "Bad case in OpRegRegReg " << op;
    }
    return OpRegReg(op, r_dest, r_src1);
  }
}

LIR* X86Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src, int value) {
  if (op == kOpMul && !Gen64Bit()) {
    X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI;
    return NewLIR3(opcode, r_dest.GetReg(), r_src.GetReg(), value);
  } else if (op == kOpAnd && !Gen64Bit()) {
    if (value == 0xFF && r_src.Low4()) {
      return NewLIR2(kX86Movzx8RR, r_dest.GetReg(), r_src.GetReg());
    } else if (value == 0xFFFF) {
      return NewLIR2(kX86Movzx16RR, r_dest.GetReg(), r_src.GetReg());
    }
  }
  if (r_dest != r_src) {
    if (false && op == kOpLsl && value >= 0 && value <= 3) {  // lea shift special case
      // TODO: fix bug in LEA encoding when disp == 0
      return NewLIR5(kX86Lea32RA, r_dest.GetReg(), r5sib_no_base /* base */,
                     r_src.GetReg() /* index */, value /* scale */, 0 /* disp */);
    } else if (op == kOpAdd) {  // lea add special case
      return NewLIR5(r_dest.Is64Bit() ?
                     kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
                     r_src.GetReg() /* base */, rs_rX86_SP.GetReg()/*r4sib_no_index*/ /* index */,
                     0 /* scale */, value /* disp */);
    }
    OpRegCopy(r_dest, r_src);
  }
  return OpRegImm(op, r_dest, value);
}

LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
  DCHECK_EQ(kX86, cu_->instruction_set);
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallT;  break;
    case kOpBx: opcode = kX86JmpT;  break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR1(opcode, thread_offset.Int32Value());
}

LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) {
  DCHECK_EQ(kX86_64, cu_->instruction_set);
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallT;  break;
    case kOpBx: opcode = kX86JmpT;  break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR1(opcode, thread_offset.Int32Value());
}

LIR* X86Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallM;  break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR2(opcode, r_base.GetReg(), disp);
}

LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
  int32_t val_lo = Low32Bits(value);
  int32_t val_hi = High32Bits(value);
  int32_t low_reg_val = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
  LIR *res;
  bool is_fp = r_dest.IsFloat();
  // TODO: clean this up once we fully recognize 64-bit storage containers.
  if (is_fp) {
    if (value == 0) {
      return NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
    } else if (base_of_code_ != nullptr) {
      // We will load the value from the literal area.
      LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
      if (data_target == NULL) {
        data_target = AddWideData(&literal_list_, val_lo, val_hi);
      }

      // Address the start of the method
      RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
      if (rl_method.wide) {
        rl_method = LoadValueWide(rl_method, kCoreReg);
      } else {
        rl_method = LoadValue(rl_method, kCoreReg);
      }

      // Load the proper value from the literal area.
      // We don't know the proper offset for the value, so pick one that will force
      // 4 byte offset.  We will fix this up in the assembler later to have the right
      // value.
      ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
      res = LoadBaseDisp(rl_method.reg, 256 /* bogus */, RegStorage::FloatSolo64(low_reg_val),
                         kDouble, kNotVolatile);
      res->target = data_target;
      res->flags.fixup = kFixupLoad;
      store_method_addr_used_ = true;
    } else {
      if (val_lo == 0) {
        res = NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
      } else {
        res = LoadConstantNoClobber(RegStorage::FloatSolo32(low_reg_val), val_lo);
      }
      if (val_hi != 0) {
        RegStorage r_dest_hi = AllocTempDouble();
        LoadConstantNoClobber(r_dest_hi, val_hi);
        NewLIR2(kX86PunpckldqRR, low_reg_val, r_dest_hi.GetReg());
        FreeTemp(r_dest_hi);
      }
    }
  } else {
    if (r_dest.IsPair()) {
      res = LoadConstantNoClobber(r_dest.GetLow(), val_lo);
      LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
    } else {
      // TODO(64) make int64_t value parameter of LoadConstantNoClobber
      // The 64-bit add of val_lo below sign-extends it; a negative val_lo borrows one
      // from the high word, so pre-increment val_hi to compensate.
      if (val_lo < 0) {
        val_hi += 1;
      }
      if (val_hi != 0) {
        res = LoadConstantNoClobber(RegStorage::Solo32(r_dest.GetReg()), val_hi);
        NewLIR2(kX86Sal64RI, r_dest.GetReg(), 32);
      } else {
        res = NewLIR2(kX86Xor64RR, r_dest.GetReg(), r_dest.GetReg());
      }
      if (val_lo != 0) {
        NewLIR2(kX86Add64RI, r_dest.GetReg(), val_lo);
      }
    }
  }
  return res;
}

LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                     int displacement, RegStorage r_dest, OpSize size) {
  LIR *load = NULL;
  LIR *load2 = NULL;
  bool is_array = r_index.Valid();
  bool pair = r_dest.IsPair();
  bool is64bit = ((size == k64) || (size == kDouble));
  X86OpCode opcode = kX86Nop;
  switch (size) {
    case k64:
    case kDouble:
      if (r_dest.IsFloat()) {
        opcode = is_array ? kX86MovsdRA : kX86MovsdRM;
      } else if (!pair) {
        opcode = is_array ? kX86Mov64RA : kX86Mov64RM;
      } else {
        opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
      }
      // TODO: double store is to unaligned address
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kWord:
      if (Gen64Bit()) {
        opcode = is_array ? kX86Mov64RA : kX86Mov64RM;
        CHECK_EQ(is_array, false);
        CHECK_EQ(r_dest.IsFloat(), false);
        break;
      }  // else fall-through to k32 case
    case k32:
    case kSingle:
    case kReference:  // TODO: update for reference decompression on 64-bit targets.
      opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
      if (r_dest.IsFloat()) {
        opcode = is_array ? kX86MovssRA : kX86MovssRM;
        DCHECK(r_dest.IsFloat());
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
      opcode = is_array ? kX86Movzx16RA : kX86Movzx16RM;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kSignedHalf:
      opcode = is_array ? kX86Movsx16RA : kX86Movsx16RM;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
      opcode = is_array ? kX86Movzx8RA : kX86Movzx8RM;
      break;
    case kSignedByte:
      opcode = is_array ? kX86Movsx8RA : kX86Movsx8RM;
      break;
    default:
      LOG(FATAL) << "Bad case in LoadBaseIndexedDispBody";
  }

  if (!is_array) {
    if (!pair) {
      load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
    } else {
      DCHECK(!r_dest.IsFloat());  // Make sure we're not still using a pair here.
      if (r_base == r_dest.GetLow()) {
        load2 = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(),
                        displacement + HIWORD_OFFSET);
        load = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
      } else {
        load = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
        load2 = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(),
                        displacement + HIWORD_OFFSET);
      }
    }
    if (mem_ref_type_ == ResourceMask::kDalvikReg) {
      DCHECK(r_base == rs_rX86_SP);
      AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                              true /* is_load */, is64bit);
      if (pair) {
        AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
                                true /* is_load */, is64bit);
      }
    }
  } else {
    if (!pair) {
      load = NewLIR5(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                     displacement + LOWORD_OFFSET);
    } else {
      DCHECK(!r_dest.IsFloat());  // Make sure we're not still using a pair here.
      if (r_base == r_dest.GetLow()) {
        if (r_dest.GetHigh() == r_index) {
          // We can't use either register for the first load.
          RegStorage temp = AllocTemp();
          load2 = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + HIWORD_OFFSET);
          load = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + LOWORD_OFFSET);
          OpRegCopy(r_dest.GetHigh(), temp);
          FreeTemp(temp);
        } else {
          load2 = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + HIWORD_OFFSET);
          load = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + LOWORD_OFFSET);
        }
      } else {
        if (r_dest.GetLow() == r_index) {
          // We can't use either register for the first load.
          RegStorage temp = AllocTemp();
          load = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + LOWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + HIWORD_OFFSET);
          OpRegCopy(r_dest.GetLow(), temp);
          FreeTemp(temp);
        } else {
          load = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + LOWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + HIWORD_OFFSET);
        }
      }
    }
  }

  return load;
}

/* Load value from base + scaled index. */
LIR* X86Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                 int scale, OpSize size) {
  return LoadBaseIndexedDisp(r_base, r_index, scale, 0, r_dest, size);
}

LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                              OpSize size, VolatileKind is_volatile) {
  // LoadBaseDisp() will emit correct insn for atomic load on x86
  // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().

  LIR* load = LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_dest,
                                  size);

  if (UNLIKELY(is_volatile == kVolatile)) {
    // Without context sensitive analysis, we must issue the most conservative barriers.
    // In this case, either a load or store may follow so we issue both barriers.
    GenMemBarrier(kLoadLoad);
    GenMemBarrier(kLoadStore);
  }

  return load;
}

LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                      int displacement, RegStorage r_src, OpSize size) {
  LIR *store = NULL;
  LIR *store2 = NULL;
  bool is_array = r_index.Valid();
  bool pair = r_src.IsPair();
  bool is64bit = (size == k64) || (size == kDouble);
  X86OpCode opcode = kX86Nop;
  switch (size) {
    case k64:
    case kDouble:
      if (r_src.IsFloat()) {
        opcode = is_array ? kX86MovsdAR : kX86MovsdMR;
      } else if (!pair) {
        opcode = is_array ? kX86Mov64AR : kX86Mov64MR;
      } else {
        opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
      }
      // TODO: double store is to unaligned address
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kWord:
      if (Gen64Bit()) {
        opcode = is_array ? kX86Mov64AR : kX86Mov64MR;
        CHECK_EQ(is_array, false);
        CHECK_EQ(r_src.IsFloat(), false);
        break;
      }  // else fall-through to k32 case
    case k32:
    case kSingle:
    case kReference:
      opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
      if (r_src.IsFloat()) {
        opcode = is_array ? kX86MovssAR : kX86MovssMR;
        DCHECK(r_src.IsSingle());
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      opcode = is_array ? kX86Mov16AR : kX86Mov16MR;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = is_array ? kX86Mov8AR : kX86Mov8MR;
      break;
    default:
      LOG(FATAL) << "Bad case in StoreBaseIndexedDispBody";
  }

  if (!is_array) {
    if (!pair) {
      store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetReg());
    } else {
      DCHECK(!r_src.IsFloat());  // Make sure we're not still using a pair here.
      store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetLowReg());
      store2 = NewLIR3(opcode, r_base.GetReg(), displacement + HIWORD_OFFSET, r_src.GetHighReg());
    }
    if (mem_ref_type_ == ResourceMask::kDalvikReg) {
      DCHECK(r_base == rs_rX86_SP);
      AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                              false /* is_load */, is64bit);
      if (pair) {
        AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
                                false /* is_load */, is64bit);
      }
    }
  } else {
    if (!pair) {
      store = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
                      displacement + LOWORD_OFFSET, r_src.GetReg());
    } else {
      DCHECK(!r_src.IsFloat());  // Make sure we're not still using a pair here.
      store = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
                      displacement + LOWORD_OFFSET, r_src.GetLowReg());
      store2 = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
                       displacement + HIWORD_OFFSET, r_src.GetHighReg());
    }
  }
  return store;
}

/* Store value to base + scaled index. */
LIR* X86Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                  int scale, OpSize size) {
  return StoreBaseIndexedDisp(r_base, r_index, scale, 0, r_src, size);
}

LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
                               VolatileKind is_volatile) {
  if (UNLIKELY(is_volatile == kVolatile)) {
    // There might have been a store before this volatile one so insert a StoreStore barrier.
    GenMemBarrier(kStoreStore);
  }

  // StoreBaseDisp() will emit correct insn for atomic store on x86
  // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().

  LIR* store = StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src, size);

  if (UNLIKELY(is_volatile == kVolatile)) {
    // A load might follow the volatile store so insert a StoreLoad barrier.
    GenMemBarrier(kStoreLoad);
  }

  return store;
}

LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
                                   int offset, int check_value, LIR* target) {
  NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(), offset,
          check_value);
  LIR* branch = OpCondBranch(cond, target);
  return branch;
}

void X86Mir2Lir::AnalyzeMIR() {
  // Assume we don't need a pointer to the base of the code.
  cu_->NewTimingSplit("X86 MIR Analysis");
  store_method_addr_ = false;

  // Walk the MIR looking for interesting items.
  PreOrderDfsIterator iter(mir_graph_);
  BasicBlock* curr_bb = iter.Next();
  while (curr_bb != NULL) {
    AnalyzeBB(curr_bb);
    curr_bb = iter.Next();
  }

  // Did we need a pointer to the method code?
  if (store_method_addr_) {
    base_of_code_ = mir_graph_->GetNewCompilerTemp(kCompilerTempVR, Gen64Bit() == true);
  } else {
    base_of_code_ = nullptr;
  }
}

void X86Mir2Lir::AnalyzeBB(BasicBlock * bb) {
  if (bb->block_type == kDead) {
    // Ignore dead blocks
    return;
  }

  for (MIR *mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    int opcode = mir->dalvikInsn.opcode;
    if (MIRGraph::IsPseudoMirOp(opcode)) {
      AnalyzeExtendedMIR(opcode, bb, mir);
    } else {
      AnalyzeMIR(opcode, bb, mir);
    }
  }
}


void X86Mir2Lir::AnalyzeExtendedMIR(int opcode, BasicBlock * bb, MIR *mir) {
  switch (opcode) {
    // Instructions referencing doubles.
    case kMirOpFusedCmplDouble:
    case kMirOpFusedCmpgDouble:
      AnalyzeFPInstruction(opcode, bb, mir);
      break;
    case kMirOpConstVector:
      store_method_addr_ = true;
      break;
    default:
      // Ignore the rest.
      break;
  }
}

void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir) {
  // Looking for
  //  - Do we need a pointer to the code (used for packed switches and double lits)?

  switch (opcode) {
    // Instructions referencing doubles.
    case Instruction::CMPL_DOUBLE:
    case Instruction::CMPG_DOUBLE:
    case Instruction::NEG_DOUBLE:
    case Instruction::ADD_DOUBLE:
    case Instruction::SUB_DOUBLE:
    case Instruction::MUL_DOUBLE:
    case Instruction::DIV_DOUBLE:
    case Instruction::REM_DOUBLE:
    case Instruction::ADD_DOUBLE_2ADDR:
    case Instruction::SUB_DOUBLE_2ADDR:
    case Instruction::MUL_DOUBLE_2ADDR:
    case Instruction::DIV_DOUBLE_2ADDR:
    case Instruction::REM_DOUBLE_2ADDR:
      AnalyzeFPInstruction(opcode, bb, mir);
      break;

    // Packed switches and array fills need a pointer to the base of the method.
    case Instruction::FILL_ARRAY_DATA:
    case Instruction::PACKED_SWITCH:
      store_method_addr_ = true;
      break;
    case Instruction::INVOKE_STATIC:
      AnalyzeInvokeStatic(opcode, bb, mir);
      break;
    default:
      // Other instructions are not interesting yet.
      break;
  }
}

void X86Mir2Lir::AnalyzeFPInstruction(int opcode, BasicBlock * bb, MIR *mir) {
  // Look at all the uses, and see if they are double constants.
  uint64_t attrs = MIRGraph::GetDataFlowAttributes(static_cast<Instruction::Code>(opcode));
  int next_sreg = 0;
  if (attrs & DF_UA) {
    if (attrs & DF_A_WIDE) {
      AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg));
      next_sreg += 2;
    } else {
      next_sreg++;
    }
  }
  if (attrs & DF_UB) {
    if (attrs & DF_B_WIDE) {
      AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg));
      next_sreg += 2;
    } else {
      next_sreg++;
    }
  }
  if (attrs & DF_UC) {
    if (attrs & DF_C_WIDE) {
      AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg));
    }
  }
}

void X86Mir2Lir::AnalyzeDoubleUse(RegLocation use) {
  // If this is a double literal, we will want it in the literal pool.
  if (use.is_const) {
    store_method_addr_ = true;
  }
}

RegLocation X86Mir2Lir::UpdateLocTyped(RegLocation loc, int reg_class) {
  loc = UpdateLoc(loc);
  if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
    if (GetRegInfo(loc.reg)->IsTemp()) {
      Clobber(loc.reg);
      FreeTemp(loc.reg);
      loc.reg = RegStorage::InvalidReg();
      loc.location = kLocDalvikFrame;
    }
  }
  DCHECK(CheckCorePoolSanity());
  return loc;
}

RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc, int reg_class) {
  loc = UpdateLocWide(loc);
  if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
    if (GetRegInfo(loc.reg)->IsTemp()) {
      Clobber(loc.reg);
      FreeTemp(loc.reg);
      loc.reg = RegStorage::InvalidReg();
      loc.location = kLocDalvikFrame;
    }
  }
  DCHECK(CheckCorePoolSanity());
  return loc;
}

void X86Mir2Lir::AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir) {
  uint32_t index = mir->dalvikInsn.vB;
  if (!(mir->optimization_flags & MIR_INLINED)) {
    DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
    InlineMethod method;
    if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
        ->IsIntrinsic(index, &method)) {
      switch (method.opcode) {
        case kIntrinsicAbsDouble:
          store_method_addr_ = true;
          break;
        default:
          break;
      }
    }
  }
}
}  // namespace art