utility_x86.cc revision 0025a86411145eb7cd4971f9234fc21c7b4aced1
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "codegen_x86.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/dataflow_iterator-inl.h"
#include "x86_lir.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/reg_storage_eq.h"

namespace art {

/* This file contains codegen for the X86 ISA */

LIR* X86Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
  int opcode;
  /* must be both DOUBLE or both not DOUBLE */
  DCHECK(r_dest.IsFloat() || r_src.IsFloat());
  DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
  if (r_dest.IsDouble()) {
    opcode = kX86MovsdRR;
  } else {
    if (r_dest.IsSingle()) {
      if (r_src.IsSingle()) {
        opcode = kX86MovssRR;
      } else {  // Fpr <- Gpr
        opcode = kX86MovdxrRR;
      }
    } else {  // Gpr <- Fpr
      DCHECK(r_src.IsSingle()) << "Raw: 0x" << std::hex << r_src.GetRawBits();
      opcode = kX86MovdrxRR;
    }
  }
  DCHECK_NE((EncodingMap[opcode].flags & IS_BINARY_OP), 0ULL);
  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
  if (r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) {
  return true;
}

bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) {
  return false;
}

bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) {
  return true;
}

bool X86Mir2Lir::InexpensiveConstantDouble(int64_t value) {
  return value == 0;
}

/*
 * Load an immediate using a shortcut if possible; otherwise
 * grab from the per-translation literal pool.  If target is
 * a high register, build constant into a low register and copy.
 *
 * No additional register clobbering operation performed. Use this version when
 * 1) r_dest is freshly returned from AllocTemp or
 * 2) The codegen is under fixed register usage
 */
LIR* X86Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
  RegStorage r_dest_save = r_dest;
  if (r_dest.IsFloat()) {
    if (value == 0) {
      return NewLIR2(kX86XorpsRR, r_dest.GetReg(), r_dest.GetReg());
    }
    r_dest = AllocTemp();
  }

  LIR *res;
  if (value == 0) {
    res = NewLIR2(kX86Xor32RR, r_dest.GetReg(), r_dest.GetReg());
  } else {
    // Note, there is no byte immediate form of a 32 bit immediate move.
    // 64-bit immediate is not supported by LIR structure
    res = NewLIR2(kX86Mov32RI, r_dest.GetReg(), value);
  }

  if (r_dest_save.IsFloat()) {
    NewLIR2(kX86MovdxrRR, r_dest_save.GetReg(), r_dest.GetReg());
    FreeTemp(r_dest);
  }

  return res;
}

LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target) {
  LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly*/);
  res->target = target;
  return res;
}

LIR* X86Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
  LIR* branch = NewLIR2(kX86Jcc8, 0 /* offset to be patched */,
                        X86ConditionEncoding(cc));
  branch->target = target;
  return branch;
}

LIR* X86Mir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpNeg: opcode = r_dest_src.Is64Bit() ? kX86Neg64R : kX86Neg32R; break;
    case kOpNot: opcode = r_dest_src.Is64Bit() ? kX86Not64R : kX86Not32R; break;
    case kOpRev: opcode = r_dest_src.Is64Bit() ? kX86Bswap64R : kX86Bswap32R; break;
    case kOpBlx: opcode = kX86CallR; break;
    default:
      LOG(FATAL) << "Bad case in OpReg " << op;
  }
  return NewLIR1(opcode, r_dest_src.GetReg());
}

LIR* X86Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
  X86OpCode opcode = kX86Bkpt;
  bool byte_imm = IS_SIMM8(value);
  DCHECK(!r_dest_src1.IsFloat());
  if (r_dest_src1.Is64Bit()) {
    switch (op) {
      case kOpAdd: opcode = byte_imm ? kX86Add64RI8 : kX86Add64RI; break;
      case kOpSub: opcode = byte_imm ? kX86Sub64RI8 : kX86Sub64RI; break;
      case kOpLsl: opcode = kX86Sal64RI; break;
      case kOpLsr: opcode = kX86Shr64RI; break;
      case kOpAsr: opcode = kX86Sar64RI; break;
      case kOpCmp: opcode = byte_imm ? kX86Cmp64RI8 : kX86Cmp64RI; break;
      default:
        LOG(FATAL) << "Bad case in OpRegImm (64-bit) " << op;
    }
  } else {
    switch (op) {
      case kOpLsl: opcode = kX86Sal32RI; break;
      case kOpLsr: opcode = kX86Shr32RI; break;
      case kOpAsr: opcode = kX86Sar32RI; break;
      case kOpAdd: opcode = byte_imm ? kX86Add32RI8 : kX86Add32RI; break;
      case kOpOr:  opcode = byte_imm ? kX86Or32RI8 : kX86Or32RI; break;
      case kOpAdc: opcode = byte_imm ? kX86Adc32RI8 : kX86Adc32RI; break;
      // case kOpSbb: opcode = kX86Sbb32RI; break;
      case kOpAnd: opcode = byte_imm ? kX86And32RI8 : kX86And32RI; break;
      case kOpSub: opcode = byte_imm ? kX86Sub32RI8 : kX86Sub32RI; break;
      case kOpXor: opcode = byte_imm ? kX86Xor32RI8 : kX86Xor32RI; break;
      case kOpCmp: opcode = byte_imm ? kX86Cmp32RI8 : kX86Cmp32RI; break;
      case kOpMov:
        /*
         * Moving the constant zero into register can be specialized as an xor of the register.
         * However, that sets eflags while the move does not. For that reason here, always do
         * the move and if caller is flexible, they should be calling LoadConstantNoClobber instead.
         */
        opcode = kX86Mov32RI;
        break;
      case kOpMul:
        opcode = byte_imm ? kX86Imul32RRI8 : kX86Imul32RRI;
        return NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), value);
      case kOp2Byte:
        opcode = kX86Mov32RI;
        value = static_cast<int8_t>(value);
        break;
      case kOp2Short:
        opcode = kX86Mov32RI;
        value = static_cast<int16_t>(value);
        break;
      case kOp2Char:
        opcode = kX86Mov32RI;
        value = static_cast<uint16_t>(value);
        break;
      case kOpNeg:
        opcode = kX86Mov32RI;
        value = -value;
        break;
      default:
        LOG(FATAL) << "Bad case in OpRegImm " << op;
    }
  }
  return NewLIR2(opcode, r_dest_src1.GetReg(), value);
}

LIR* X86Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
  bool is64Bit = r_dest_src1.Is64Bit();
  X86OpCode opcode = kX86Nop;
  bool src2_must_be_cx = false;
  switch (op) {
    // X86 unary opcodes
    case kOpMvn:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpNot, r_dest_src1);
    case kOpNeg:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpNeg, r_dest_src1);
    case kOpRev:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpRev, r_dest_src1);
    case kOpRevsh:
      OpRegCopy(r_dest_src1, r_src2);
      OpReg(kOpRev, r_dest_src1);
      return OpRegImm(kOpAsr, r_dest_src1, 16);
    // X86 binary opcodes
    case kOpSub: opcode = is64Bit ? kX86Sub64RR : kX86Sub32RR; break;
    case kOpSbc: opcode = is64Bit ? kX86Sbb64RR : kX86Sbb32RR; break;
    case kOpLsl: opcode = is64Bit ? kX86Sal64RC : kX86Sal32RC; src2_must_be_cx = true; break;
    case kOpLsr: opcode = is64Bit ? kX86Shr64RC : kX86Shr32RC; src2_must_be_cx = true; break;
    case kOpAsr: opcode = is64Bit ? kX86Sar64RC : kX86Sar32RC; src2_must_be_cx = true; break;
    case kOpMov: opcode = is64Bit ? kX86Mov64RR : kX86Mov32RR; break;
    case kOpCmp: opcode = is64Bit ? kX86Cmp64RR : kX86Cmp32RR; break;
    case kOpAdd: opcode = is64Bit ? kX86Add64RR : kX86Add32RR; break;
    case kOpAdc: opcode = is64Bit ? kX86Adc64RR : kX86Adc32RR; break;
    case kOpAnd: opcode = is64Bit ? kX86And64RR : kX86And32RR; break;
    case kOpOr:  opcode = is64Bit ? kX86Or64RR : kX86Or32RR; break;
    case kOpXor: opcode = is64Bit ? kX86Xor64RR : kX86Xor32RR; break;
    case kOp2Byte:
      // TODO: there are several instances of this check.  A utility function perhaps?
      // TODO: Similar to Arm's reg < 8 check.  Perhaps add attribute checks to RegStorage?
      // Use shifts instead of a byte operand if the source can't be byte accessed.
      if (r_src2.GetRegNum() >= rs_rX86_SP.GetRegNum()) {
        NewLIR2(is64Bit ? kX86Mov64RR : kX86Mov32RR, r_dest_src1.GetReg(), r_src2.GetReg());
        NewLIR2(is64Bit ? kX86Sal64RI : kX86Sal32RI, r_dest_src1.GetReg(), is64Bit ? 56 : 24);
        return NewLIR2(is64Bit ? kX86Sar64RI : kX86Sar32RI, r_dest_src1.GetReg(),
                       is64Bit ? 56 : 24);
      } else {
        opcode = is64Bit ? kX86Bkpt : kX86Movsx8RR;
      }
      break;
    case kOp2Short: opcode = is64Bit ? kX86Bkpt : kX86Movsx16RR; break;
    case kOp2Char: opcode = is64Bit ? kX86Bkpt : kX86Movzx16RR; break;
    case kOpMul: opcode = is64Bit ? kX86Bkpt : kX86Imul32RR; break;
    default:
      LOG(FATAL) << "Bad case in OpRegReg " << op;
      break;
  }
  CHECK(!src2_must_be_cx || r_src2.GetReg() == rs_rCX.GetReg());
  return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
}

LIR* X86Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
  DCHECK(!r_base.IsFloat());
  X86OpCode opcode = kX86Nop;
  int dest = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
  switch (move_type) {
    case kMov8GP:
      CHECK(!r_dest.IsFloat());
      opcode = kX86Mov8RM;
      break;
    case kMov16GP:
      CHECK(!r_dest.IsFloat());
      opcode = kX86Mov16RM;
      break;
    case kMov32GP:
      CHECK(!r_dest.IsFloat());
      opcode = kX86Mov32RM;
      break;
    case kMov32FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovssRM;
      break;
    case kMov64FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovsdRM;
      break;
    case kMovU128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovupsRM;
      break;
    case kMovA128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovapsRM;
      break;
    case kMovLo128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovlpsRM;
      break;
    case kMovHi128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovhpsRM;
      break;
    case kMov64GP:
    case kMovLo64FP:
    case kMovHi64FP:
    default:
      LOG(FATAL) << "Bad case in OpMovRegMem";
      break;
  }

  return NewLIR3(opcode, dest, r_base.GetReg(), offset);
}

LIR* X86Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
  DCHECK(!r_base.IsFloat());
  int src = r_src.IsPair() ? r_src.GetLowReg() : r_src.GetReg();

  X86OpCode opcode = kX86Nop;
  switch (move_type) {
    case kMov8GP:
      CHECK(!r_src.IsFloat());
      opcode = kX86Mov8MR;
      break;
    case kMov16GP:
      CHECK(!r_src.IsFloat());
      opcode = kX86Mov16MR;
      break;
    case kMov32GP:
      CHECK(!r_src.IsFloat());
      opcode = kX86Mov32MR;
      break;
    case kMov32FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovssMR;
      break;
    case kMov64FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovsdMR;
      break;
    case kMovU128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovupsMR;
      break;
    case kMovA128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovapsMR;
      break;
    case kMovLo128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovlpsMR;
      break;
    case kMovHi128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovhpsMR;
      break;
    case kMov64GP:
    case kMovLo64FP:
    case kMovHi64FP:
    default:
      LOG(FATAL) << "Bad case in OpMovMemReg";
      break;
  }

  return NewLIR3(opcode, r_base.GetReg(), offset, src);
}

LIR* X86Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
  // The only conditional reg to reg operation supported is Cmov
  DCHECK_EQ(op, kOpCmov);
  DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());
  return NewLIR3(r_dest.Is64Bit() ? kX86Cmov64RRC : kX86Cmov32RRC, r_dest.GetReg(),
                 r_src.GetReg(), X86ConditionEncoding(cc));
}

LIR* X86Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
  bool is64Bit = r_dest.Is64Bit();
  X86OpCode opcode = kX86Nop;
  switch (op) {
    // X86 binary opcodes
    case kOpSub: opcode = is64Bit ? kX86Sub64RM : kX86Sub32RM; break;
    case kOpMov: opcode = is64Bit ? kX86Mov64RM : kX86Mov32RM; break;
    case kOpCmp: opcode = is64Bit ? kX86Cmp64RM : kX86Cmp32RM; break;
    case kOpAdd: opcode = is64Bit ? kX86Add64RM : kX86Add32RM; break;
    case kOpAnd: opcode = is64Bit ? kX86And64RM : kX86And32RM; break;
    case kOpOr:  opcode = is64Bit ? kX86Or64RM : kX86Or32RM; break;
    case kOpXor: opcode = is64Bit ? kX86Xor64RM : kX86Xor32RM; break;
    case kOp2Byte: opcode = kX86Movsx8RM; break;
    case kOp2Short: opcode = kX86Movsx16RM; break;
    case kOp2Char: opcode = kX86Movzx16RM; break;
    case kOpMul:
    default:
      LOG(FATAL) << "Bad case in OpRegMem " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), offset);
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    DCHECK(r_base == rs_rX86_SP);
    AnnotateDalvikRegAccess(l, offset >> 2, true /* is_load */, false /* is_64bit */);
  }
  return l;
}

LIR* X86Mir2Lir::OpMemReg(OpKind op, RegLocation rl_dest, int r_value) {
  DCHECK_NE(rl_dest.location, kLocPhysReg);
  int displacement = SRegOffset(rl_dest.s_reg_low);
  bool is64Bit = rl_dest.wide != 0;
  X86OpCode opcode = kX86Nop;
  switch (op) {
    case kOpSub: opcode = is64Bit ? kX86Sub64MR : kX86Sub32MR; break;
    case kOpMov: opcode = is64Bit ? kX86Mov64MR : kX86Mov32MR; break;
    case kOpCmp: opcode = is64Bit ? kX86Cmp64MR : kX86Cmp32MR; break;
    case kOpAdd: opcode = is64Bit ? kX86Add64MR : kX86Add32MR; break;
    case kOpAnd: opcode = is64Bit ? kX86And64MR : kX86And32MR; break;
    case kOpOr:  opcode = is64Bit ? kX86Or64MR : kX86Or32MR; break;
    case kOpXor: opcode = is64Bit ? kX86Xor64MR : kX86Xor32MR; break;
    case kOpLsl: opcode = is64Bit ? kX86Sal64MC : kX86Sal32MC; break;
    case kOpLsr: opcode = is64Bit ? kX86Shr64MC : kX86Shr32MC; break;
    case kOpAsr: opcode = is64Bit ? kX86Sar64MC : kX86Sar32MC; break;
    default:
      LOG(FATAL) << "Bad case in OpMemReg " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, rs_rX86_SP.GetReg(), displacement, r_value);
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
    AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is64Bit /* is_64bit */);
  }
  return l;
}

LIR* X86Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegLocation rl_value) {
  DCHECK_NE(rl_value.location, kLocPhysReg);
  bool is64Bit = r_dest.Is64Bit();
  int displacement = SRegOffset(rl_value.s_reg_low);
  X86OpCode opcode = kX86Nop;
  switch (op) {
    case kOpSub: opcode = is64Bit ? kX86Sub64RM : kX86Sub32RM; break;
    case kOpMov: opcode = is64Bit ? kX86Mov64RM : kX86Mov32RM; break;
    case kOpCmp: opcode = is64Bit ? kX86Cmp64RM : kX86Cmp32RM; break;
    case kOpAdd: opcode = is64Bit ? kX86Add64RM : kX86Add32RM; break;
    case kOpAnd: opcode = is64Bit ? kX86And64RM : kX86And32RM; break;
    case kOpOr:  opcode = is64Bit ? kX86Or64RM : kX86Or32RM; break;
    case kOpXor: opcode = is64Bit ? kX86Xor64RM : kX86Xor32RM; break;
    case kOpMul: opcode = is64Bit ? kX86Bkpt : kX86Imul32RM; break;
    default:
      LOG(FATAL) << "Bad case in OpRegMem " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, r_dest.GetReg(), rs_rX86_SP.GetReg(), displacement);
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
  }
  return l;
}

LIR* X86Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
                             RegStorage r_src2) {
  bool is64Bit = r_dest.Is64Bit();
  if (r_dest != r_src1 && r_dest != r_src2) {
    if (op == kOpAdd) {  // lea special case, except can't encode rbp as base
      if (r_src1 == r_src2) {
        OpRegCopy(r_dest, r_src1);
        return OpRegImm(kOpLsl, r_dest, 1);
      } else if (r_src1 != rs_rBP) {
        return NewLIR5(is64Bit ? kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
                       r_src1.GetReg() /* base */, r_src2.GetReg() /* index */,
                       0 /* scale */, 0 /* disp */);
      } else {
        return NewLIR5(is64Bit ? kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
                       r_src2.GetReg() /* base */, r_src1.GetReg() /* index */,
                       0 /* scale */, 0 /* disp */);
      }
    } else {
      OpRegCopy(r_dest, r_src1);
      return OpRegReg(op, r_dest, r_src2);
    }
  } else if (r_dest == r_src1) {
    return OpRegReg(op, r_dest, r_src2);
  } else {  // r_dest == r_src2
    switch (op) {
      case kOpSub:  // non-commutative
        OpReg(kOpNeg, r_dest);
        op = kOpAdd;
        break;
      case kOpSbc:
      case kOpLsl: case kOpLsr: case kOpAsr: case kOpRor: {
        RegStorage t_reg = AllocTemp();
        OpRegCopy(t_reg, r_src1);
        OpRegReg(op, t_reg, r_src2);
        LIR* res = OpRegCopyNoInsert(r_dest, t_reg);
        AppendLIR(res);
        FreeTemp(t_reg);
        return res;
      }
      case kOpAdd:  // commutative
      case kOpOr:
      case kOpAdc:
      case kOpAnd:
      case kOpXor:
        break;
      default:
        LOG(FATAL) << "Bad case in OpRegRegReg " << op;
    }
    return OpRegReg(op, r_dest, r_src1);
  }
}

LIR* X86Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src, int value) {
  if (op == kOpMul && !cu_->target64) {
    X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI;
    return NewLIR3(opcode, r_dest.GetReg(), r_src.GetReg(), value);
  } else if (op == kOpAnd && !cu_->target64) {
    if (value == 0xFF && r_src.Low4()) {
      return NewLIR2(kX86Movzx8RR, r_dest.GetReg(), r_src.GetReg());
    } else if (value == 0xFFFF) {
      return NewLIR2(kX86Movzx16RR, r_dest.GetReg(), r_src.GetReg());
    }
  }
  if (r_dest != r_src) {
    if (false && op == kOpLsl && value >= 0 && value <= 3) {  // lea shift special case
      // TODO: fix bug in LEA encoding when disp == 0
      return NewLIR5(kX86Lea32RA, r_dest.GetReg(), r5sib_no_base /* base */,
                     r_src.GetReg() /* index */, value /* scale */, 0 /* disp */);
    } else if (op == kOpAdd) {  // lea add special case
      return NewLIR5(r_dest.Is64Bit() ? kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
                     r_src.GetReg() /* base */, rs_rX86_SP.GetReg()/*r4sib_no_index*/ /* index */,
                     0 /* scale */, value /* disp */);
    }
    OpRegCopy(r_dest, r_src);
  }
  return OpRegImm(op, r_dest, value);
}

LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
  DCHECK_EQ(kX86, cu_->instruction_set);
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallT; break;
    case kOpBx: opcode = kX86JmpT; break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR1(opcode, thread_offset.Int32Value());
}

LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) {
  DCHECK_EQ(kX86_64, cu_->instruction_set);
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallT; break;
    case kOpBx: opcode = kX86JmpT; break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR1(opcode, thread_offset.Int32Value());
}

LIR* X86Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallM; break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR2(opcode, r_base.GetReg(), disp);
}

LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
  int32_t val_lo = Low32Bits(value);
  int32_t val_hi = High32Bits(value);
  int32_t low_reg_val = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
  LIR *res;
  bool is_fp = r_dest.IsFloat();
  // TODO: clean this up once we fully recognize 64-bit storage containers.
  if (is_fp) {
    if (value == 0) {
      return NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
    } else if (base_of_code_ != nullptr) {
      // We will load the value from the literal area.
      LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
      if (data_target == NULL) {
        data_target = AddWideData(&literal_list_, val_lo, val_hi);
      }

      // Address the start of the method
      RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
      if (rl_method.wide) {
        rl_method = LoadValueWide(rl_method, kCoreReg);
      } else {
        rl_method = LoadValue(rl_method, kCoreReg);
      }

      // Load the proper value from the literal area.
      // We don't know the proper offset for the value, so pick one that will force
      // 4 byte offset.  We will fix this up in the assembler later to have the right
      // value.
      ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
      res = LoadBaseDisp(rl_method.reg, 256 /* bogus */, RegStorage::FloatSolo64(low_reg_val),
                         kDouble, kNotVolatile);
      res->target = data_target;
      res->flags.fixup = kFixupLoad;
      store_method_addr_used_ = true;
    } else {
      if (val_lo == 0) {
        res = NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
      } else {
        res = LoadConstantNoClobber(RegStorage::FloatSolo32(low_reg_val), val_lo);
      }
      if (val_hi != 0) {
        RegStorage r_dest_hi = AllocTempDouble();
        LoadConstantNoClobber(r_dest_hi, val_hi);
        NewLIR2(kX86PunpckldqRR, low_reg_val, r_dest_hi.GetReg());
        FreeTemp(r_dest_hi);
      }
    }
  } else {
    if (r_dest.IsPair()) {
      res = LoadConstantNoClobber(r_dest.GetLow(), val_lo);
      LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
    } else {
      if (value == 0) {
        res = NewLIR2(kX86Xor64RR, r_dest.GetReg(), r_dest.GetReg());
      } else if (value >= INT_MIN && value <= INT_MAX) {
        res = NewLIR2(kX86Mov64RI32, r_dest.GetReg(), val_lo);
      } else {
        res = NewLIR3(kX86Mov64RI64, r_dest.GetReg(), val_hi, val_lo);
      }
    }
  }
  return res;
}

LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                     int displacement, RegStorage r_dest, OpSize size) {
  LIR *load = NULL;
  LIR *load2 = NULL;
  bool is_array = r_index.Valid();
  bool pair = r_dest.IsPair();
  bool is64bit = ((size == k64) || (size == kDouble));
  X86OpCode opcode = kX86Nop;
  switch (size) {
    case k64:
    case kDouble:
      if (r_dest.IsFloat()) {
        opcode = is_array ? kX86MovsdRA : kX86MovsdRM;
      } else if (!pair) {
        opcode = is_array ? kX86Mov64RA : kX86Mov64RM;
      } else {
        opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
      }
      // TODO: double store is to unaligned address
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kWord:
      if (cu_->target64) {
        opcode = is_array ? kX86Mov64RA : kX86Mov64RM;
        CHECK_EQ(is_array, false);
        CHECK_EQ(r_dest.IsFloat(), false);
        break;
      }  // else fall-through to k32 case
    case k32:
    case kSingle:
    case kReference:  // TODO: update for reference decompression on 64-bit targets.
      opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
      if (r_dest.IsFloat()) {
        opcode = is_array ? kX86MovssRA : kX86MovssRM;
        DCHECK(r_dest.IsFloat());
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
      opcode = is_array ? kX86Movzx16RA : kX86Movzx16RM;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kSignedHalf:
      opcode = is_array ? kX86Movsx16RA : kX86Movsx16RM;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
      opcode = is_array ? kX86Movzx8RA : kX86Movzx8RM;
      break;
    case kSignedByte:
      opcode = is_array ? kX86Movsx8RA : kX86Movsx8RM;
      break;
    default:
      LOG(FATAL) << "Bad case in LoadBaseIndexedDispBody";
  }

  if (!is_array) {
    if (!pair) {
      load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
    } else {
      DCHECK(!r_dest.IsFloat());  // Make sure we're not still using a pair here.
      if (r_base == r_dest.GetLow()) {
        load2 = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(),
                        displacement + HIWORD_OFFSET);
        load = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
      } else {
        load = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
        load2 = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(),
                        displacement + HIWORD_OFFSET);
      }
    }
    if (mem_ref_type_ == ResourceMask::kDalvikReg) {
      DCHECK(r_base == rs_rX86_SP);
      AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                              true /* is_load */, is64bit);
      if (pair) {
        AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
                                true /* is_load */, is64bit);
      }
    }
  } else {
    if (!pair) {
      load = NewLIR5(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                     displacement + LOWORD_OFFSET);
    } else {
      DCHECK(!r_dest.IsFloat());  // Make sure we're not still using a pair here.
      if (r_base == r_dest.GetLow()) {
        if (r_dest.GetHigh() == r_index) {
          // We can't use either register for the first load.
          RegStorage temp = AllocTemp();
          load2 = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + HIWORD_OFFSET);
          load = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + LOWORD_OFFSET);
          OpRegCopy(r_dest.GetHigh(), temp);
          FreeTemp(temp);
        } else {
          load2 = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + HIWORD_OFFSET);
          load = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + LOWORD_OFFSET);
        }
      } else {
        if (r_dest.GetLow() == r_index) {
          // We can't use either register for the first load.
          RegStorage temp = AllocTemp();
          load = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + LOWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + HIWORD_OFFSET);
          OpRegCopy(r_dest.GetLow(), temp);
          FreeTemp(temp);
        } else {
          load = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + LOWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + HIWORD_OFFSET);
        }
      }
    }
  }

  return load;
}

/* Load value from base + scaled index. */
LIR* X86Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                 int scale, OpSize size) {
  return LoadBaseIndexedDisp(r_base, r_index, scale, 0, r_dest, size);
}

LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                              OpSize size, VolatileKind is_volatile) {
  // LoadBaseDisp() will emit correct insn for atomic load on x86
  // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().

  LIR* load = LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_dest,
                                  size);

  if (UNLIKELY(is_volatile == kVolatile)) {
    // Without context sensitive analysis, we must issue the most conservative barriers.
    // In this case, either a load or store may follow so we issue both barriers.
    GenMemBarrier(kLoadLoad);
    GenMemBarrier(kLoadStore);
  }

  return load;
}

LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                      int displacement, RegStorage r_src, OpSize size) {
  LIR *store = NULL;
  LIR *store2 = NULL;
  bool is_array = r_index.Valid();
  bool pair = r_src.IsPair();
  bool is64bit = (size == k64) || (size == kDouble);
  X86OpCode opcode = kX86Nop;
  switch (size) {
    case k64:
    case kDouble:
      if (r_src.IsFloat()) {
        opcode = is_array ? kX86MovsdAR : kX86MovsdMR;
      } else if (!pair) {
        opcode = is_array ? kX86Mov64AR : kX86Mov64MR;
      } else {
        opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
      }
      // TODO: double store is to unaligned address
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kWord:
      if (cu_->target64) {
        opcode = is_array ? kX86Mov64AR : kX86Mov64MR;
        CHECK_EQ(is_array, false);
        CHECK_EQ(r_src.IsFloat(), false);
        break;
      }  // else fall-through to k32 case
    case k32:
    case kSingle:
    case kReference:
      opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
      if (r_src.IsFloat()) {
        opcode = is_array ? kX86MovssAR : kX86MovssMR;
        DCHECK(r_src.IsSingle());
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      opcode = is_array ? kX86Mov16AR : kX86Mov16MR;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = is_array ? kX86Mov8AR : kX86Mov8MR;
      break;
    default:
      LOG(FATAL) << "Bad case in StoreBaseIndexedDispBody";
  }

  if (!is_array) {
    if (!pair) {
      store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetReg());
    } else {
      DCHECK(!r_src.IsFloat());  // Make sure we're not still using a pair here.
      store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetLowReg());
      store2 = NewLIR3(opcode, r_base.GetReg(), displacement + HIWORD_OFFSET, r_src.GetHighReg());
    }
    if (mem_ref_type_ == ResourceMask::kDalvikReg) {
      DCHECK(r_base == rs_rX86_SP);
      AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                              false /* is_load */, is64bit);
      if (pair) {
        AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
                                false /* is_load */, is64bit);
      }
    }
  } else {
    if (!pair) {
      store = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
                      displacement + LOWORD_OFFSET, r_src.GetReg());
    } else {
      DCHECK(!r_src.IsFloat());  // Make sure we're not still using a pair here.
      store = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
                      displacement + LOWORD_OFFSET, r_src.GetLowReg());
      store2 = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
                       displacement + HIWORD_OFFSET, r_src.GetHighReg());
    }
  }
  return store;
}

/* Store value to base + scaled index. */
LIR* X86Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                  int scale, OpSize size) {
  return StoreBaseIndexedDisp(r_base, r_index, scale, 0, r_src, size);
}

LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
                               VolatileKind is_volatile) {
  if (UNLIKELY(is_volatile == kVolatile)) {
    // There might have been a store before this volatile one so insert StoreStore barrier.
    GenMemBarrier(kStoreStore);
  }

  // StoreBaseDisp() will emit correct insn for atomic store on x86
  // assuming r_src is correctly prepared using RegClassForFieldLoadStore().

  LIR* store = StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src, size);

  if (UNLIKELY(is_volatile == kVolatile)) {
    // A load might follow the volatile store so insert a StoreLoad barrier.
    GenMemBarrier(kStoreLoad);
  }

  return store;
}

LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
                                   int offset, int check_value, LIR* target) {
  NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(), offset,
          check_value);
  LIR* branch = OpCondBranch(cond, target);
  return branch;
}

void X86Mir2Lir::AnalyzeMIR() {
  // Assume we don't need a pointer to the base of the code.
  cu_->NewTimingSplit("X86 MIR Analysis");
  store_method_addr_ = false;

  // Walk the MIR looking for interesting items.
  PreOrderDfsIterator iter(mir_graph_);
  BasicBlock* curr_bb = iter.Next();
  while (curr_bb != NULL) {
    AnalyzeBB(curr_bb);
    curr_bb = iter.Next();
  }

  // Did we need a pointer to the method code?
  if (store_method_addr_) {
    base_of_code_ = mir_graph_->GetNewCompilerTemp(kCompilerTempVR, cu_->target64 == true);
  } else {
    base_of_code_ = nullptr;
  }
}

void X86Mir2Lir::AnalyzeBB(BasicBlock * bb) {
  if (bb->block_type == kDead) {
    // Ignore dead blocks
    return;
  }

  for (MIR *mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    int opcode = mir->dalvikInsn.opcode;
    if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
      AnalyzeExtendedMIR(opcode, bb, mir);
    } else {
      AnalyzeMIR(opcode, bb, mir);
    }
  }
}


void X86Mir2Lir::AnalyzeExtendedMIR(int opcode, BasicBlock * bb, MIR *mir) {
  switch (opcode) {
    // Instructions referencing doubles.
    case kMirOpFusedCmplDouble:
    case kMirOpFusedCmpgDouble:
      AnalyzeFPInstruction(opcode, bb, mir);
      break;
    case kMirOpConstVector:
      store_method_addr_ = true;
      break;
    default:
      // Ignore the rest.
      break;
  }
}

void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir) {
  // Looking for
  //  - Do we need a pointer to the code (used for packed switches and double lits)?

  switch (opcode) {
    // Instructions referencing doubles.
    case Instruction::CMPL_DOUBLE:
    case Instruction::CMPG_DOUBLE:
    case Instruction::NEG_DOUBLE:
    case Instruction::ADD_DOUBLE:
    case Instruction::SUB_DOUBLE:
    case Instruction::MUL_DOUBLE:
    case Instruction::DIV_DOUBLE:
    case Instruction::REM_DOUBLE:
    case Instruction::ADD_DOUBLE_2ADDR:
    case Instruction::SUB_DOUBLE_2ADDR:
    case Instruction::MUL_DOUBLE_2ADDR:
    case Instruction::DIV_DOUBLE_2ADDR:
    case Instruction::REM_DOUBLE_2ADDR:
      AnalyzeFPInstruction(opcode, bb, mir);
      break;

    // Packed switches and array fills need a pointer to the base of the method.
    case Instruction::FILL_ARRAY_DATA:
    case Instruction::PACKED_SWITCH:
      store_method_addr_ = true;
      break;
    case Instruction::INVOKE_STATIC:
      AnalyzeInvokeStatic(opcode, bb, mir);
      break;
    default:
      // Other instructions are not interesting yet.
      break;
  }
}

void X86Mir2Lir::AnalyzeFPInstruction(int opcode, BasicBlock * bb, MIR *mir) {
  // Look at all the uses, and see if they are double constants.
  uint64_t attrs = MIRGraph::GetDataFlowAttributes(static_cast<Instruction::Code>(opcode));
  int next_sreg = 0;
  if (attrs & DF_UA) {
    if (attrs & DF_A_WIDE) {
      AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg));
      next_sreg += 2;
    } else {
      next_sreg++;
    }
  }
  if (attrs & DF_UB) {
    if (attrs & DF_B_WIDE) {
      AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg));
      next_sreg += 2;
    } else {
      next_sreg++;
    }
  }
  if (attrs & DF_UC) {
    if (attrs & DF_C_WIDE) {
      AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg));
    }
  }
}

void X86Mir2Lir::AnalyzeDoubleUse(RegLocation use) {
  // If this is a double literal, we will want it in the literal pool.
  if (use.is_const) {
    store_method_addr_ = true;
  }
}

RegLocation X86Mir2Lir::UpdateLocTyped(RegLocation loc, int reg_class) {
  loc = UpdateLoc(loc);
  if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
    if (GetRegInfo(loc.reg)->IsTemp()) {
      Clobber(loc.reg);
      FreeTemp(loc.reg);
      loc.reg = RegStorage::InvalidReg();
      loc.location = kLocDalvikFrame;
    }
  }
  DCHECK(CheckCorePoolSanity());
  return loc;
}

RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc, int reg_class) {
  loc = UpdateLocWide(loc);
  if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
    if (GetRegInfo(loc.reg)->IsTemp()) {
      Clobber(loc.reg);
      FreeTemp(loc.reg);
      loc.reg = RegStorage::InvalidReg();
      loc.location = kLocDalvikFrame;
    }
  }
  DCHECK(CheckCorePoolSanity());
  return loc;
}

void X86Mir2Lir::AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir) {
  uint32_t index = mir->dalvikInsn.vB;
  if (!(mir->optimization_flags & MIR_INLINED)) {
    DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
    InlineMethod method;
    if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
        ->IsIntrinsic(index, &method)) {
      switch (method.opcode) {
        case kIntrinsicAbsDouble:
          store_method_addr_ = true;
          break;
        default:
          break;
      }
    }
  }
}
}  // namespace art