utility_x86.cc revision 60d7a65f7fb60f502160a2e479e86014c7787553
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "codegen_x86.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/dataflow_iterator-inl.h"
#include "x86_lir.h"

namespace art {

/* This file contains codegen for the X86 ISA */

LIR* X86Mir2Lir::OpFpRegCopy(int r_dest, int r_src) {
  int opcode;
  /* must be both DOUBLE or both not DOUBLE */
  DCHECK_EQ(X86_DOUBLEREG(r_dest), X86_DOUBLEREG(r_src));
  if (X86_DOUBLEREG(r_dest)) {
    opcode = kX86MovsdRR;
  } else {
    if (X86_SINGLEREG(r_dest)) {
      if (X86_SINGLEREG(r_src)) {
        opcode = kX86MovssRR;
      } else {  // Fpr <- Gpr
        opcode = kX86MovdxrRR;
      }
    } else {  // Gpr <- Fpr
      DCHECK(X86_SINGLEREG(r_src));
      opcode = kX86MovdrxRR;
    }
  }
  DCHECK_NE((EncodingMap[opcode].flags & IS_BINARY_OP), 0ULL);
  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
  if (r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) {
  return true;
}

bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) {
  return false;
}

bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) {
  return true;
}

bool X86Mir2Lir::InexpensiveConstantDouble(int64_t value) {
  return value == 0;
}

/*
 * Load an immediate using a shortcut if possible; otherwise
 * grab from the per-translation literal pool. If the target is
 * an FP register, build the constant in a core register first and copy it over.
 *
 * No additional register clobbering operation performed. Use this version when
 * 1) r_dest is freshly returned from AllocTemp or
 * 2) The codegen is under fixed register usage
 */
LIR* X86Mir2Lir::LoadConstantNoClobber(int r_dest, int value) {
  int r_dest_save = r_dest;
  if (X86_FPREG(r_dest)) {
    if (value == 0) {
      return NewLIR2(kX86XorpsRR, r_dest, r_dest);
    }
    DCHECK(X86_SINGLEREG(r_dest));
    r_dest = AllocTemp();
  }

  LIR *res;
  if (value == 0) {
    res = NewLIR2(kX86Xor32RR, r_dest, r_dest);
  } else {
    // Note, there is no byte immediate form of a 32-bit immediate move.
    res = NewLIR2(kX86Mov32RI, r_dest, value);
  }

  if (X86_FPREG(r_dest_save)) {
    NewLIR2(kX86MovdxrRR, r_dest_save, r_dest);
    FreeTemp(r_dest);
  }

  return res;
}
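// Branches are emitted with a zero displacement placeholder; the real offset
// is filled in when the LIR is assembled (and, if the 8-bit form can't reach
// the target, the instruction is widened to the 32-bit form at that point).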
LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target) {
  LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly */);
  res->target = target;
  return res;
}

LIR* X86Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
  LIR* branch = NewLIR2(kX86Jcc8, 0 /* offset to be patched */,
                        X86ConditionEncoding(cc));
  branch->target = target;
  return branch;
}

LIR* X86Mir2Lir::OpReg(OpKind op, int r_dest_src) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpNeg: opcode = kX86Neg32R; break;
    case kOpNot: opcode = kX86Not32R; break;
    case kOpRev: opcode = kX86Bswap32R; break;
    case kOpBlx: opcode = kX86CallR; break;
    default:
      LOG(FATAL) << "Bad case in OpReg " << op;
  }
  return NewLIR1(opcode, r_dest_src);
}

LIR* X86Mir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) {
  X86OpCode opcode = kX86Bkpt;
  bool byte_imm = IS_SIMM8(value);
  DCHECK(!X86_FPREG(r_dest_src1));
  switch (op) {
    case kOpLsl: opcode = kX86Sal32RI; break;
    case kOpLsr: opcode = kX86Shr32RI; break;
    case kOpAsr: opcode = kX86Sar32RI; break;
    case kOpAdd: opcode = byte_imm ? kX86Add32RI8 : kX86Add32RI; break;
    case kOpOr:  opcode = byte_imm ? kX86Or32RI8 : kX86Or32RI; break;
    case kOpAdc: opcode = byte_imm ? kX86Adc32RI8 : kX86Adc32RI; break;
    // case kOpSbb: opcode = kX86Sbb32RI; break;
    case kOpAnd: opcode = byte_imm ? kX86And32RI8 : kX86And32RI; break;
    case kOpSub: opcode = byte_imm ? kX86Sub32RI8 : kX86Sub32RI; break;
    case kOpXor: opcode = byte_imm ? kX86Xor32RI8 : kX86Xor32RI; break;
    case kOpCmp: opcode = byte_imm ? kX86Cmp32RI8 : kX86Cmp32RI; break;
    case kOpMov:
      /*
       * Moving the constant zero into a register could be specialized as an xor
       * of the register. However, xor sets eflags while mov does not. For that
       * reason we always emit the mov here; callers that can tolerate clobbered
       * flags should be calling LoadConstantNoClobber instead.
       */
      opcode = kX86Mov32RI;
      break;
    case kOpMul:
      opcode = byte_imm ? kX86Imul32RRI8 : kX86Imul32RRI;
      return NewLIR3(opcode, r_dest_src1, r_dest_src1, value);
    default:
      LOG(FATAL) << "Bad case in OpRegImm " << op;
  }
  return NewLIR2(opcode, r_dest_src1, value);
}
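// Register-to-register ALU operation. The unary kinds (mvn/neg/rev/revsh) are
// synthesized as a copy followed by the corresponding single-operand form;
// variable shifts require the count in CL, which the CHECK below enforces.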
LIR* X86Mir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) {
  X86OpCode opcode = kX86Nop;
  bool src2_must_be_cx = false;
  switch (op) {
    // X86 unary opcodes
    case kOpMvn:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpNot, r_dest_src1);
    case kOpNeg:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpNeg, r_dest_src1);
    case kOpRev:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpRev, r_dest_src1);
    case kOpRevsh:
      OpRegCopy(r_dest_src1, r_src2);
      OpReg(kOpRev, r_dest_src1);
      return OpRegImm(kOpAsr, r_dest_src1, 16);
    // X86 binary opcodes
    case kOpSub: opcode = kX86Sub32RR; break;
    case kOpSbc: opcode = kX86Sbb32RR; break;
    case kOpLsl: opcode = kX86Sal32RC; src2_must_be_cx = true; break;
    case kOpLsr: opcode = kX86Shr32RC; src2_must_be_cx = true; break;
    case kOpAsr: opcode = kX86Sar32RC; src2_must_be_cx = true; break;
    case kOpMov: opcode = kX86Mov32RR; break;
    case kOpCmp: opcode = kX86Cmp32RR; break;
    case kOpAdd: opcode = kX86Add32RR; break;
    case kOpAdc: opcode = kX86Adc32RR; break;
    case kOpAnd: opcode = kX86And32RR; break;
    case kOpOr:  opcode = kX86Or32RR; break;
    case kOpXor: opcode = kX86Xor32RR; break;
    case kOp2Byte:
      // Use shifts instead of a byte operand if the source can't be byte accessed
      // (only registers 0-3 have 8-bit forms in 32-bit mode).
      if (r_src2 >= 4) {
        NewLIR2(kX86Mov32RR, r_dest_src1, r_src2);
        NewLIR2(kX86Sal32RI, r_dest_src1, 24);
        return NewLIR2(kX86Sar32RI, r_dest_src1, 24);
      } else {
        opcode = kX86Movsx8RR;
      }
      break;
    case kOp2Short: opcode = kX86Movsx16RR; break;
    case kOp2Char: opcode = kX86Movzx16RR; break;
    case kOpMul: opcode = kX86Imul32RR; break;
    default:
      LOG(FATAL) << "Bad case in OpRegReg " << op;
      break;
  }
  CHECK(!src2_must_be_cx || r_src2 == rCX);
  return NewLIR2(opcode, r_dest_src1, r_src2);
}

LIR* X86Mir2Lir::OpMovRegMem(int r_dest, int r_base, int offset, MoveType move_type) {
  DCHECK(!(X86_FPREG(r_base)));

  X86OpCode opcode = kX86Nop;
  switch (move_type) {
    case kMov8GP:
      CHECK(!X86_FPREG(r_dest));
      opcode = kX86Mov8RM;
      break;
    case kMov16GP:
      CHECK(!X86_FPREG(r_dest));
      opcode = kX86Mov16RM;
      break;
    case kMov32GP:
      CHECK(!X86_FPREG(r_dest));
      opcode = kX86Mov32RM;
      break;
    case kMov32FP:
      CHECK(X86_FPREG(r_dest));
      opcode = kX86MovssRM;
      break;
    case kMov64FP:
      CHECK(X86_FPREG(r_dest));
      opcode = kX86MovsdRM;
      break;
    case kMovU128FP:
      CHECK(X86_FPREG(r_dest));
      opcode = kX86MovupsRM;
      break;
    case kMovA128FP:
      CHECK(X86_FPREG(r_dest));
      opcode = kX86MovapsRM;
      break;
    case kMovLo128FP:
      CHECK(X86_FPREG(r_dest));
      opcode = kX86MovlpsRM;
      break;
    case kMovHi128FP:
      CHECK(X86_FPREG(r_dest));
      opcode = kX86MovhpsRM;
      break;
    case kMov64GP:
    case kMovLo64FP:
    case kMovHi64FP:
    default:
      LOG(FATAL) << "Bad case in OpMovRegMem";
      break;
  }

  return NewLIR3(opcode, r_dest, r_base, offset);
}
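// Store counterpart of OpMovRegMem: the same MoveType dispatch, with the
// memory operand as the destination.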
LIR* X86Mir2Lir::OpMovMemReg(int r_base, int offset, int r_src, MoveType move_type) {
  DCHECK(!(X86_FPREG(r_base)));

  X86OpCode opcode = kX86Nop;
  switch (move_type) {
    case kMov8GP:
      CHECK(!X86_FPREG(r_src));
      opcode = kX86Mov8MR;
      break;
    case kMov16GP:
      CHECK(!X86_FPREG(r_src));
      opcode = kX86Mov16MR;
      break;
    case kMov32GP:
      CHECK(!X86_FPREG(r_src));
      opcode = kX86Mov32MR;
      break;
    case kMov32FP:
      CHECK(X86_FPREG(r_src));
      opcode = kX86MovssMR;
      break;
    case kMov64FP:
      CHECK(X86_FPREG(r_src));
      opcode = kX86MovsdMR;
      break;
    case kMovU128FP:
      CHECK(X86_FPREG(r_src));
      opcode = kX86MovupsMR;
      break;
    case kMovA128FP:
      CHECK(X86_FPREG(r_src));
      opcode = kX86MovapsMR;
      break;
    case kMovLo128FP:
      CHECK(X86_FPREG(r_src));
      opcode = kX86MovlpsMR;
      break;
    case kMovHi128FP:
      CHECK(X86_FPREG(r_src));
      opcode = kX86MovhpsMR;
      break;
    case kMov64GP:
    case kMovLo64FP:
    case kMovHi64FP:
    default:
      LOG(FATAL) << "Bad case in OpMovMemReg";
      break;
  }

  return NewLIR3(opcode, r_base, offset, r_src);
}

LIR* X86Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, int r_dest, int r_src) {
  // The only conditional reg to reg operation supported is Cmov
  DCHECK_EQ(op, kOpCmov);
  return NewLIR3(kX86Cmov32RRC, r_dest, r_src, X86ConditionEncoding(cc));
}

LIR* X86Mir2Lir::OpRegMem(OpKind op, int r_dest, int rBase,
                          int offset) {
  X86OpCode opcode = kX86Nop;
  switch (op) {
    // X86 binary opcodes
    case kOpSub: opcode = kX86Sub32RM; break;
    case kOpMov: opcode = kX86Mov32RM; break;
    case kOpCmp: opcode = kX86Cmp32RM; break;
    case kOpAdd: opcode = kX86Add32RM; break;
    case kOpAnd: opcode = kX86And32RM; break;
    case kOpOr:  opcode = kX86Or32RM; break;
    case kOpXor: opcode = kX86Xor32RM; break;
    case kOp2Byte: opcode = kX86Movsx8RM; break;
    case kOp2Short: opcode = kX86Movsx16RM; break;
    case kOp2Char: opcode = kX86Movzx16RM; break;
    case kOpMul:
    default:
      LOG(FATAL) << "Bad case in OpRegMem " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, r_dest, rBase, offset);
  if (rBase == rX86_SP) {
    AnnotateDalvikRegAccess(l, offset >> 2, true /* is_load */, false /* is_64bit */);
  }
  return l;
}
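// The RegLocation overloads below operate directly on a Dalvik register's
// stack home (rX86_SP + SRegOffset), letting ALU operations take a memory
// operand instead of first loading the value into a core register.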
LIR* X86Mir2Lir::OpMemReg(OpKind op, RegLocation rl_dest, int r_value) {
  DCHECK_NE(rl_dest.location, kLocPhysReg);
  int displacement = SRegOffset(rl_dest.s_reg_low);
  X86OpCode opcode = kX86Nop;
  switch (op) {
    case kOpSub: opcode = kX86Sub32MR; break;
    case kOpMov: opcode = kX86Mov32MR; break;
    case kOpCmp: opcode = kX86Cmp32MR; break;
    case kOpAdd: opcode = kX86Add32MR; break;
    case kOpAnd: opcode = kX86And32MR; break;
    case kOpOr:  opcode = kX86Or32MR; break;
    case kOpXor: opcode = kX86Xor32MR; break;
    case kOpLsl: opcode = kX86Sal32MC; break;
    case kOpLsr: opcode = kX86Shr32MC; break;
    case kOpAsr: opcode = kX86Sar32MC; break;
    default:
      LOG(FATAL) << "Bad case in OpMemReg " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, rX86_SP, displacement, r_value);
  AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, false /* is_64bit */);
  return l;
}

LIR* X86Mir2Lir::OpRegMem(OpKind op, int r_dest, RegLocation rl_value) {
  DCHECK_NE(rl_value.location, kLocPhysReg);
  int displacement = SRegOffset(rl_value.s_reg_low);
  X86OpCode opcode = kX86Nop;
  switch (op) {
    case kOpSub: opcode = kX86Sub32RM; break;
    case kOpMov: opcode = kX86Mov32RM; break;
    case kOpCmp: opcode = kX86Cmp32RM; break;
    case kOpAdd: opcode = kX86Add32RM; break;
    case kOpAnd: opcode = kX86And32RM; break;
    case kOpOr:  opcode = kX86Or32RM; break;
    case kOpXor: opcode = kX86Xor32RM; break;
    case kOpMul: opcode = kX86Imul32RM; break;
    default:
      LOG(FATAL) << "Bad case in OpRegMem " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, r_dest, rX86_SP, displacement);
  AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, false /* is_64bit */);
  return l;
}

LIR* X86Mir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1,
                             int r_src2) {
  if (r_dest != r_src1 && r_dest != r_src2) {
    if (op == kOpAdd) {  // lea special case, except can't encode rbp as base
      if (r_src1 == r_src2) {
        OpRegCopy(r_dest, r_src1);
        return OpRegImm(kOpLsl, r_dest, 1);
      } else if (r_src1 != rBP) {
        return NewLIR5(kX86Lea32RA, r_dest, r_src1 /* base */,
                       r_src2 /* index */, 0 /* scale */, 0 /* disp */);
      } else {
        return NewLIR5(kX86Lea32RA, r_dest, r_src2 /* base */,
                       r_src1 /* index */, 0 /* scale */, 0 /* disp */);
      }
    } else {
      OpRegCopy(r_dest, r_src1);
      return OpRegReg(op, r_dest, r_src2);
    }
  } else if (r_dest == r_src1) {
    return OpRegReg(op, r_dest, r_src2);
  } else {  // r_dest == r_src2
    switch (op) {
      case kOpSub:  // non-commutative
        OpReg(kOpNeg, r_dest);
        op = kOpAdd;
        break;
      case kOpSbc:
      case kOpLsl: case kOpLsr: case kOpAsr: case kOpRor: {
        int t_reg = AllocTemp();
        OpRegCopy(t_reg, r_src1);
        OpRegReg(op, t_reg, r_src2);
        LIR* res = OpRegCopy(r_dest, t_reg);
        FreeTemp(t_reg);
        return res;
      }
      case kOpAdd:  // commutative
      case kOpOr:
      case kOpAdc:
      case kOpAnd:
      case kOpXor:
        break;
      default:
        LOG(FATAL) << "Bad case in OpRegRegReg " << op;
    }
    return OpRegReg(op, r_dest, r_src1);
  }
}
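// Three-operand form with an immediate. Multiplies map directly onto the
// three-operand imul; adds can use lea to avoid clobbering the source, e.g.
// "lea r_dest, [r_src + value]" computes r_dest = r_src + value in a single
// instruction without touching eflags.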
LIR* X86Mir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src,
                             int value) {
  if (op == kOpMul) {
    X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI;
    return NewLIR3(opcode, r_dest, r_src, value);
  } else if (op == kOpAnd) {
    if (value == 0xFF && r_src < 4) {
      return NewLIR2(kX86Movzx8RR, r_dest, r_src);
    } else if (value == 0xFFFF) {
      return NewLIR2(kX86Movzx16RR, r_dest, r_src);
    }
  }
  if (r_dest != r_src) {
    if (false && op == kOpLsl && value >= 0 && value <= 3) {  // lea shift special case
      // TODO: fix bug in LEA encoding when disp == 0
      return NewLIR5(kX86Lea32RA, r_dest, r5sib_no_base /* base */,
                     r_src /* index */, value /* scale */, 0 /* disp */);
    } else if (op == kOpAdd) {  // lea add special case
      return NewLIR5(kX86Lea32RA, r_dest, r_src /* base */,
                     r4sib_no_index /* index */, 0 /* scale */, value /* disp */);
    }
    OpRegCopy(r_dest, r_src);
  }
  return OpRegImm(op, r_dest, value);
}

LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset thread_offset) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallT; break;
    case kOpBx: opcode = kX86JmpT; break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR1(opcode, thread_offset.Int32Value());
}

LIR* X86Mir2Lir::OpMem(OpKind op, int rBase, int disp) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallM; break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR2(opcode, rBase, disp);
}
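// Materializing a 64-bit constant into an XMM register packs the two 32-bit
// halves with punpckldq. For example (a sketch; actual register choices are
// up to the allocator), loading 0x123456789ABCDEF0 becomes roughly:
//   mov       eax, 0x9ABCDEF0
//   movd      xmm0, eax        // low half in bits 31..0
//   mov       ecx, 0x12345678
//   movd      xmm1, ecx        // high half
//   punpckldq xmm0, xmm1       // xmm0[63..0] = 0x123456789ABCDEF0
// When a base-of-method pointer is available, the constant is instead loaded
// from the literal pool with a movsd relative to the method's base address.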
LIR* X86Mir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) {
  int32_t val_lo = Low32Bits(value);
  int32_t val_hi = High32Bits(value);
  LIR *res;
  if (X86_FPREG(r_dest_lo)) {
    DCHECK(X86_FPREG(r_dest_hi));  // ignore r_dest_hi
    DCHECK_EQ(r_dest_lo, r_dest_hi);
    if (value == 0) {
      return NewLIR2(kX86XorpsRR, r_dest_lo, r_dest_lo);
    } else if (base_of_code_ != nullptr) {
      // We will load the value from the literal area.
      LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
      if (data_target == NULL) {
        data_target = AddWideData(&literal_list_, val_lo, val_hi);
      }

      // Load the address of the start of the method.
      RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
      rl_method = LoadValue(rl_method, kCoreReg);

      // Load the proper value from the literal area.
      // We don't know the proper offset for the value, so pick one that will
      // force a 4-byte offset. We will fix this up in the assembler later to
      // have the right value.
      res = LoadBaseDisp(rl_method.reg.GetReg(), 256 /* bogus */, r_dest_lo, kDouble, INVALID_SREG);
      res->target = data_target;
      res->flags.fixup = kFixupLoad;
      SetMemRefType(res, true, kLiteral);
      store_method_addr_used_ = true;
    } else {
      if (val_lo == 0) {
        res = NewLIR2(kX86XorpsRR, r_dest_lo, r_dest_lo);
      } else {
        res = LoadConstantNoClobber(r_dest_lo, val_lo);
      }
      if (val_hi != 0) {
        r_dest_hi = AllocTempDouble();
        LoadConstantNoClobber(r_dest_hi, val_hi);
        NewLIR2(kX86PunpckldqRR, r_dest_lo, r_dest_hi);
        FreeTemp(r_dest_hi);
      }
    }
  } else {
    res = LoadConstantNoClobber(r_dest_lo, val_lo);
    LoadConstantNoClobber(r_dest_hi, val_hi);
  }
  return res;
}
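// General load from [rBase + r_index * scale + displacement]. 64-bit values
// destined for core registers are split into a pair of 32-bit loads; the
// ordering below is chosen so that a load never clobbers its own base or
// index register before the second half has been read.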
LIR* X86Mir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale,
                                     int displacement, int r_dest, int r_dest_hi, OpSize size,
                                     int s_reg) {
  LIR *load = NULL;
  LIR *load2 = NULL;
  bool is_array = r_index != INVALID_REG;
  bool pair = false;
  bool is64bit = false;
  X86OpCode opcode = kX86Nop;
  switch (size) {
    case kLong:
    case kDouble:
      is64bit = true;
      if (X86_FPREG(r_dest)) {
        opcode = is_array ? kX86MovsdRA : kX86MovsdRM;
      } else {
        pair = true;
        opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
      }
      // TODO: double load is to an unaligned address
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kWord:
    case kSingle:
      opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
      if (X86_FPREG(r_dest)) {
        opcode = is_array ? kX86MovssRA : kX86MovssRM;
        DCHECK(X86_SINGLEREG(r_dest));
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
      opcode = is_array ? kX86Movzx16RA : kX86Movzx16RM;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kSignedHalf:
      opcode = is_array ? kX86Movsx16RA : kX86Movsx16RM;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
      opcode = is_array ? kX86Movzx8RA : kX86Movzx8RM;
      break;
    case kSignedByte:
      opcode = is_array ? kX86Movsx8RA : kX86Movsx8RM;
      break;
    default:
      LOG(FATAL) << "Bad case in LoadBaseIndexedDispBody";
  }

  if (!is_array) {
    if (!pair) {
      load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
    } else {
      if (rBase == r_dest) {
        load2 = NewLIR3(opcode, r_dest_hi, rBase,
                        displacement + HIWORD_OFFSET);
        load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
      } else {
        load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
        load2 = NewLIR3(opcode, r_dest_hi, rBase,
                        displacement + HIWORD_OFFSET);
      }
    }
    if (rBase == rX86_SP) {
      AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                              true /* is_load */, is64bit);
      if (pair) {
        AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
                                true /* is_load */, is64bit);
      }
    }
  } else {
    if (!pair) {
      load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
                     displacement + LOWORD_OFFSET);
    } else {
      if (rBase == r_dest) {
        if (r_dest_hi == r_index) {
          // We can't use either register for the first load.
          int temp = AllocTemp();
          load2 = NewLIR5(opcode, temp, rBase, r_index, scale,
                          displacement + HIWORD_OFFSET);
          load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
                         displacement + LOWORD_OFFSET);
          OpRegCopy(r_dest_hi, temp);
          FreeTemp(temp);
        } else {
          load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
                          displacement + HIWORD_OFFSET);
          load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
                         displacement + LOWORD_OFFSET);
        }
      } else {
        if (r_dest == r_index) {
          // We can't use either register for the first load.
          int temp = AllocTemp();
          load = NewLIR5(opcode, temp, rBase, r_index, scale,
                         displacement + LOWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
                          displacement + HIWORD_OFFSET);
          OpRegCopy(r_dest, temp);
          FreeTemp(temp);
        } else {
          load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
                         displacement + LOWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
                          displacement + HIWORD_OFFSET);
        }
      }
    }
  }

  return load;
}

/* Load value from base + scaled index. */
LIR* X86Mir2Lir::LoadBaseIndexed(int rBase,
                                 int r_index, int r_dest, int scale, OpSize size) {
  return LoadBaseIndexedDisp(rBase, r_index, scale, 0,
                             r_dest, INVALID_REG, size, INVALID_SREG);
}

LIR* X86Mir2Lir::LoadBaseDisp(int rBase, int displacement,
                              int r_dest, OpSize size, int s_reg) {
  return LoadBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
                             r_dest, INVALID_REG, size, s_reg);
}

LIR* X86Mir2Lir::LoadBaseDispWide(int rBase, int displacement,
                                  int r_dest_lo, int r_dest_hi, int s_reg) {
  return LoadBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
                             r_dest_lo, r_dest_hi, kLong, s_reg);
}
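// Store counterpart of LoadBaseIndexedDisp. Stores read their register
// operands before writing memory, so no register-overlap shuffling is
// needed here; pairs are simply written low word first, then high word.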
LIR* X86Mir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale,
                                      int displacement, int r_src, int r_src_hi, OpSize size,
                                      int s_reg) {
  LIR *store = NULL;
  LIR *store2 = NULL;
  bool is_array = r_index != INVALID_REG;
  bool pair = false;
  bool is64bit = false;
  X86OpCode opcode = kX86Nop;
  switch (size) {
    case kLong:
    case kDouble:
      is64bit = true;
      if (X86_FPREG(r_src)) {
        opcode = is_array ? kX86MovsdAR : kX86MovsdMR;
      } else {
        pair = true;
        opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
      }
      // TODO: double store is to an unaligned address
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kWord:
    case kSingle:
      opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
      if (X86_FPREG(r_src)) {
        opcode = is_array ? kX86MovssAR : kX86MovssMR;
        DCHECK(X86_SINGLEREG(r_src));
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      opcode = is_array ? kX86Mov16AR : kX86Mov16MR;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = is_array ? kX86Mov8AR : kX86Mov8MR;
      break;
    default:
      LOG(FATAL) << "Bad case in StoreBaseIndexedDispBody";
  }

  if (!is_array) {
    if (!pair) {
      store = NewLIR3(opcode, rBase, displacement + LOWORD_OFFSET, r_src);
    } else {
      store = NewLIR3(opcode, rBase, displacement + LOWORD_OFFSET, r_src);
      store2 = NewLIR3(opcode, rBase, displacement + HIWORD_OFFSET, r_src_hi);
    }
    if (rBase == rX86_SP) {
      AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                              false /* is_load */, is64bit);
      if (pair) {
        AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
                                false /* is_load */, is64bit);
      }
    }
  } else {
    if (!pair) {
      store = NewLIR5(opcode, rBase, r_index, scale,
                      displacement + LOWORD_OFFSET, r_src);
    } else {
      store = NewLIR5(opcode, rBase, r_index, scale,
                      displacement + LOWORD_OFFSET, r_src);
      store2 = NewLIR5(opcode, rBase, r_index, scale,
                       displacement + HIWORD_OFFSET, r_src_hi);
    }
  }

  return store;
}

/* Store value at base + scaled index. */
LIR* X86Mir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src,
                                  int scale, OpSize size) {
  return StoreBaseIndexedDisp(rBase, r_index, scale, 0,
                              r_src, INVALID_REG, size, INVALID_SREG);
}

LIR* X86Mir2Lir::StoreBaseDisp(int rBase, int displacement,
                               int r_src, OpSize size) {
  return StoreBaseIndexedDisp(rBase, INVALID_REG, 0,
                              displacement, r_src, INVALID_REG, size,
                              INVALID_SREG);
}

LIR* X86Mir2Lir::StoreBaseDispWide(int rBase, int displacement,
                                   int r_src_lo, int r_src_hi) {
  return StoreBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
                              r_src_lo, r_src_hi, kLong, INVALID_SREG);
}

/*
 * Copy a long value held in a pair of core registers into an XMM register.
 */
void X86Mir2Lir::OpVectorRegCopyWide(uint8_t fp_reg, uint8_t low_reg, uint8_t high_reg) {
  NewLIR2(kX86MovdxrRR, fp_reg, low_reg);
  int tmp_reg = AllocTempDouble();
  NewLIR2(kX86MovdxrRR, tmp_reg, high_reg);
  NewLIR2(kX86PunpckldqRR, fp_reg, tmp_reg);
  FreeTemp(tmp_reg);
}

LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, int temp_reg, int base_reg,
                                   int offset, int check_value, LIR* target) {
  NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg, offset,
          check_value);
  LIR* branch = OpCondBranch(cond, target);
  return branch;
}
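// MIR analysis pre-pass: walks the method's blocks before code generation to
// decide whether a pointer to the base of the method's code is needed. It is
// needed for packed switches, array-data fills, and double-constant literals,
// all of which are addressed relative to the method base.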
void X86Mir2Lir::AnalyzeMIR() {
  // Assume we don't need a pointer to the base of the code.
  cu_->NewTimingSplit("X86 MIR Analysis");
  store_method_addr_ = false;

  // Walk the MIR looking for interesting items.
  PreOrderDfsIterator iter(mir_graph_);
  BasicBlock* curr_bb = iter.Next();
  while (curr_bb != NULL) {
    AnalyzeBB(curr_bb);
    curr_bb = iter.Next();
  }

  // Did we need a pointer to the method code?
  if (store_method_addr_) {
    base_of_code_ = mir_graph_->GetNewCompilerTemp(kCompilerTempVR, false);
  } else {
    base_of_code_ = nullptr;
  }
}

void X86Mir2Lir::AnalyzeBB(BasicBlock* bb) {
  if (bb->block_type == kDead) {
    // Ignore dead blocks
    return;
  }

  for (MIR *mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    int opcode = mir->dalvikInsn.opcode;
    if (opcode >= kMirOpFirst) {
      AnalyzeExtendedMIR(opcode, bb, mir);
    } else {
      AnalyzeMIR(opcode, bb, mir);
    }
  }
}

void X86Mir2Lir::AnalyzeExtendedMIR(int opcode, BasicBlock* bb, MIR* mir) {
  switch (opcode) {
    // Instructions referencing doubles.
    case kMirOpFusedCmplDouble:
    case kMirOpFusedCmpgDouble:
      AnalyzeFPInstruction(opcode, bb, mir);
      break;
    default:
      // Ignore the rest.
      break;
  }
}

void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock* bb, MIR* mir) {
  // Looking for: do we need a pointer to the code (used for packed switches
  // and double literals)?
  switch (opcode) {
    // Instructions referencing doubles.
    case Instruction::CMPL_DOUBLE:
    case Instruction::CMPG_DOUBLE:
    case Instruction::NEG_DOUBLE:
    case Instruction::ADD_DOUBLE:
    case Instruction::SUB_DOUBLE:
    case Instruction::MUL_DOUBLE:
    case Instruction::DIV_DOUBLE:
    case Instruction::REM_DOUBLE:
    case Instruction::ADD_DOUBLE_2ADDR:
    case Instruction::SUB_DOUBLE_2ADDR:
    case Instruction::MUL_DOUBLE_2ADDR:
    case Instruction::DIV_DOUBLE_2ADDR:
    case Instruction::REM_DOUBLE_2ADDR:
      AnalyzeFPInstruction(opcode, bb, mir);
      break;

    // Packed switches and array fills need a pointer to the base of the method.
    case Instruction::FILL_ARRAY_DATA:
    case Instruction::PACKED_SWITCH:
      store_method_addr_ = true;
      break;
    default:
      // Other instructions are not interesting yet.
      break;
  }
}

void X86Mir2Lir::AnalyzeFPInstruction(int opcode, BasicBlock* bb, MIR* mir) {
  // Look at all the uses, and see if they are double constants.
  uint64_t attrs = mir_graph_->oat_data_flow_attributes_[opcode];
  int next_sreg = 0;
  if (attrs & DF_UA) {
    if (attrs & DF_A_WIDE) {
      AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg));
      next_sreg += 2;
    } else {
      next_sreg++;
    }
  }
  if (attrs & DF_UB) {
    if (attrs & DF_B_WIDE) {
      AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg));
      next_sreg += 2;
    } else {
      next_sreg++;
    }
  }
  if (attrs & DF_UC) {
    if (attrs & DF_C_WIDE) {
      AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg));
    }
  }
}

void X86Mir2Lir::AnalyzeDoubleUse(RegLocation use) {
  // If this is a double literal, we will want it in the literal pool.
  if (use.is_const) {
    store_method_addr_ = true;
  }
}

}  // namespace art