utility_x86.cc revision 2700f7e1edbcd2518f4978e4cd0e05a4149f91b6
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "codegen_x86.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/dataflow_iterator-inl.h"
#include "x86_lir.h"

namespace art {

/* This file contains codegen for the X86 ISA */

LIR* X86Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
  int opcode;
  /* must be both DOUBLE or both not DOUBLE */
  DCHECK_EQ(X86_DOUBLEREG(r_dest.GetReg()), X86_DOUBLEREG(r_src.GetReg()));
  if (X86_DOUBLEREG(r_dest.GetReg())) {
    opcode = kX86MovsdRR;
  } else {
    if (X86_SINGLEREG(r_dest.GetReg())) {
      if (X86_SINGLEREG(r_src.GetReg())) {
        opcode = kX86MovssRR;
      } else {  // Fpr <- Gpr
        opcode = kX86MovdxrRR;
      }
    } else {  // Gpr <- Fpr
      DCHECK(X86_SINGLEREG(r_src.GetReg()));
      opcode = kX86MovdrxRR;
    }
  }
  DCHECK_NE((EncodingMap[opcode].flags & IS_BINARY_OP), 0ULL);
  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
  if (r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) {
  return true;
}

bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) {
  return false;
}

bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) {
  return true;
}

bool X86Mir2Lir::InexpensiveConstantDouble(int64_t value) {
  return value == 0;
}
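
/*
 * Note on the Inexpensive* predicates above: x86 can encode arbitrary 32-bit
 * immediates directly in the instruction stream (and a 64-bit long is simply
 * built from two 32-bit halves), so integer constants never need the literal
 * pool.  Float constants are not considered cheap, and the only cheap double
 * is 0.0, which can be materialized with an xorps (see LoadConstantNoClobber
 * and LoadConstantWide below).
 */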

/*
 * Load an immediate using a shortcut if possible; otherwise
 * grab from the per-translation literal pool.  If target is
 * a high register, build constant into a low register and copy.
 *
 * No additional register clobbering operation is performed. Use this version when
 * 1) r_dest is freshly returned from AllocTemp, or
 * 2) the codegen is under fixed register usage.
 */
LIR* X86Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
  RegStorage r_dest_save = r_dest;
  if (X86_FPREG(r_dest.GetReg())) {
    if (value == 0) {
      return NewLIR2(kX86XorpsRR, r_dest.GetReg(), r_dest.GetReg());
    }
    DCHECK(X86_SINGLEREG(r_dest.GetReg()));
    r_dest = AllocTemp();
  }

  LIR *res;
  if (value == 0) {
    res = NewLIR2(kX86Xor32RR, r_dest.GetReg(), r_dest.GetReg());
  } else {
    // Note, there is no byte immediate form of a 32 bit immediate move.
    res = NewLIR2(kX86Mov32RI, r_dest.GetReg(), value);
  }

  if (X86_FPREG(r_dest_save.GetReg())) {
    NewLIR2(kX86MovdxrRR, r_dest_save.GetReg(), r_dest.GetReg());
    FreeTemp(r_dest);
  }

  return res;
}

LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target) {
  LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly */);
  res->target = target;
  return res;
}

LIR* X86Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
  LIR* branch = NewLIR2(kX86Jcc8, 0 /* offset to be patched */,
                        X86ConditionEncoding(cc));
  branch->target = target;
  return branch;
}

LIR* X86Mir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpNeg: opcode = kX86Neg32R; break;
    case kOpNot: opcode = kX86Not32R; break;
    case kOpRev: opcode = kX86Bswap32R; break;
    case kOpBlx: opcode = kX86CallR; break;
    default:
      LOG(FATAL) << "Bad case in OpReg " << op;
  }
  return NewLIR1(opcode, r_dest_src.GetReg());
}
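
/*
 * Illustrative example for OpRegImm below (register operand shown
 * symbolically): OpRegImm(kOpAdd, reg, 4) selects the sign-extended 8-bit
 * immediate form kX86Add32RI8, while an immediate such as 0x12345678 falls
 * outside IS_SIMM8 and uses the full 32-bit form kX86Add32RI.
 */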

LIR* X86Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
  X86OpCode opcode = kX86Bkpt;
  bool byte_imm = IS_SIMM8(value);
  DCHECK(!X86_FPREG(r_dest_src1.GetReg()));
  switch (op) {
    case kOpLsl: opcode = kX86Sal32RI; break;
    case kOpLsr: opcode = kX86Shr32RI; break;
    case kOpAsr: opcode = kX86Sar32RI; break;
    case kOpAdd: opcode = byte_imm ? kX86Add32RI8 : kX86Add32RI; break;
    case kOpOr: opcode = byte_imm ? kX86Or32RI8 : kX86Or32RI; break;
    case kOpAdc: opcode = byte_imm ? kX86Adc32RI8 : kX86Adc32RI; break;
    // case kOpSbb: opcode = kX86Sbb32RI; break;
    case kOpAnd: opcode = byte_imm ? kX86And32RI8 : kX86And32RI; break;
    case kOpSub: opcode = byte_imm ? kX86Sub32RI8 : kX86Sub32RI; break;
    case kOpXor: opcode = byte_imm ? kX86Xor32RI8 : kX86Xor32RI; break;
    case kOpCmp: opcode = byte_imm ? kX86Cmp32RI8 : kX86Cmp32RI; break;
    case kOpMov:
      /*
       * Moving the constant zero into a register could be specialized as an xor of the
       * register. However, the xor sets eflags while the move does not. For that reason,
       * always emit the move here; callers that can tolerate clobbered eflags should call
       * LoadConstantNoClobber instead.
       */
      opcode = kX86Mov32RI;
      break;
    case kOpMul:
      opcode = byte_imm ? kX86Imul32RRI8 : kX86Imul32RRI;
      return NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), value);
    default:
      LOG(FATAL) << "Bad case in OpRegImm " << op;
  }
  return NewLIR2(opcode, r_dest_src1.GetReg(), value);
}

LIR* X86Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
  X86OpCode opcode = kX86Nop;
  bool src2_must_be_cx = false;
  switch (op) {
    // X86 unary opcodes
    case kOpMvn:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpNot, r_dest_src1);
    case kOpNeg:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpNeg, r_dest_src1);
    case kOpRev:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpRev, r_dest_src1);
    case kOpRevsh:
      OpRegCopy(r_dest_src1, r_src2);
      OpReg(kOpRev, r_dest_src1);
      return OpRegImm(kOpAsr, r_dest_src1, 16);
    // X86 binary opcodes
    case kOpSub: opcode = kX86Sub32RR; break;
    case kOpSbc: opcode = kX86Sbb32RR; break;
    case kOpLsl: opcode = kX86Sal32RC; src2_must_be_cx = true; break;
    case kOpLsr: opcode = kX86Shr32RC; src2_must_be_cx = true; break;
    case kOpAsr: opcode = kX86Sar32RC; src2_must_be_cx = true; break;
    case kOpMov: opcode = kX86Mov32RR; break;
    case kOpCmp: opcode = kX86Cmp32RR; break;
    case kOpAdd: opcode = kX86Add32RR; break;
    case kOpAdc: opcode = kX86Adc32RR; break;
    case kOpAnd: opcode = kX86And32RR; break;
    case kOpOr: opcode = kX86Or32RR; break;
    case kOpXor: opcode = kX86Xor32RR; break;
    case kOp2Byte:
      // Use shifts instead of a byte operand if the source can't be byte accessed.
      if (r_src2.GetReg() >= 4) {
        NewLIR2(kX86Mov32RR, r_dest_src1.GetReg(), r_src2.GetReg());
        NewLIR2(kX86Sal32RI, r_dest_src1.GetReg(), 24);
        return NewLIR2(kX86Sar32RI, r_dest_src1.GetReg(), 24);
      } else {
        opcode = kX86Movsx8RR;
      }
      break;
    case kOp2Short: opcode = kX86Movsx16RR; break;
    case kOp2Char: opcode = kX86Movzx16RR; break;
    case kOpMul: opcode = kX86Imul32RR; break;
    default:
      LOG(FATAL) << "Bad case in OpRegReg " << op;
      break;
  }
  CHECK(!src2_must_be_cx || r_src2.GetReg() == rCX);
  return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
}
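
/*
 * Note on kOp2Byte in OpRegReg above: in 32-bit mode only the first four GPRs
 * (EAX, ECX, EDX, EBX, register numbers 0-3) have addressable low-byte forms,
 * so a source with register number >= 4 is sign-extended with a shl/sar pair
 * instead of movsx8.
 */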

LIR* X86Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
  DCHECK(!(X86_FPREG(r_base.GetReg())));
  X86OpCode opcode = kX86Nop;
  int dest = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
  switch (move_type) {
    case kMov8GP:
      CHECK(!X86_FPREG(dest));
      opcode = kX86Mov8RM;
      break;
    case kMov16GP:
      CHECK(!X86_FPREG(dest));
      opcode = kX86Mov16RM;
      break;
    case kMov32GP:
      CHECK(!X86_FPREG(dest));
      opcode = kX86Mov32RM;
      break;
    case kMov32FP:
      CHECK(X86_FPREG(dest));
      opcode = kX86MovssRM;
      break;
    case kMov64FP:
      CHECK(X86_FPREG(dest));
      opcode = kX86MovsdRM;
      break;
    case kMovU128FP:
      CHECK(X86_FPREG(dest));
      opcode = kX86MovupsRM;
      break;
    case kMovA128FP:
      CHECK(X86_FPREG(dest));
      opcode = kX86MovapsRM;
      break;
    case kMovLo128FP:
      CHECK(X86_FPREG(dest));
      opcode = kX86MovlpsRM;
      break;
    case kMovHi128FP:
      CHECK(X86_FPREG(dest));
      opcode = kX86MovhpsRM;
      break;
    case kMov64GP:
    case kMovLo64FP:
    case kMovHi64FP:
    default:
      LOG(FATAL) << "Bad case in OpMovRegMem";
      break;
  }

  return NewLIR3(opcode, dest, r_base.GetReg(), offset);
}

LIR* X86Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
  DCHECK(!(X86_FPREG(r_base.GetReg())));
  int src = r_src.IsPair() ? r_src.GetLowReg() : r_src.GetReg();

  X86OpCode opcode = kX86Nop;
  switch (move_type) {
    case kMov8GP:
      CHECK(!X86_FPREG(src));
      opcode = kX86Mov8MR;
      break;
    case kMov16GP:
      CHECK(!X86_FPREG(src));
      opcode = kX86Mov16MR;
      break;
    case kMov32GP:
      CHECK(!X86_FPREG(src));
      opcode = kX86Mov32MR;
      break;
    case kMov32FP:
      CHECK(X86_FPREG(src));
      opcode = kX86MovssMR;
      break;
    case kMov64FP:
      CHECK(X86_FPREG(src));
      opcode = kX86MovsdMR;
      break;
    case kMovU128FP:
      CHECK(X86_FPREG(src));
      opcode = kX86MovupsMR;
      break;
    case kMovA128FP:
      CHECK(X86_FPREG(src));
      opcode = kX86MovapsMR;
      break;
    case kMovLo128FP:
      CHECK(X86_FPREG(src));
      opcode = kX86MovlpsMR;
      break;
    case kMovHi128FP:
      CHECK(X86_FPREG(src));
      opcode = kX86MovhpsMR;
      break;
    case kMov64GP:
    case kMovLo64FP:
    case kMovHi64FP:
    default:
      LOG(FATAL) << "Bad case in OpMovMemReg";
      break;
  }

  return NewLIR3(opcode, r_base.GetReg(), offset, src);
}

LIR* X86Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
  // The only conditional reg to reg operation supported is Cmov
  DCHECK_EQ(op, kOpCmov);
  return NewLIR3(kX86Cmov32RRC, r_dest.GetReg(), r_src.GetReg(), X86ConditionEncoding(cc));
}

LIR* X86Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
  X86OpCode opcode = kX86Nop;
  switch (op) {
    // X86 binary opcodes
    case kOpSub: opcode = kX86Sub32RM; break;
    case kOpMov: opcode = kX86Mov32RM; break;
    case kOpCmp: opcode = kX86Cmp32RM; break;
    case kOpAdd: opcode = kX86Add32RM; break;
    case kOpAnd: opcode = kX86And32RM; break;
    case kOpOr: opcode = kX86Or32RM; break;
    case kOpXor: opcode = kX86Xor32RM; break;
    case kOp2Byte: opcode = kX86Movsx8RM; break;
    case kOp2Short: opcode = kX86Movsx16RM; break;
    case kOp2Char: opcode = kX86Movzx16RM; break;
    case kOpMul:
    default:
      LOG(FATAL) << "Bad case in OpRegMem " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), offset);
  if (r_base == rs_rX86_SP) {
    AnnotateDalvikRegAccess(l, offset >> 2, true /* is_load */, false /* is_64bit */);
  }
  return l;
}
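
/*
 * Accesses through the stack pointer in the Op*Mem helpers above and below
 * are annotated with the Dalvik virtual register being touched; each vreg
 * appears to occupy a 4-byte frame slot, so displacement >> 2 converts the
 * byte offset into a vreg number for later memory-reference analysis.
 */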
LIR* X86Mir2Lir::OpMemReg(OpKind op, RegLocation rl_dest, int r_value) {
  DCHECK_NE(rl_dest.location, kLocPhysReg);
  int displacement = SRegOffset(rl_dest.s_reg_low);
  X86OpCode opcode = kX86Nop;
  switch (op) {
    case kOpSub: opcode = kX86Sub32MR; break;
    case kOpMov: opcode = kX86Mov32MR; break;
    case kOpCmp: opcode = kX86Cmp32MR; break;
    case kOpAdd: opcode = kX86Add32MR; break;
    case kOpAnd: opcode = kX86And32MR; break;
    case kOpOr: opcode = kX86Or32MR; break;
    case kOpXor: opcode = kX86Xor32MR; break;
    case kOpLsl: opcode = kX86Sal32MC; break;
    case kOpLsr: opcode = kX86Shr32MC; break;
    case kOpAsr: opcode = kX86Sar32MC; break;
    default:
      LOG(FATAL) << "Bad case in OpMemReg " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, rX86_SP, displacement, r_value);
  AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, false /* is_64bit */);
  return l;
}

LIR* X86Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegLocation rl_value) {
  DCHECK_NE(rl_value.location, kLocPhysReg);
  int displacement = SRegOffset(rl_value.s_reg_low);
  X86OpCode opcode = kX86Nop;
  switch (op) {
    case kOpSub: opcode = kX86Sub32RM; break;
    case kOpMov: opcode = kX86Mov32RM; break;
    case kOpCmp: opcode = kX86Cmp32RM; break;
    case kOpAdd: opcode = kX86Add32RM; break;
    case kOpAnd: opcode = kX86And32RM; break;
    case kOpOr: opcode = kX86Or32RM; break;
    case kOpXor: opcode = kX86Xor32RM; break;
    case kOpMul: opcode = kX86Imul32RM; break;
    default:
      LOG(FATAL) << "Bad case in OpRegMem " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, r_dest.GetReg(), rX86_SP, displacement);
  AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, false /* is_64bit */);
  return l;
}

LIR* X86Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
                             RegStorage r_src2) {
  if (r_dest != r_src1 && r_dest != r_src2) {
    if (op == kOpAdd) {  // lea special case, except can't encode rbp as base
      if (r_src1 == r_src2) {
        OpRegCopy(r_dest, r_src1);
        return OpRegImm(kOpLsl, r_dest, 1);
      } else if (r_src1 != rs_rBP) {
        return NewLIR5(kX86Lea32RA, r_dest.GetReg(), r_src1.GetReg() /* base */,
                       r_src2.GetReg() /* index */, 0 /* scale */, 0 /* disp */);
      } else {
        return NewLIR5(kX86Lea32RA, r_dest.GetReg(), r_src2.GetReg() /* base */,
                       r_src1.GetReg() /* index */, 0 /* scale */, 0 /* disp */);
      }
    } else {
      OpRegCopy(r_dest, r_src1);
      return OpRegReg(op, r_dest, r_src2);
    }
  } else if (r_dest == r_src1) {
    return OpRegReg(op, r_dest, r_src2);
  } else {  // r_dest == r_src2
    switch (op) {
      case kOpSub:  // non-commutative
        OpReg(kOpNeg, r_dest);
        op = kOpAdd;
        break;
      case kOpSbc:
      case kOpLsl: case kOpLsr: case kOpAsr: case kOpRor: {
        RegStorage t_reg = AllocTemp();
        OpRegCopy(t_reg, r_src1);
        OpRegReg(op, t_reg, r_src2);
        LIR* res = OpRegCopy(r_dest, t_reg);
        FreeTemp(t_reg);
        return res;
      }
      case kOpAdd:  // commutative
      case kOpOr:
      case kOpAdc:
      case kOpAnd:
      case kOpXor:
        break;
      default:
        LOG(FATAL) << "Bad case in OpRegRegReg " << op;
    }
    return OpRegReg(op, r_dest, r_src1);
  }
}
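
/*
 * Illustrative lowering in OpRegRegReg above: for kOpAdd with three distinct
 * registers it emits a single lea r_dest, [r_src1 + r_src2], swapping the
 * operands when r_src1 is rBP because EBP as an SIB base with no displacement
 * is not encodable (that encoding means a disp32 follows instead).
 */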

LIR* X86Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src, int value) {
  if (op == kOpMul) {
    X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI;
    return NewLIR3(opcode, r_dest.GetReg(), r_src.GetReg(), value);
  } else if (op == kOpAnd) {
    if (value == 0xFF && r_src.GetReg() < 4) {
      return NewLIR2(kX86Movzx8RR, r_dest.GetReg(), r_src.GetReg());
    } else if (value == 0xFFFF) {
      return NewLIR2(kX86Movzx16RR, r_dest.GetReg(), r_src.GetReg());
    }
  }
  if (r_dest != r_src) {
    if (false && op == kOpLsl && value >= 0 && value <= 3) {  // lea shift special case
      // TODO: fix bug in LEA encoding when disp == 0
      return NewLIR5(kX86Lea32RA, r_dest.GetReg(), r5sib_no_base /* base */,
                     r_src.GetReg() /* index */, value /* scale */, 0 /* disp */);
    } else if (op == kOpAdd) {  // lea add special case
      return NewLIR5(kX86Lea32RA, r_dest.GetReg(), r_src.GetReg() /* base */,
                     r4sib_no_index /* index */, 0 /* scale */, value /* disp */);
    }
    OpRegCopy(r_dest, r_src);
  }
  return OpRegImm(op, r_dest, value);
}

LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset thread_offset) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallT; break;
    case kOpBx: opcode = kX86JmpT; break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR1(opcode, thread_offset.Int32Value());
}

LIR* X86Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallM; break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR2(opcode, r_base.GetReg(), disp);
}
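
/*
 * LoadConstantWide below has three strategies for doubles held in XMM
 * registers: 0.0 becomes an xorps; when the method keeps a pointer to the
 * base of its own code (base_of_code_), the value is placed in the literal
 * pool and loaded via [method + offset] with the displacement fixed up at
 * assembly time; otherwise the 64-bit pattern is assembled in place from
 * two 32-bit halves with movd and punpckldq.
 */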

LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
  int32_t val_lo = Low32Bits(value);
  int32_t val_hi = High32Bits(value);
  int32_t low_reg_val = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
  LIR *res;
  bool is_fp = X86_FPREG(low_reg_val);
  // TODO: clean this up once we fully recognize 64-bit storage containers.
  if (is_fp) {
    if (value == 0) {
      return NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
    } else if (base_of_code_ != nullptr) {
      // We will load the value from the literal area.
      LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
      if (data_target == NULL) {
        data_target = AddWideData(&literal_list_, val_lo, val_hi);
      }

      // Address the start of the method
      RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
      rl_method = LoadValue(rl_method, kCoreReg);

      // Load the proper value from the literal area.
      // We don't know the proper offset for the value, so pick one that will force
      // 4 byte offset.  We will fix this up in the assembler later to have the right
      // value.
      res = LoadBaseDisp(rl_method.reg, 256 /* bogus */, RegStorage::Solo64(low_reg_val),
                         kDouble, INVALID_SREG);
      res->target = data_target;
      res->flags.fixup = kFixupLoad;
      SetMemRefType(res, true, kLiteral);
      store_method_addr_used_ = true;
    } else {
      if (val_lo == 0) {
        res = NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
      } else {
        res = LoadConstantNoClobber(RegStorage::Solo32(low_reg_val), val_lo);
      }
      if (val_hi != 0) {
        // FIXME: clean up when AllocTempDouble no longer returns a pair.
        RegStorage r_dest_hi = AllocTempDouble();
        LoadConstantNoClobber(RegStorage::Solo32(r_dest_hi.GetLowReg()), val_hi);
        NewLIR2(kX86PunpckldqRR, low_reg_val, r_dest_hi.GetLowReg());
        FreeTemp(r_dest_hi);
      }
    }
  } else {
    res = LoadConstantNoClobber(r_dest.GetLow(), val_lo);
    LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
  }
  return res;
}
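
/*
 * 64-bit core loads below are split into a low/high pair of 32-bit moves, so
 * the two loads are ordered (or routed through a temp) to keep the first load
 * from clobbering the base or index register needed by the second.
 */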

// FIXME: don't split r_dest into two storage units.
LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                     int displacement, RegStorage r_dest, RegStorage r_dest_hi,
                                     OpSize size, int s_reg) {
  LIR *load = NULL;
  LIR *load2 = NULL;
  bool is_array = r_index.Valid();
  bool pair = false;
  bool is64bit = false;
  X86OpCode opcode = kX86Nop;
  switch (size) {
    case kLong:
    case kDouble:
      // TODO: use regstorage attributes here.
      is64bit = true;
      if (X86_FPREG(r_dest.GetReg())) {
        opcode = is_array ? kX86MovsdRA : kX86MovsdRM;
      } else {
        pair = true;
        opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
      }
      // TODO: double store is to unaligned address
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kWord:
    case kSingle:
      opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
      if (X86_FPREG(r_dest.GetReg())) {
        opcode = is_array ? kX86MovssRA : kX86MovssRM;
        DCHECK(X86_SINGLEREG(r_dest.GetReg()));
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
      opcode = is_array ? kX86Movzx16RA : kX86Movzx16RM;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kSignedHalf:
      opcode = is_array ? kX86Movsx16RA : kX86Movsx16RM;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
      opcode = is_array ? kX86Movzx8RA : kX86Movzx8RM;
      break;
    case kSignedByte:
      opcode = is_array ? kX86Movsx8RA : kX86Movsx8RM;
      break;
    default:
      LOG(FATAL) << "Bad case in LoadBaseIndexedDispBody";
  }

  if (!is_array) {
    if (!pair) {
      load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
    } else {
      if (r_base == r_dest) {
        load2 = NewLIR3(opcode, r_dest_hi.GetReg(), r_base.GetReg(),
                        displacement + HIWORD_OFFSET);
        load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
      } else {
        load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
        load2 = NewLIR3(opcode, r_dest_hi.GetReg(), r_base.GetReg(),
                        displacement + HIWORD_OFFSET);
      }
    }
    if (r_base == rs_rX86_SP) {
      AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                              true /* is_load */, is64bit);
      if (pair) {
        AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
                                true /* is_load */, is64bit);
      }
    }
  } else {
    if (!pair) {
      load = NewLIR5(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                     displacement + LOWORD_OFFSET);
    } else {
      if (r_base == r_dest) {
        if (r_dest_hi == r_index) {
          // We can't use either register for the first load.
          RegStorage temp = AllocTemp();
          load2 = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + HIWORD_OFFSET);
          load = NewLIR5(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + LOWORD_OFFSET);
          OpRegCopy(r_dest_hi, temp);
          FreeTemp(temp);
        } else {
          load2 = NewLIR5(opcode, r_dest_hi.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + HIWORD_OFFSET);
          load = NewLIR5(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + LOWORD_OFFSET);
        }
      } else {
        if (r_dest == r_index) {
          // We can't use either register for the first load.
          RegStorage temp = AllocTemp();
          load = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + LOWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest_hi.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + HIWORD_OFFSET);
          OpRegCopy(r_dest, temp);
          FreeTemp(temp);
        } else {
          load = NewLIR5(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + LOWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest_hi.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + HIWORD_OFFSET);
        }
      }
    }
  }

  return load;
}

/* Load value from base + scaled index. */
LIR* X86Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                 int scale, OpSize size) {
  return LoadBaseIndexedDisp(r_base, r_index, scale, 0,
                             r_dest, RegStorage::InvalidReg(), size, INVALID_SREG);
}

LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement,
                              RegStorage r_dest, OpSize size, int s_reg) {
  return LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement,
                             r_dest, RegStorage::InvalidReg(), size, s_reg);
}

LIR* X86Mir2Lir::LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest,
                                  int s_reg) {
  return LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement,
                             r_dest.GetLow(), r_dest.GetHigh(), kLong, s_reg);
}
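
/*
 * The store path below mirrors the load path but needs no such reordering:
 * writing memory cannot clobber the base or index registers, so the pair is
 * always stored low word first.
 */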

LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                      int displacement, RegStorage r_src, RegStorage r_src_hi,
                                      OpSize size, int s_reg) {
  LIR *store = NULL;
  LIR *store2 = NULL;
  bool is_array = r_index.Valid();
  // FIXME: use regstorage attributes in place of these.
  bool pair = false;
  bool is64bit = false;
  X86OpCode opcode = kX86Nop;
  switch (size) {
    case kLong:
    case kDouble:
      is64bit = true;
      if (X86_FPREG(r_src.GetReg())) {
        opcode = is_array ? kX86MovsdAR : kX86MovsdMR;
      } else {
        pair = true;
        opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
      }
      // TODO: double store is to unaligned address
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kWord:
    case kSingle:
      opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
      if (X86_FPREG(r_src.GetReg())) {
        opcode = is_array ? kX86MovssAR : kX86MovssMR;
        DCHECK(X86_SINGLEREG(r_src.GetReg()));
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      opcode = is_array ? kX86Mov16AR : kX86Mov16MR;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = is_array ? kX86Mov8AR : kX86Mov8MR;
      break;
    default:
      LOG(FATAL) << "Bad case in StoreBaseIndexedDispBody";
  }

  if (!is_array) {
    if (!pair) {
      store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetReg());
    } else {
      store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetReg());
      store2 = NewLIR3(opcode, r_base.GetReg(), displacement + HIWORD_OFFSET, r_src_hi.GetReg());
    }
    if (r_base == rs_rX86_SP) {
      AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                              false /* is_load */, is64bit);
      if (pair) {
        AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
                                false /* is_load */, is64bit);
      }
    }
  } else {
    if (!pair) {
      store = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
                      displacement + LOWORD_OFFSET, r_src.GetReg());
    } else {
      store = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
                      displacement + LOWORD_OFFSET, r_src.GetReg());
      store2 = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
                       displacement + HIWORD_OFFSET, r_src_hi.GetReg());
    }
  }

  return store;
}

/* Store value to base + scaled index. */
LIR* X86Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                  int scale, OpSize size) {
  return StoreBaseIndexedDisp(r_base, r_index, scale, 0,
                              r_src, RegStorage::InvalidReg(), size, INVALID_SREG);
}

LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement,
                               RegStorage r_src, OpSize size) {
  return StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src,
                              RegStorage::InvalidReg(), size, INVALID_SREG);
}

LIR* X86Mir2Lir::StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src) {
  return StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement,
                              r_src.GetLow(), r_src.GetHigh(), kLong, INVALID_SREG);
}

/* Copy a long value in core registers to an XMM register. */
void X86Mir2Lir::OpVectorRegCopyWide(uint8_t fp_reg, uint8_t low_reg, uint8_t high_reg) {
  NewLIR2(kX86MovdxrRR, fp_reg, low_reg);
  int tmp_reg = AllocTempDouble().GetLowReg();
  NewLIR2(kX86MovdxrRR, tmp_reg, high_reg);
  NewLIR2(kX86PunpckldqRR, fp_reg, tmp_reg);
  FreeTemp(tmp_reg);
}

LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
                                   int offset, int check_value, LIR* target) {
  NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(), offset,
          check_value);
  LIR* branch = OpCondBranch(cond, target);
  return branch;
}
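
/*
 * The Analyze* pass below walks the MIR ahead of code generation to decide
 * whether the method needs a pointer to the base of its own code
 * (store_method_addr_): packed switches, fill-array data, and double
 * literals all load through that pointer.
 */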

void X86Mir2Lir::AnalyzeMIR() {
  // Assume we don't need a pointer to the base of the code.
  cu_->NewTimingSplit("X86 MIR Analysis");
  store_method_addr_ = false;

  // Walk the MIR looking for interesting items.
  PreOrderDfsIterator iter(mir_graph_);
  BasicBlock* curr_bb = iter.Next();
  while (curr_bb != NULL) {
    AnalyzeBB(curr_bb);
    curr_bb = iter.Next();
  }

  // Did we need a pointer to the method code?
  if (store_method_addr_) {
    base_of_code_ = mir_graph_->GetNewCompilerTemp(kCompilerTempVR, false);
  } else {
    base_of_code_ = nullptr;
  }
}

void X86Mir2Lir::AnalyzeBB(BasicBlock * bb) {
  if (bb->block_type == kDead) {
    // Ignore dead blocks
    return;
  }

  for (MIR *mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    int opcode = mir->dalvikInsn.opcode;
    if (opcode >= kMirOpFirst) {
      AnalyzeExtendedMIR(opcode, bb, mir);
    } else {
      AnalyzeMIR(opcode, bb, mir);
    }
  }
}

void X86Mir2Lir::AnalyzeExtendedMIR(int opcode, BasicBlock * bb, MIR *mir) {
  switch (opcode) {
    // Instructions referencing doubles.
    case kMirOpFusedCmplDouble:
    case kMirOpFusedCmpgDouble:
      AnalyzeFPInstruction(opcode, bb, mir);
      break;
    default:
      // Ignore the rest.
      break;
  }
}

void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir) {
  // Looking for
  //  - Do we need a pointer to the code (used for packed switches and double lits)?

  switch (opcode) {
    // Instructions referencing doubles.
    case Instruction::CMPL_DOUBLE:
    case Instruction::CMPG_DOUBLE:
    case Instruction::NEG_DOUBLE:
    case Instruction::ADD_DOUBLE:
    case Instruction::SUB_DOUBLE:
    case Instruction::MUL_DOUBLE:
    case Instruction::DIV_DOUBLE:
    case Instruction::REM_DOUBLE:
    case Instruction::ADD_DOUBLE_2ADDR:
    case Instruction::SUB_DOUBLE_2ADDR:
    case Instruction::MUL_DOUBLE_2ADDR:
    case Instruction::DIV_DOUBLE_2ADDR:
    case Instruction::REM_DOUBLE_2ADDR:
      AnalyzeFPInstruction(opcode, bb, mir);
      break;

    // Packed switches and array fills need a pointer to the base of the method.
    case Instruction::FILL_ARRAY_DATA:
    case Instruction::PACKED_SWITCH:
      store_method_addr_ = true;
      break;
    default:
      // Other instructions are not interesting yet.
      break;
  }
}

void X86Mir2Lir::AnalyzeFPInstruction(int opcode, BasicBlock * bb, MIR *mir) {
  // Look at all the uses, and see if they are double constants.
  uint64_t attrs = mir_graph_->oat_data_flow_attributes_[opcode];
  int next_sreg = 0;
  if (attrs & DF_UA) {
    if (attrs & DF_A_WIDE) {
      AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg));
      next_sreg += 2;
    } else {
      next_sreg++;
    }
  }
  if (attrs & DF_UB) {
    if (attrs & DF_B_WIDE) {
      AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg));
      next_sreg += 2;
    } else {
      next_sreg++;
    }
  }
  if (attrs & DF_UC) {
    if (attrs & DF_C_WIDE) {
      AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg));
    }
  }
}

void X86Mir2Lir::AnalyzeDoubleUse(RegLocation use) {
  // If this is a double literal, we will want it in the literal pool.
  if (use.is_const) {
    store_method_addr_ = true;
  }
}

}  // namespace art