// code_generator_x86.cc revision 988939683c26c0b1c8808fc206add6337319509a
1/* 2 * Copyright (C) 2014 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "code_generator_x86.h" 18 19#include "entrypoints/quick/quick_entrypoints.h" 20#include "gc/accounting/card_table.h" 21#include "mirror/array-inl.h" 22#include "mirror/art_method.h" 23#include "mirror/class.h" 24#include "thread.h" 25#include "utils/assembler.h" 26#include "utils/stack_checks.h" 27#include "utils/x86/assembler_x86.h" 28#include "utils/x86/managed_register_x86.h" 29 30namespace art { 31 32namespace x86 { 33 34static constexpr int kNumberOfPushedRegistersAtEntry = 1; 35static constexpr int kCurrentMethodStackOffset = 0; 36 37static constexpr Register kRuntimeParameterCoreRegisters[] = { EAX, ECX, EDX, EBX }; 38static constexpr size_t kRuntimeParameterCoreRegistersLength = 39 arraysize(kRuntimeParameterCoreRegisters); 40static constexpr XmmRegister kRuntimeParameterFpuRegisters[] = { }; 41static constexpr size_t kRuntimeParameterFpuRegistersLength = 0; 42 43static constexpr int kC2ConditionMask = 0x400; 44 45// Marker for places that can be updated once we don't follow the quick ABI. 
46static constexpr bool kFollowsQuickABI = true; 47 48class InvokeRuntimeCallingConvention : public CallingConvention<Register, XmmRegister> { 49 public: 50 InvokeRuntimeCallingConvention() 51 : CallingConvention(kRuntimeParameterCoreRegisters, 52 kRuntimeParameterCoreRegistersLength, 53 kRuntimeParameterFpuRegisters, 54 kRuntimeParameterFpuRegistersLength) {} 55 56 private: 57 DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention); 58}; 59 60#define __ reinterpret_cast<X86Assembler*>(codegen->GetAssembler())-> 61 62class SlowPathCodeX86 : public SlowPathCode { 63 public: 64 SlowPathCodeX86() : entry_label_(), exit_label_() {} 65 66 Label* GetEntryLabel() { return &entry_label_; } 67 Label* GetExitLabel() { return &exit_label_; } 68 69 private: 70 Label entry_label_; 71 Label exit_label_; 72 73 DISALLOW_COPY_AND_ASSIGN(SlowPathCodeX86); 74}; 75 76class NullCheckSlowPathX86 : public SlowPathCodeX86 { 77 public: 78 explicit NullCheckSlowPathX86(HNullCheck* instruction) : instruction_(instruction) {} 79 80 virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { 81 __ Bind(GetEntryLabel()); 82 __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowNullPointer))); 83 codegen->RecordPcInfo(instruction_, instruction_->GetDexPc()); 84 } 85 86 private: 87 HNullCheck* const instruction_; 88 DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86); 89}; 90 91class DivZeroCheckSlowPathX86 : public SlowPathCodeX86 { 92 public: 93 explicit DivZeroCheckSlowPathX86(HDivZeroCheck* instruction) : instruction_(instruction) {} 94 95 virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { 96 __ Bind(GetEntryLabel()); 97 __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowDivZero))); 98 codegen->RecordPcInfo(instruction_, instruction_->GetDexPc()); 99 } 100 101 private: 102 HDivZeroCheck* const instruction_; 103 DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86); 104}; 105 106class DivRemMinusOneSlowPathX86 : public SlowPathCodeX86 { 107 
public: 108 explicit DivRemMinusOneSlowPathX86(Register reg, bool is_div) : reg_(reg), is_div_(is_div) {} 109 110 virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { 111 __ Bind(GetEntryLabel()); 112 if (is_div_) { 113 __ negl(reg_); 114 } else { 115 __ movl(reg_, Immediate(0)); 116 } 117 __ jmp(GetExitLabel()); 118 } 119 120 private: 121 Register reg_; 122 bool is_div_; 123 DISALLOW_COPY_AND_ASSIGN(DivRemMinusOneSlowPathX86); 124}; 125 126class StackOverflowCheckSlowPathX86 : public SlowPathCodeX86 { 127 public: 128 StackOverflowCheckSlowPathX86() {} 129 130 virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { 131 __ Bind(GetEntryLabel()); 132 __ addl(ESP, 133 Immediate(codegen->GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86WordSize)); 134 __ fs()->jmp(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowStackOverflow))); 135 } 136 137 private: 138 DISALLOW_COPY_AND_ASSIGN(StackOverflowCheckSlowPathX86); 139}; 140 141class BoundsCheckSlowPathX86 : public SlowPathCodeX86 { 142 public: 143 BoundsCheckSlowPathX86(HBoundsCheck* instruction, 144 Location index_location, 145 Location length_location) 146 : instruction_(instruction), 147 index_location_(index_location), 148 length_location_(length_location) {} 149 150 virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { 151 CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen); 152 __ Bind(GetEntryLabel()); 153 // We're moving two locations to locations that could overlap, so we need a parallel 154 // move resolver. 
155 InvokeRuntimeCallingConvention calling_convention; 156 x86_codegen->EmitParallelMoves( 157 index_location_, 158 Location::RegisterLocation(calling_convention.GetRegisterAt(0)), 159 length_location_, 160 Location::RegisterLocation(calling_convention.GetRegisterAt(1))); 161 __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowArrayBounds))); 162 codegen->RecordPcInfo(instruction_, instruction_->GetDexPc()); 163 } 164 165 private: 166 HBoundsCheck* const instruction_; 167 const Location index_location_; 168 const Location length_location_; 169 170 DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86); 171}; 172 173class SuspendCheckSlowPathX86 : public SlowPathCodeX86 { 174 public: 175 explicit SuspendCheckSlowPathX86(HSuspendCheck* instruction, HBasicBlock* successor) 176 : instruction_(instruction), successor_(successor) {} 177 178 virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { 179 CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen); 180 __ Bind(GetEntryLabel()); 181 codegen->SaveLiveRegisters(instruction_->GetLocations()); 182 __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pTestSuspend))); 183 codegen->RecordPcInfo(instruction_, instruction_->GetDexPc()); 184 codegen->RestoreLiveRegisters(instruction_->GetLocations()); 185 if (successor_ == nullptr) { 186 __ jmp(GetReturnLabel()); 187 } else { 188 __ jmp(x86_codegen->GetLabelOf(successor_)); 189 } 190 } 191 192 Label* GetReturnLabel() { 193 DCHECK(successor_ == nullptr); 194 return &return_label_; 195 } 196 197 private: 198 HSuspendCheck* const instruction_; 199 HBasicBlock* const successor_; 200 Label return_label_; 201 202 DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathX86); 203}; 204 205class LoadStringSlowPathX86 : public SlowPathCodeX86 { 206 public: 207 explicit LoadStringSlowPathX86(HLoadString* instruction) : instruction_(instruction) {} 208 209 virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { 210 LocationSummary* 
locations = instruction_->GetLocations(); 211 DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); 212 213 CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen); 214 __ Bind(GetEntryLabel()); 215 codegen->SaveLiveRegisters(locations); 216 217 InvokeRuntimeCallingConvention calling_convention; 218 x86_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1)); 219 __ movl(calling_convention.GetRegisterAt(0), Immediate(instruction_->GetStringIndex())); 220 __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pResolveString))); 221 codegen->RecordPcInfo(instruction_, instruction_->GetDexPc()); 222 x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX)); 223 codegen->RestoreLiveRegisters(locations); 224 225 __ jmp(GetExitLabel()); 226 } 227 228 private: 229 HLoadString* const instruction_; 230 231 DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86); 232}; 233 234class LoadClassSlowPathX86 : public SlowPathCodeX86 { 235 public: 236 LoadClassSlowPathX86(HLoadClass* cls, 237 HInstruction* at, 238 uint32_t dex_pc, 239 bool do_clinit) 240 : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) { 241 DCHECK(at->IsLoadClass() || at->IsClinitCheck()); 242 } 243 244 virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { 245 LocationSummary* locations = at_->GetLocations(); 246 CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen); 247 __ Bind(GetEntryLabel()); 248 codegen->SaveLiveRegisters(locations); 249 250 InvokeRuntimeCallingConvention calling_convention; 251 __ movl(calling_convention.GetRegisterAt(0), Immediate(cls_->GetTypeIndex())); 252 x86_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1)); 253 __ fs()->call(Address::Absolute(do_clinit_ 254 ? 
QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeStaticStorage) 255 : QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeType))); 256 codegen->RecordPcInfo(at_, dex_pc_); 257 258 // Move the class to the desired location. 259 Location out = locations->Out(); 260 if (out.IsValid()) { 261 DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg())); 262 x86_codegen->Move32(out, Location::RegisterLocation(EAX)); 263 } 264 265 codegen->RestoreLiveRegisters(locations); 266 __ jmp(GetExitLabel()); 267 } 268 269 private: 270 // The class this slow path will load. 271 HLoadClass* const cls_; 272 273 // The instruction where this slow path is happening. 274 // (Might be the load class or an initialization check). 275 HInstruction* const at_; 276 277 // The dex PC of `at_`. 278 const uint32_t dex_pc_; 279 280 // Whether to initialize the class. 281 const bool do_clinit_; 282 283 DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathX86); 284}; 285 286class TypeCheckSlowPathX86 : public SlowPathCodeX86 { 287 public: 288 TypeCheckSlowPathX86(HInstruction* instruction, 289 Location class_to_check, 290 Location object_class, 291 uint32_t dex_pc) 292 : instruction_(instruction), 293 class_to_check_(class_to_check), 294 object_class_(object_class), 295 dex_pc_(dex_pc) {} 296 297 virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { 298 LocationSummary* locations = instruction_->GetLocations(); 299 DCHECK(instruction_->IsCheckCast() 300 || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); 301 302 CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen); 303 __ Bind(GetEntryLabel()); 304 codegen->SaveLiveRegisters(locations); 305 306 // We're moving two locations to locations that could overlap, so we need a parallel 307 // move resolver. 
308 InvokeRuntimeCallingConvention calling_convention; 309 x86_codegen->EmitParallelMoves( 310 class_to_check_, 311 Location::RegisterLocation(calling_convention.GetRegisterAt(0)), 312 object_class_, 313 Location::RegisterLocation(calling_convention.GetRegisterAt(1))); 314 315 if (instruction_->IsInstanceOf()) { 316 __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, 317 pInstanceofNonTrivial))); 318 } else { 319 DCHECK(instruction_->IsCheckCast()); 320 __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pCheckCast))); 321 } 322 323 codegen->RecordPcInfo(instruction_, dex_pc_); 324 if (instruction_->IsInstanceOf()) { 325 x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX)); 326 } 327 codegen->RestoreLiveRegisters(locations); 328 329 __ jmp(GetExitLabel()); 330 } 331 332 private: 333 HInstruction* const instruction_; 334 const Location class_to_check_; 335 const Location object_class_; 336 const uint32_t dex_pc_; 337 338 DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86); 339}; 340 341#undef __ 342#define __ reinterpret_cast<X86Assembler*>(GetAssembler())-> 343 344inline Condition X86Condition(IfCondition cond) { 345 switch (cond) { 346 case kCondEQ: return kEqual; 347 case kCondNE: return kNotEqual; 348 case kCondLT: return kLess; 349 case kCondLE: return kLessEqual; 350 case kCondGT: return kGreater; 351 case kCondGE: return kGreaterEqual; 352 default: 353 LOG(FATAL) << "Unknown if condition"; 354 } 355 return kEqual; 356} 357 358void CodeGeneratorX86::DumpCoreRegister(std::ostream& stream, int reg) const { 359 stream << X86ManagedRegister::FromCpuRegister(Register(reg)); 360} 361 362void CodeGeneratorX86::DumpFloatingPointRegister(std::ostream& stream, int reg) const { 363 stream << X86ManagedRegister::FromXmmRegister(XmmRegister(reg)); 364} 365 366size_t CodeGeneratorX86::SaveCoreRegister(size_t stack_index, uint32_t reg_id) { 367 __ movl(Address(ESP, stack_index), static_cast<Register>(reg_id)); 368 return 
kX86WordSize; 369} 370 371size_t CodeGeneratorX86::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) { 372 __ movl(static_cast<Register>(reg_id), Address(ESP, stack_index)); 373 return kX86WordSize; 374} 375 376CodeGeneratorX86::CodeGeneratorX86(HGraph* graph, const CompilerOptions& compiler_options) 377 : CodeGenerator(graph, kNumberOfCpuRegisters, kNumberOfXmmRegisters, 378 kNumberOfRegisterPairs, 0, 0, compiler_options), 379 block_labels_(graph->GetArena(), 0), 380 location_builder_(graph, this), 381 instruction_visitor_(graph, this), 382 move_resolver_(graph->GetArena(), this) {} 383 384size_t CodeGeneratorX86::FrameEntrySpillSize() const { 385 return kNumberOfPushedRegistersAtEntry * kX86WordSize; 386} 387 388Location CodeGeneratorX86::AllocateFreeRegister(Primitive::Type type) const { 389 switch (type) { 390 case Primitive::kPrimLong: { 391 size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs); 392 X86ManagedRegister pair = 393 X86ManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg)); 394 DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]); 395 DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]); 396 blocked_core_registers_[pair.AsRegisterPairLow()] = true; 397 blocked_core_registers_[pair.AsRegisterPairHigh()] = true; 398 UpdateBlockedPairRegisters(); 399 return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh()); 400 } 401 402 case Primitive::kPrimByte: 403 case Primitive::kPrimBoolean: 404 case Primitive::kPrimChar: 405 case Primitive::kPrimShort: 406 case Primitive::kPrimInt: 407 case Primitive::kPrimNot: { 408 Register reg = static_cast<Register>( 409 FindFreeEntry(blocked_core_registers_, kNumberOfCpuRegisters)); 410 // Block all register pairs that contain `reg`. 
411 for (int i = 0; i < kNumberOfRegisterPairs; i++) { 412 X86ManagedRegister current = 413 X86ManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i)); 414 if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) { 415 blocked_register_pairs_[i] = true; 416 } 417 } 418 return Location::RegisterLocation(reg); 419 } 420 421 case Primitive::kPrimFloat: 422 case Primitive::kPrimDouble: { 423 return Location::FpuRegisterLocation( 424 FindFreeEntry(blocked_fpu_registers_, kNumberOfXmmRegisters)); 425 } 426 427 case Primitive::kPrimVoid: 428 LOG(FATAL) << "Unreachable type " << type; 429 } 430 431 return Location(); 432} 433 434void CodeGeneratorX86::SetupBlockedRegisters(bool is_baseline ATTRIBUTE_UNUSED) const { 435 // Don't allocate the dalvik style register pair passing. 436 blocked_register_pairs_[ECX_EDX] = true; 437 438 // Stack register is always reserved. 439 blocked_core_registers_[ESP] = true; 440 441 // TODO: We currently don't use Quick's callee saved registers. 442 DCHECK(kFollowsQuickABI); 443 blocked_core_registers_[EBP] = true; 444 blocked_core_registers_[ESI] = true; 445 blocked_core_registers_[EDI] = true; 446 447 UpdateBlockedPairRegisters(); 448} 449 450void CodeGeneratorX86::UpdateBlockedPairRegisters() const { 451 for (int i = 0; i < kNumberOfRegisterPairs; i++) { 452 X86ManagedRegister current = 453 X86ManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i)); 454 if (blocked_core_registers_[current.AsRegisterPairLow()] 455 || blocked_core_registers_[current.AsRegisterPairHigh()]) { 456 blocked_register_pairs_[i] = true; 457 } 458 } 459} 460 461InstructionCodeGeneratorX86::InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen) 462 : HGraphVisitor(graph), 463 assembler_(codegen->GetAssembler()), 464 codegen_(codegen) {} 465 466void CodeGeneratorX86::GenerateFrameEntry() { 467 // Create a fake register to mimic Quick. 
468 static const int kFakeReturnRegister = 8; 469 core_spill_mask_ |= (1 << kFakeReturnRegister); 470 471 bool skip_overflow_check = 472 IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86); 473 bool implicitStackOverflowChecks = GetCompilerOptions().GetImplicitStackOverflowChecks(); 474 475 if (!skip_overflow_check && implicitStackOverflowChecks) { 476 __ testl(EAX, Address(ESP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86)))); 477 RecordPcInfo(nullptr, 0); 478 } 479 480 // The return PC has already been pushed on the stack. 481 __ subl(ESP, Immediate(GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86WordSize)); 482 483 if (!skip_overflow_check && !implicitStackOverflowChecks) { 484 SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathX86(); 485 AddSlowPath(slow_path); 486 487 __ fs()->cmpl(ESP, Address::Absolute(Thread::StackEndOffset<kX86WordSize>())); 488 __ j(kLess, slow_path->GetEntryLabel()); 489 } 490 491 __ movl(Address(ESP, kCurrentMethodStackOffset), EAX); 492} 493 494void CodeGeneratorX86::GenerateFrameExit() { 495 __ addl(ESP, Immediate(GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86WordSize)); 496} 497 498void CodeGeneratorX86::Bind(HBasicBlock* block) { 499 __ Bind(GetLabelOf(block)); 500} 501 502void CodeGeneratorX86::LoadCurrentMethod(Register reg) { 503 __ movl(reg, Address(ESP, kCurrentMethodStackOffset)); 504} 505 506Location CodeGeneratorX86::GetStackLocation(HLoadLocal* load) const { 507 switch (load->GetType()) { 508 case Primitive::kPrimLong: 509 case Primitive::kPrimDouble: 510 return Location::DoubleStackSlot(GetStackSlot(load->GetLocal())); 511 break; 512 513 case Primitive::kPrimInt: 514 case Primitive::kPrimNot: 515 case Primitive::kPrimFloat: 516 return Location::StackSlot(GetStackSlot(load->GetLocal())); 517 518 case Primitive::kPrimBoolean: 519 case Primitive::kPrimByte: 520 case Primitive::kPrimChar: 521 case Primitive::kPrimShort: 522 case 
Primitive::kPrimVoid: 523 LOG(FATAL) << "Unexpected type " << load->GetType(); 524 } 525 526 LOG(FATAL) << "Unreachable"; 527 return Location(); 528} 529 530Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) { 531 switch (type) { 532 case Primitive::kPrimBoolean: 533 case Primitive::kPrimByte: 534 case Primitive::kPrimChar: 535 case Primitive::kPrimShort: 536 case Primitive::kPrimInt: 537 case Primitive::kPrimFloat: 538 case Primitive::kPrimNot: { 539 uint32_t index = gp_index_++; 540 if (index < calling_convention.GetNumberOfRegisters()) { 541 return Location::RegisterLocation(calling_convention.GetRegisterAt(index)); 542 } else { 543 return Location::StackSlot(calling_convention.GetStackOffsetOf(index)); 544 } 545 } 546 547 case Primitive::kPrimLong: 548 case Primitive::kPrimDouble: { 549 uint32_t index = gp_index_; 550 gp_index_ += 2; 551 if (index + 1 < calling_convention.GetNumberOfRegisters()) { 552 X86ManagedRegister pair = X86ManagedRegister::FromRegisterPair( 553 calling_convention.GetRegisterPairAt(index)); 554 return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh()); 555 } else if (index + 1 == calling_convention.GetNumberOfRegisters()) { 556 // On X86, the register index and stack index of a quick parameter is the same, since 557 // we are passing floating pointer values in core registers. 
558 return Location::QuickParameter(index, index); 559 } else { 560 return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(index)); 561 } 562 } 563 564 case Primitive::kPrimVoid: 565 LOG(FATAL) << "Unexpected parameter type " << type; 566 break; 567 } 568 return Location(); 569} 570 571void CodeGeneratorX86::Move32(Location destination, Location source) { 572 if (source.Equals(destination)) { 573 return; 574 } 575 if (destination.IsRegister()) { 576 if (source.IsRegister()) { 577 __ movl(destination.AsRegister<Register>(), source.AsRegister<Register>()); 578 } else if (source.IsFpuRegister()) { 579 __ movd(destination.AsRegister<Register>(), source.AsFpuRegister<XmmRegister>()); 580 } else { 581 DCHECK(source.IsStackSlot()); 582 __ movl(destination.AsRegister<Register>(), Address(ESP, source.GetStackIndex())); 583 } 584 } else if (destination.IsFpuRegister()) { 585 if (source.IsRegister()) { 586 __ movd(destination.AsFpuRegister<XmmRegister>(), source.AsRegister<Register>()); 587 } else if (source.IsFpuRegister()) { 588 __ movaps(destination.AsFpuRegister<XmmRegister>(), source.AsFpuRegister<XmmRegister>()); 589 } else { 590 DCHECK(source.IsStackSlot()); 591 __ movss(destination.AsFpuRegister<XmmRegister>(), Address(ESP, source.GetStackIndex())); 592 } 593 } else { 594 DCHECK(destination.IsStackSlot()) << destination; 595 if (source.IsRegister()) { 596 __ movl(Address(ESP, destination.GetStackIndex()), source.AsRegister<Register>()); 597 } else if (source.IsFpuRegister()) { 598 __ movss(Address(ESP, destination.GetStackIndex()), source.AsFpuRegister<XmmRegister>()); 599 } else { 600 DCHECK(source.IsStackSlot()); 601 __ pushl(Address(ESP, source.GetStackIndex())); 602 __ popl(Address(ESP, destination.GetStackIndex())); 603 } 604 } 605} 606 607void CodeGeneratorX86::Move64(Location destination, Location source) { 608 if (source.Equals(destination)) { 609 return; 610 } 611 if (destination.IsRegisterPair()) { 612 if (source.IsRegisterPair()) { 613 
EmitParallelMoves( 614 Location::RegisterLocation(source.AsRegisterPairHigh<Register>()), 615 Location::RegisterLocation(destination.AsRegisterPairHigh<Register>()), 616 Location::RegisterLocation(source.AsRegisterPairLow<Register>()), 617 Location::RegisterLocation(destination.AsRegisterPairLow<Register>())); 618 } else if (source.IsFpuRegister()) { 619 LOG(FATAL) << "Unimplemented"; 620 } else if (source.IsQuickParameter()) { 621 uint16_t register_index = source.GetQuickParameterRegisterIndex(); 622 uint16_t stack_index = source.GetQuickParameterStackIndex(); 623 InvokeDexCallingConvention calling_convention; 624 EmitParallelMoves( 625 Location::RegisterLocation(calling_convention.GetRegisterAt(register_index)), 626 Location::RegisterLocation(destination.AsRegisterPairLow<Register>()), 627 Location::StackSlot( 628 calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize()), 629 Location::RegisterLocation(destination.AsRegisterPairHigh<Register>())); 630 } else { 631 // No conflict possible, so just do the moves. 
632 DCHECK(source.IsDoubleStackSlot()); 633 __ movl(destination.AsRegisterPairLow<Register>(), Address(ESP, source.GetStackIndex())); 634 __ movl(destination.AsRegisterPairHigh<Register>(), 635 Address(ESP, source.GetHighStackIndex(kX86WordSize))); 636 } 637 } else if (destination.IsQuickParameter()) { 638 InvokeDexCallingConvention calling_convention; 639 uint16_t register_index = destination.GetQuickParameterRegisterIndex(); 640 uint16_t stack_index = destination.GetQuickParameterStackIndex(); 641 if (source.IsRegisterPair()) { 642 LOG(FATAL) << "Unimplemented"; 643 } else if (source.IsFpuRegister()) { 644 LOG(FATAL) << "Unimplemented"; 645 } else { 646 DCHECK(source.IsDoubleStackSlot()); 647 EmitParallelMoves( 648 Location::StackSlot(source.GetStackIndex()), 649 Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index)), 650 Location::StackSlot(source.GetHighStackIndex(kX86WordSize)), 651 Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index + 1))); 652 __ movl(calling_convention.GetRegisterAt(register_index), Address(ESP, source.GetStackIndex())); 653 } 654 } else if (destination.IsFpuRegister()) { 655 if (source.IsDoubleStackSlot()) { 656 __ movsd(destination.AsFpuRegister<XmmRegister>(), Address(ESP, source.GetStackIndex())); 657 } else { 658 LOG(FATAL) << "Unimplemented"; 659 } 660 } else { 661 DCHECK(destination.IsDoubleStackSlot()) << destination; 662 if (source.IsRegisterPair()) { 663 // No conflict possible, so just do the moves. 664 __ movl(Address(ESP, destination.GetStackIndex()), source.AsRegisterPairLow<Register>()); 665 __ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)), 666 source.AsRegisterPairHigh<Register>()); 667 } else if (source.IsQuickParameter()) { 668 // No conflict possible, so just do the move. 
669 InvokeDexCallingConvention calling_convention; 670 uint16_t register_index = source.GetQuickParameterRegisterIndex(); 671 uint16_t stack_index = source.GetQuickParameterStackIndex(); 672 // Just move the low part. The only time a source is a quick parameter is 673 // when moving the parameter to its stack locations. And the (Java) caller 674 // of this method has already done that. 675 __ movl(Address(ESP, destination.GetStackIndex()), 676 calling_convention.GetRegisterAt(register_index)); 677 DCHECK_EQ(calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize(), 678 static_cast<size_t>(destination.GetHighStackIndex(kX86WordSize))); 679 } else if (source.IsFpuRegister()) { 680 __ movsd(Address(ESP, destination.GetStackIndex()), source.AsFpuRegister<XmmRegister>()); 681 } else { 682 DCHECK(source.IsDoubleStackSlot()); 683 EmitParallelMoves( 684 Location::StackSlot(source.GetStackIndex()), 685 Location::StackSlot(destination.GetStackIndex()), 686 Location::StackSlot(source.GetHighStackIndex(kX86WordSize)), 687 Location::StackSlot(destination.GetHighStackIndex(kX86WordSize))); 688 } 689 } 690} 691 692void CodeGeneratorX86::Move(HInstruction* instruction, Location location, HInstruction* move_for) { 693 LocationSummary* locations = instruction->GetLocations(); 694 if (locations != nullptr && locations->Out().Equals(location)) { 695 return; 696 } 697 698 if (locations != nullptr && locations->Out().IsConstant()) { 699 HConstant* const_to_move = locations->Out().GetConstant(); 700 if (const_to_move->IsIntConstant()) { 701 Immediate imm(const_to_move->AsIntConstant()->GetValue()); 702 if (location.IsRegister()) { 703 __ movl(location.AsRegister<Register>(), imm); 704 } else if (location.IsStackSlot()) { 705 __ movl(Address(ESP, location.GetStackIndex()), imm); 706 } else { 707 DCHECK(location.IsConstant()); 708 DCHECK_EQ(location.GetConstant(), const_to_move); 709 } 710 } else if (const_to_move->IsLongConstant()) { 711 int64_t value = 
const_to_move->AsLongConstant()->GetValue(); 712 if (location.IsRegisterPair()) { 713 __ movl(location.AsRegisterPairLow<Register>(), Immediate(Low32Bits(value))); 714 __ movl(location.AsRegisterPairHigh<Register>(), Immediate(High32Bits(value))); 715 } else if (location.IsDoubleStackSlot()) { 716 __ movl(Address(ESP, location.GetStackIndex()), Immediate(Low32Bits(value))); 717 __ movl(Address(ESP, location.GetHighStackIndex(kX86WordSize)), 718 Immediate(High32Bits(value))); 719 } else { 720 DCHECK(location.IsConstant()); 721 DCHECK_EQ(location.GetConstant(), instruction); 722 } 723 } 724 } else if (instruction->IsTemporary()) { 725 Location temp_location = GetTemporaryLocation(instruction->AsTemporary()); 726 if (temp_location.IsStackSlot()) { 727 Move32(location, temp_location); 728 } else { 729 DCHECK(temp_location.IsDoubleStackSlot()); 730 Move64(location, temp_location); 731 } 732 } else if (instruction->IsLoadLocal()) { 733 int slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal()); 734 switch (instruction->GetType()) { 735 case Primitive::kPrimBoolean: 736 case Primitive::kPrimByte: 737 case Primitive::kPrimChar: 738 case Primitive::kPrimShort: 739 case Primitive::kPrimInt: 740 case Primitive::kPrimNot: 741 case Primitive::kPrimFloat: 742 Move32(location, Location::StackSlot(slot)); 743 break; 744 745 case Primitive::kPrimLong: 746 case Primitive::kPrimDouble: 747 Move64(location, Location::DoubleStackSlot(slot)); 748 break; 749 750 default: 751 LOG(FATAL) << "Unimplemented local type " << instruction->GetType(); 752 } 753 } else { 754 DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary()); 755 switch (instruction->GetType()) { 756 case Primitive::kPrimBoolean: 757 case Primitive::kPrimByte: 758 case Primitive::kPrimChar: 759 case Primitive::kPrimShort: 760 case Primitive::kPrimInt: 761 case Primitive::kPrimNot: 762 case Primitive::kPrimFloat: 763 Move32(location, locations->Out()); 764 break; 765 766 case 
Primitive::kPrimLong: 767 case Primitive::kPrimDouble: 768 Move64(location, locations->Out()); 769 break; 770 771 default: 772 LOG(FATAL) << "Unexpected type " << instruction->GetType(); 773 } 774 } 775} 776 777void LocationsBuilderX86::VisitGoto(HGoto* got) { 778 got->SetLocations(nullptr); 779} 780 781void InstructionCodeGeneratorX86::VisitGoto(HGoto* got) { 782 HBasicBlock* successor = got->GetSuccessor(); 783 DCHECK(!successor->IsExitBlock()); 784 785 HBasicBlock* block = got->GetBlock(); 786 HInstruction* previous = got->GetPrevious(); 787 788 HLoopInformation* info = block->GetLoopInformation(); 789 if (info != nullptr && info->IsBackEdge(block) && info->HasSuspendCheck()) { 790 codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck()); 791 GenerateSuspendCheck(info->GetSuspendCheck(), successor); 792 return; 793 } 794 795 if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) { 796 GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr); 797 } 798 if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) { 799 __ jmp(codegen_->GetLabelOf(successor)); 800 } 801} 802 803void LocationsBuilderX86::VisitExit(HExit* exit) { 804 exit->SetLocations(nullptr); 805} 806 807void InstructionCodeGeneratorX86::VisitExit(HExit* exit) { 808 UNUSED(exit); 809 if (kIsDebugBuild) { 810 __ Comment("Unreachable"); 811 __ int3(); 812 } 813} 814 815void LocationsBuilderX86::VisitIf(HIf* if_instr) { 816 LocationSummary* locations = 817 new (GetGraph()->GetArena()) LocationSummary(if_instr, LocationSummary::kNoCall); 818 HInstruction* cond = if_instr->InputAt(0); 819 if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) { 820 locations->SetInAt(0, Location::Any()); 821 } 822} 823 824void InstructionCodeGeneratorX86::VisitIf(HIf* if_instr) { 825 HInstruction* cond = if_instr->InputAt(0); 826 if (cond->IsIntConstant()) { 827 // Constant condition, statically compared against 1. 
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      // Always-taken branch: jump straight to the true successor unless it
      // is the fall-through block.
      if (!codegen_->GoesToNextBlock(if_instr->GetBlock(),
                                     if_instr->IfTrueSuccessor())) {
        __ jmp(codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
      }
      return;
    } else {
      // Never-taken branch: fall through to the jump to the false successor.
      DCHECK_EQ(cond_value, 0);
    }
  } else {
    bool materialized =
        !cond->IsCondition() || cond->AsCondition()->NeedsMaterialization();
    // Moves do not affect the eflags register, so if the condition is
    // evaluated just before the if, we don't need to evaluate it
    // again.
    bool eflags_set = cond->IsCondition()
        && cond->AsCondition()->IsBeforeWhenDisregardMoves(if_instr);
    if (materialized) {
      if (!eflags_set) {
        // Materialized condition, compare against 0.
        Location lhs = if_instr->GetLocations()->InAt(0);
        if (lhs.IsRegister()) {
          __ cmpl(lhs.AsRegister<Register>(), Immediate(0));
        } else {
          __ cmpl(Address(ESP, lhs.GetStackIndex()), Immediate(0));
        }
        __ j(kNotEqual, codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
      } else {
        // The compare that set eflags is immediately before this if, so we
        // can branch directly on its condition code.
        __ j(X86Condition(cond->AsCondition()->GetCondition()),
             codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
      }
    } else {
      // Non-materialized condition: emit the compare here, then branch.
      Location lhs = cond->GetLocations()->InAt(0);
      Location rhs = cond->GetLocations()->InAt(1);
      // LHS is guaranteed to be in a register (see
      // LocationsBuilderX86::VisitCondition).
      if (rhs.IsRegister()) {
        __ cmpl(lhs.AsRegister<Register>(), rhs.AsRegister<Register>());
      } else if (rhs.IsConstant()) {
        HIntConstant* instruction = rhs.GetConstant()->AsIntConstant();
        Immediate imm(instruction->AsIntConstant()->GetValue());
        __ cmpl(lhs.AsRegister<Register>(), imm);
      } else {
        __ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex()));
      }
      __ j(X86Condition(cond->AsCondition()->GetCondition()),
           codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
    }
  }
  // Branch to the false successor unless it is the fall-through block.
  if (!codegen_->GoesToNextBlock(if_instr->GetBlock(),
                                 if_instr->IfFalseSuccessor())) {
    __ jmp(codegen_->GetLabelOf(if_instr->IfFalseSuccessor()));
  }
}

// Locals need no locations; they map to fixed stack slots.
void LocationsBuilderX86::VisitLocal(HLocal* local) {
  local->SetLocations(nullptr);
}

void InstructionCodeGeneratorX86::VisitLocal(HLocal* local) {
  // Locals only appear in the entry block; no code is emitted for them.
  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
}

void LocationsBuilderX86::VisitLoadLocal(HLoadLocal* local) {
  local->SetLocations(nullptr);
}

void InstructionCodeGeneratorX86::VisitLoadLocal(HLoadLocal* load) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(load);
}

// A store to a local writes directly into the local's stack slot; 64-bit
// values (long/double) use a double-width slot.
void LocationsBuilderX86::VisitStoreLocal(HStoreLocal* store) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(store, LocationSummary::kNoCall);
  switch (store->InputAt(1)->GetType()) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimFloat:
      // 32-bit value: single stack slot of the local.
      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      // 64-bit value: double stack slot of the local.
      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    default:
      LOG(FATAL) << "Unknown local type " << store->InputAt(1)->GetType();
  }
  store->SetLocations(locations);
}

void InstructionCodeGeneratorX86::VisitStoreLocal(HStoreLocal* store) {
  // The actual store is performed by the parallel moves the register
  // allocator inserts; nothing to emit here.
  UNUSED(store);
}

// Comparisons take their left operand in a register; the right operand can
// live anywhere. An output register is only needed when materialized.
void LocationsBuilderX86::VisitCondition(HCondition* comp) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(comp, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::Any());
  if (comp->NeedsMaterialization()) {
    locations->SetOut(Location::RequiresRegister());
  }
}

// When materialized, produce the boolean result with cmp + setcc. The
// non-materialized (branch-only) case is handled in VisitIf.
void InstructionCodeGeneratorX86::VisitCondition(HCondition* comp) {
  if (comp->NeedsMaterialization()) {
    LocationSummary* locations = comp->GetLocations();
    Register reg = locations->Out().AsRegister<Register>();
    // Clear register: setcc only sets the low byte.
    // (The xorl must come before the cmpl: xorl clobbers eflags, and setb
    // below reads the flags produced by the compare.)
    __ xorl(reg, reg);
    if (locations->InAt(1).IsRegister()) {
      __ cmpl(locations->InAt(0).AsRegister<Register>(),
              locations->InAt(1).AsRegister<Register>());
    } else if (locations->InAt(1).IsConstant()) {
      HConstant* instruction = locations->InAt(1).GetConstant();
      Immediate imm(instruction->AsIntConstant()->GetValue());
      __ cmpl(locations->InAt(0).AsRegister<Register>(), imm);
    } else {
      __ cmpl(locations->InAt(0).AsRegister<Register>(),
              Address(ESP, locations->InAt(1).GetStackIndex()));
    }
    __ setb(X86Condition(comp->GetCondition()), reg);
  }
}

// All concrete comparison visitors below simply delegate to the shared
// VisitCondition handling above.
void LocationsBuilderX86::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorX86::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderX86::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorX86::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderX86::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorX86::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderX86::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorX86::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderX86::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorX86::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderX86::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorX86::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

// Constants are not generated eagerly; each use reads them in place.
void LocationsBuilderX86::VisitIntConstant(HIntConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorX86::VisitIntConstant(HIntConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorX86::VisitLongConstant(HLongConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderX86::VisitFloatConstant(HFloatConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorX86::VisitFloatConstant(HFloatConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderX86::VisitDoubleConstant(HDoubleConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorX86::VisitDoubleConstant(HDoubleConstant* constant) {
  // Will be generated at use site.
  UNUSED(constant);
}

void LocationsBuilderX86::VisitReturnVoid(HReturnVoid* ret) {
  ret->SetLocations(nullptr);
}

// Tears down the frame and returns; there is no value to place.
void InstructionCodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret) {
  UNUSED(ret);
  codegen_->GenerateFrameExit();
  __ ret();
}

// Pins the returned value to the conventional return registers:
// EAX for 32-bit core values, EAX/EDX for longs, XMM0 for float/double.
void LocationsBuilderX86::VisitReturn(HReturn* ret) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
  switch (ret->InputAt(0)->GetType()) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
      locations->SetInAt(0, Location::RegisterLocation(EAX));
      break;

    case Primitive::kPrimLong:
      locations->SetInAt(
          0, Location::RegisterPairLocation(EAX, EDX));
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(
          0, Location::FpuRegisterLocation(XMM0));
      break;

    default:
      LOG(FATAL) << "Unknown return type " << ret->InputAt(0)->GetType();
  }
}

// The value is already in the conventional return register(s) per the
// location summary above; debug builds verify that before exiting.
void InstructionCodeGeneratorX86::VisitReturn(HReturn* ret) {
  if (kIsDebugBuild) {
    switch (ret->InputAt(0)->GetType()) {
      case Primitive::kPrimBoolean:
      case Primitive::kPrimByte:
      case Primitive::kPrimChar:
      case Primitive::kPrimShort:
      case Primitive::kPrimInt:
      case Primitive::kPrimNot:
        DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegister<Register>(), EAX);
        break;

      case Primitive::kPrimLong:
        DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairLow<Register>(), EAX);
        DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairHigh<Register>(), EDX);
        break;

      case Primitive::kPrimFloat:
      case Primitive::kPrimDouble:
        DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister<XmmRegister>(), XMM0);
        break;

      default:
        LOG(FATAL)
            << "Unknown return type " << ret->InputAt(0)->GetType();
    }
  }
  codegen_->GenerateFrameExit();
  __ ret();
}

void LocationsBuilderX86::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  HandleInvoke(invoke);
}

// Calls a static or direct method through the caller's dex cache:
// current method -> dex_cache_resolved_methods_ -> callee -> entry point.
void InstructionCodeGeneratorX86::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();

  // TODO: Implement all kinds of calls:
  // 1) boot -> boot
  // 2) app -> boot
  // 3) app -> app
  //
  // Currently we implement the app -> app logic, which looks up in the resolve cache.

  // temp = method;
  codegen_->LoadCurrentMethod(temp);
  // temp = temp->dex_cache_resolved_methods_;
  __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
  // temp = temp[index_in_cache]
  __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex())));
  // (temp + offset_of_quick_compiled_code)()
  __ call(Address(
      temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));

  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void LocationsBuilderX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  HandleInvoke(invoke);
}

// Shared location setup for all invoke flavors: arguments follow the dex
// calling convention, EAX is reserved as a temp (it later holds the callee
// ArtMethod*), and the return value lands in the conventional registers.
void LocationsBuilderX86::HandleInvoke(HInvoke* invoke) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
  locations->AddTemp(Location::RegisterLocation(EAX));

  InvokeDexCallingConventionVisitor calling_convention_visitor;
  for (size_t i = 0; i < invoke->InputCount(); i++) {
    HInstruction* input = invoke->InputAt(i);
    locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
  }

  switch (invoke->GetType()) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
      locations->SetOut(Location::RegisterLocation(EAX));
      break;

    case Primitive::kPrimLong:
      locations->SetOut(Location::RegisterPairLocation(EAX, EDX));
      break;

    case Primitive::kPrimVoid:
      break;

    case Primitive::kPrimDouble:
    case Primitive::kPrimFloat:
      locations->SetOut(Location::FpuRegisterLocation(XMM0));
      break;
  }

  invoke->SetLocations(locations);
}

// Virtual dispatch: load the receiver's class, fetch the method from the
// class's embedded vtable, then call its quick entry point.
void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
          invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ movl(temp, Address(ESP, receiver.GetStackIndex()));
    __ movl(temp, Address(temp, class_offset));
  } else {
    __ movl(temp, Address(receiver.AsRegister<Register>(), class_offset));
  }
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetMethodAt(method_offset);
  __ movl(temp, Address(temp, method_offset));
  // call temp->GetEntryPoint();
  __ call(Address(
      temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));

  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void LocationsBuilderX86::VisitInvokeInterface(HInvokeInterface* invoke) {
  HandleInvoke(invoke);
  // Add the hidden argument.
  invoke->GetLocations()->AddTemp(Location::FpuRegisterLocation(XMM0));
}

// Interface dispatch: stash the dex method index as a hidden argument in an
// XMM temp, load the receiver's class, fetch the method from the embedded
// IMT, then call its quick entry point.
void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke) {
  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
  uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
          (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  // Set the hidden argument.
  __ movl(temp, Immediate(invoke->GetDexMethodIndex()));
  __ movd(invoke->GetLocations()->GetTemp(1).AsFpuRegister<XmmRegister>(), temp);

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ movl(temp, Address(ESP, receiver.GetStackIndex()));
    __ movl(temp, Address(temp, class_offset));
  } else {
    __ movl(temp, Address(receiver.AsRegister<Register>(), class_offset));
  }
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetImtEntryAt(method_offset);
  __ movl(temp, Address(temp, method_offset));
  // call temp->GetEntryPoint();
  __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
      kX86WordSize).Int32Value()));

  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

// Negation is done in place (SameAsFirstInput); the float/double cases
// need extra temps to build the sign-bit mask.
void LocationsBuilderX86::VisitNeg(HNeg* neg) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetOut(Location::SameAsFirstInput());
      break;

    case
Primitive::kPrimFloat:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::SameAsFirstInput());
      // A core temp to materialize the mask and an XMM temp to hold it.
      locations->AddTemp(Location::RequiresRegister());
      locations->AddTemp(Location::RequiresFpuRegister());
      break;

    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::SameAsFirstInput());
      locations->AddTemp(Location::RequiresFpuRegister());
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}

// Emits the negation: negl for ints, a neg/adc/neg sequence over the
// register pair for longs, and a sign-bit xor for floats and doubles.
void InstructionCodeGeneratorX86::VisitNeg(HNeg* neg) {
  LocationSummary* locations = neg->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
      DCHECK(in.IsRegister());
      DCHECK(in.Equals(out));
      __ negl(out.AsRegister<Register>());
      break;

    case Primitive::kPrimLong:
      DCHECK(in.IsRegisterPair());
      DCHECK(in.Equals(out));
      __ negl(out.AsRegisterPairLow<Register>());
      // Negation is similar to subtraction from zero. The least
      // significant byte triggers a borrow when it is different from
      // zero; to take it into account, add 1 to the most significant
      // byte if the carry flag (CF) is set to 1 after the first NEGL
      // operation.
      __ adcl(out.AsRegisterPairHigh<Register>(), Immediate(0));
      __ negl(out.AsRegisterPairHigh<Register>());
      break;

    case Primitive::kPrimFloat: {
      DCHECK(in.Equals(out));
      Register constant = locations->GetTemp(0).AsRegister<Register>();
      XmmRegister mask = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
      // Implement float negation with an exclusive or with value
      // 0x80000000 (mask for bit 31, representing the sign of a
      // single-precision floating-point number).
      __ movl(constant, Immediate(INT32_C(0x80000000)));
      __ movd(mask, constant);
      __ xorps(out.AsFpuRegister<XmmRegister>(), mask);
      break;
    }

    case Primitive::kPrimDouble: {
      DCHECK(in.Equals(out));
      XmmRegister mask = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
      // Implement double negation with an exclusive or with value
      // 0x8000000000000000 (mask for bit 63, representing the sign of
      // a double-precision floating-point number).
      __ LoadLongConstant(mask, INT64_C(0x8000000000000000));
      __ xorpd(out.AsFpuRegister<XmmRegister>(), mask);
      break;
    }

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}

// Chooses locations for every supported conversion pair. Most conversions
// run inline; float-to-long and double-to-long call into the runtime and
// therefore use the runtime calling convention.
void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) {
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();
  DCHECK_NE(result_type, input_type);

  // The float-to-long and double-to-long type conversions rely on a
  // call to the runtime.
  LocationSummary::CallKind call_kind =
      ((input_type == Primitive::kPrimFloat || input_type == Primitive::kPrimDouble)
       && result_type == Primitive::kPrimLong)
      ? LocationSummary::kCall
      : LocationSummary::kNoCall;
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);

  switch (result_type) {
    case Primitive::kPrimByte:
      switch (input_type) {
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-byte' instruction.
          locations->SetInAt(0, Location::Any());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimShort:
      switch (input_type) {
        case Primitive::kPrimByte:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-short' instruction.
          locations->SetInAt(0, Location::Any());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimInt:
      switch (input_type) {
        case Primitive::kPrimLong:
          // Processing a Dex `long-to-int' instruction.
          locations->SetInAt(0, Location::Any());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-int' instruction.
          locations->SetInAt(0, Location::RequiresFpuRegister());
          locations->SetOut(Location::RequiresRegister());
          // FP temp used for the overflow/NaN comparison value.
          locations->AddTemp(Location::RequiresFpuRegister());
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-int' instruction.
          locations->SetInAt(0, Location::RequiresFpuRegister());
          locations->SetOut(Location::RequiresRegister());
          locations->AddTemp(Location::RequiresFpuRegister());
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimLong:
      switch (input_type) {
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-long' instruction.
          // Pinned to EAX / EAX:EDX so the codegen can use cdq.
          locations->SetInAt(0, Location::RegisterLocation(EAX));
          locations->SetOut(Location::RegisterPairLocation(EAX, EDX));
          break;

        case Primitive::kPrimFloat: {
          // Processing a Dex `float-to-long' instruction.
          InvokeRuntimeCallingConvention calling_convention;
          // Note that on x86 floating-point parameters are passed
          // through core registers (here, EAX).
          locations->SetInAt(0, Location::RegisterLocation(
              calling_convention.GetRegisterAt(0)));
          // The runtime helper puts the result in EAX, EDX.
          locations->SetOut(Location::RegisterPairLocation(EAX, EDX));
          break;
        }

        case Primitive::kPrimDouble: {
          // Processing a Dex `double-to-long' instruction.
          InvokeRuntimeCallingConvention calling_convention;
          // Note that on x86 floating-point parameters are passed
          // through core registers (here, EAX and ECX).
          locations->SetInAt(0, Location::RegisterPairLocation(
              calling_convention.GetRegisterAt(0),
              calling_convention.GetRegisterAt(1)));
          // The runtime helper puts the result in EAX, EDX.
          locations->SetOut(Location::RegisterPairLocation(EAX, EDX));
          break;
        }
        // NOTE(review): this break is unreachable (the case above always
        // breaks); kept as-is pending cleanup.
        break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimChar:
      switch (input_type) {
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          // Processing a Dex `int-to-char' instruction.
          locations->SetInAt(0, Location::Any());
          locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimFloat:
      switch (input_type) {
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-float' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresFpuRegister());
          break;

        case Primitive::kPrimLong:
          // Processing a Dex `long-to-float' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresFpuRegister());
          // Two FP temps for the double-precision intermediate computation.
          locations->AddTemp(Location::RequiresFpuRegister());
          locations->AddTemp(Location::RequiresFpuRegister());
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-float' instruction.
          locations->SetInAt(0, Location::RequiresFpuRegister());
          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    case Primitive::kPrimDouble:
      switch (input_type) {
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-double' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresFpuRegister());
          break;

        case Primitive::kPrimLong:
          // Processing a Dex `long-to-double' instruction.
          locations->SetInAt(0, Location::RequiresRegister());
          locations->SetOut(Location::RequiresFpuRegister());
          locations->AddTemp(Location::RequiresFpuRegister());
          locations->AddTemp(Location::RequiresFpuRegister());
          break;

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-double' instruction.
          locations->SetInAt(0, Location::RequiresFpuRegister());
          locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    default:
      LOG(FATAL) << "Unexpected type conversion from " << input_type
                 << " to " << result_type;
  }
}

// Emits the actual conversion code for each (input, result) type pair
// whose locations were chosen by the builder above.
void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations = conversion->GetLocations();
  Location out = locations->Out();
  Location in = locations->InAt(0);
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();
  DCHECK_NE(result_type, input_type);
  switch (result_type) {
    case Primitive::kPrimByte:
      switch (input_type) {
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-byte' instruction.
          if (in.IsRegister()) {
            // Sign-extend the low byte of the input register.
            __ movsxb(out.AsRegister<Register>(), in.AsRegister<ByteRegister>());
          } else if (in.IsStackSlot()) {
            __ movsxb(out.AsRegister<Register>(), Address(ESP, in.GetStackIndex()));
          } else {
            // Constant input: fold the truncation at compile time.
            DCHECK(in.GetConstant()->IsIntConstant());
            int32_t value = in.GetConstant()->AsIntConstant()->GetValue();
            __ movl(out.AsRegister<Register>(), Immediate(static_cast<int8_t>(value)));
          }
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimShort:
      switch (input_type) {
        case Primitive::kPrimByte:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-short' instruction.
          if (in.IsRegister()) {
            __ movsxw(out.AsRegister<Register>(), in.AsRegister<Register>());
          } else if (in.IsStackSlot()) {
            __ movsxw(out.AsRegister<Register>(), Address(ESP, in.GetStackIndex()));
          } else {
            DCHECK(in.GetConstant()->IsIntConstant());
            int32_t value = in.GetConstant()->AsIntConstant()->GetValue();
            __ movl(out.AsRegister<Register>(), Immediate(static_cast<int16_t>(value)));
          }
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimInt:
      switch (input_type) {
        case Primitive::kPrimLong:
          // Processing a Dex `long-to-int' instruction: keep the low word.
          if (in.IsRegisterPair()) {
            __ movl(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
          } else if (in.IsDoubleStackSlot()) {
            // Loads 32 bits at the base of the double stack slot.
            __ movl(out.AsRegister<Register>(), Address(ESP, in.GetStackIndex()));
          } else {
            DCHECK(in.IsConstant());
            DCHECK(in.GetConstant()->IsLongConstant());
            int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
            __ movl(out.AsRegister<Register>(), Immediate(static_cast<int32_t>(value)));
          }
          break;

        case Primitive::kPrimFloat: {
          // Processing a Dex `float-to-int' instruction.
          // Dex semantics: values >= 2^31-1 clamp to kPrimIntMax, NaN maps
          // to 0, everything else truncates towards zero.
          XmmRegister input = in.AsFpuRegister<XmmRegister>();
          Register output = out.AsRegister<Register>();
          XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
          Label done, nan;

          __ movl(output, Immediate(kPrimIntMax));
          // temp = int-to-float(output)
          __ cvtsi2ss(temp, output);
          // if input >= temp goto done
          __ comiss(input, temp);
          __ j(kAboveEqual, &done);
          // if input == NaN goto nan
          __ j(kUnordered, &nan);
          // output = float-to-int-truncate(input)
          __ cvttss2si(output, input);
          __ jmp(&done);
          __ Bind(&nan);
          // output = 0
          __ xorl(output, output);
          __ Bind(&done);
          break;
        }

        case Primitive::kPrimDouble: {
          // Processing a Dex `double-to-int' instruction (same clamp/NaN
          // handling as the float case above, in double precision).
          XmmRegister input = in.AsFpuRegister<XmmRegister>();
          Register output = out.AsRegister<Register>();
          XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
          Label done, nan;

          __ movl(output, Immediate(kPrimIntMax));
          // temp = int-to-double(output)
          __ cvtsi2sd(temp, output);
          // if input >= temp goto done
          __ comisd(input, temp);
          __ j(kAboveEqual, &done);
          // if input == NaN goto nan
          __ j(kUnordered, &nan);
          // output = double-to-int-truncate(input)
          __ cvttsd2si(output, input);
          __ jmp(&done);
          __ Bind(&nan);
          // output = 0
          __ xorl(output, output);
          __ Bind(&done);
          break;
        }

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimLong:
      switch (input_type) {
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-long' instruction.
          // cdq sign-extends EAX into EDX:EAX; the locations pinned the
          // input to EAX and the output to the EAX/EDX pair.
          DCHECK_EQ(out.AsRegisterPairLow<Register>(), EAX);
          DCHECK_EQ(out.AsRegisterPairHigh<Register>(), EDX);
          DCHECK_EQ(in.AsRegister<Register>(), EAX);
          __ cdq();
          break;

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-long' instruction.
          // Delegates to the pF2l runtime entrypoint (argument/result
          // registers were set up by the locations builder).
          __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pF2l)));
          codegen_->RecordPcInfo(conversion, conversion->GetDexPc());
          break;

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-long' instruction.
          // Delegates to the pD2l runtime entrypoint.
          __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pD2l)));
          codegen_->RecordPcInfo(conversion, conversion->GetDexPc());
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimChar:
      switch (input_type) {
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
          // Processing a Dex `int-to-char' instruction.
          if (in.IsRegister()) {
            // Zero-extend the low 16 bits (char is unsigned).
            __ movzxw(out.AsRegister<Register>(), in.AsRegister<Register>());
          } else if (in.IsStackSlot()) {
            __ movzxw(out.AsRegister<Register>(), Address(ESP, in.GetStackIndex()));
          } else {
            DCHECK(in.GetConstant()->IsIntConstant());
            int32_t value = in.GetConstant()->AsIntConstant()->GetValue();
            __ movl(out.AsRegister<Register>(), Immediate(static_cast<uint16_t>(value)));
          }
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      }
      break;

    case Primitive::kPrimFloat:
      switch (input_type) {
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-float' instruction.
          __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), in.AsRegister<Register>());
          break;

        case Primitive::kPrimLong: {
          // Processing a Dex `long-to-float' instruction.
          Register low = in.AsRegisterPairLow<Register>();
          Register high = in.AsRegisterPairHigh<Register>();
          XmmRegister result = out.AsFpuRegister<XmmRegister>();
          XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
          XmmRegister constant = locations->GetTemp(1).AsFpuRegister<XmmRegister>();

          // Operations use doubles for precision reasons (each 32-bit
          // half of a long fits in the 53-bit mantissa of a double,
          // but not in the 24-bit mantissa of a float). This is
          // especially important for the low bits. The result is
          // eventually converted to float.

          // low = low - 2^31 (to prevent bit 31 of `low` to be
          // interpreted as a sign bit)
          __ subl(low, Immediate(0x80000000));
          // temp = int-to-double(high)
          __ cvtsi2sd(temp, high);
          // temp = temp * 2^32
          __ LoadLongConstant(constant, k2Pow32EncodingForDouble);
          __ mulsd(temp, constant);
          // result = int-to-double(low)
          __ cvtsi2sd(result, low);
          // result = result + 2^31 (restore the original value of `low`)
          __ LoadLongConstant(constant, k2Pow31EncodingForDouble);
          __ addsd(result, constant);
          // result = result + temp
          __ addsd(result, temp);
          // result = double-to-float(result)
          __ cvtsd2ss(result, result);
          break;
        }

        case Primitive::kPrimDouble:
          // Processing a Dex `double-to-float' instruction.
          __ cvtsd2ss(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>());
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    case Primitive::kPrimDouble:
      switch (input_type) {
        case Primitive::kPrimByte:
        case Primitive::kPrimShort:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
          // Processing a Dex `int-to-double' instruction.
          __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), in.AsRegister<Register>());
          break;

        case Primitive::kPrimLong: {
          // Processing a Dex `long-to-double' instruction.
          // Same double-precision recombination as long-to-float above,
          // minus the final narrowing to float.
          Register low = in.AsRegisterPairLow<Register>();
          Register high = in.AsRegisterPairHigh<Register>();
          XmmRegister result = out.AsFpuRegister<XmmRegister>();
          XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
          XmmRegister constant = locations->GetTemp(1).AsFpuRegister<XmmRegister>();

          // low = low - 2^31 (to prevent bit 31 of `low` to be
          // interpreted as a sign bit)
          __ subl(low, Immediate(0x80000000));
          // temp = int-to-double(high)
          __ cvtsi2sd(temp, high);
          // temp = temp * 2^32
          __ LoadLongConstant(constant, k2Pow32EncodingForDouble);
          __ mulsd(temp, constant);
          // result = int-to-double(low)
          __ cvtsi2sd(result, low);
          // result = result + 2^31 (restore the original value of `low`)
          __ LoadLongConstant(constant, k2Pow31EncodingForDouble);
          __ addsd(result, constant);
          // result = result + temp
          __ addsd(result, temp);
          break;
        }

        case Primitive::kPrimFloat:
          // Processing a Dex `float-to-double' instruction.
          __ cvtss2sd(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>());
          break;

        default:
          LOG(FATAL) << "Unexpected type conversion from " << input_type
                     << " to " << result_type;
      };
      break;

    default:
      LOG(FATAL) << "Unexpected type conversion from " << input_type
                 << " to " << result_type;
  }
}

// Adds happen in place (SameAsFirstInput); the second operand may be a
// register, a constant, or a stack slot.
void LocationsBuilderX86::VisitAdd(HAdd* add) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
  switch (add->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::Any());
      locations->SetOut(Location::SameAsFirstInput());
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::Any());
      locations->SetOut(Location::SameAsFirstInput());
      break;
    }

    default:
      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
      break;
  }
}

// Emits the add; the first input doubles as the destination.
void InstructionCodeGeneratorX86::VisitAdd(HAdd* add) {
  LocationSummary* locations = add->GetLocations();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  DCHECK(first.Equals(locations->Out()));
  switch (add->GetResultType()) {
    case Primitive::kPrimInt: {
      if (second.IsRegister()) {
        __ addl(first.AsRegister<Register>(), second.AsRegister<Register>());
      } else if (second.IsConstant()) {
        __ addl(first.AsRegister<Register>(),
                Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
      } else {
        __ addl(first.AsRegister<Register>(), Address(ESP, second.GetStackIndex()));
      }
      break;
    }

    case Primitive::kPrimLong: {
      if (second.IsRegisterPair()) {
        __
addl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
        // adcl consumes the carry produced by the low-word addl.
        __ adcl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
      } else {
        __ addl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex()));
        __ adcl(first.AsRegisterPairHigh<Register>(),
                Address(ESP, second.GetHighStackIndex(kX86WordSize)));
      }
      break;
    }

    case Primitive::kPrimFloat: {
      if (second.IsFpuRegister()) {
        __ addss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
      } else {
        __ addss(first.AsFpuRegister<XmmRegister>(), Address(ESP, second.GetStackIndex()));
      }
      break;
    }

    case Primitive::kPrimDouble: {
      if (second.IsFpuRegister()) {
        __ addsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
      } else {
        __ addsd(first.AsFpuRegister<XmmRegister>(), Address(ESP, second.GetStackIndex()));
      }
      break;
    }

    default:
      LOG(FATAL) << "Unexpected add type " << add->GetResultType();
  }
}

// Location setup for HSub. Unlike HAdd, floating-point operands both require
// FPU registers (no memory form is used by the codegen below).
void LocationsBuilderX86::VisitSub(HSub* sub) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
  switch (sub->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::Any());
      locations->SetOut(Location::SameAsFirstInput());
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::SameAsFirstInput());
      break;
    }

    default:
      LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
  }
}

// Emits the subtraction; long values use subl/sbbl so the borrow propagates
// from the low to the high word.
void InstructionCodeGeneratorX86::VisitSub(HSub* sub) {
  LocationSummary*
Location::RequiresRegister());
      // TODO: Currently this handles only stack operands:
      // - we don't have enough registers because we currently use Quick ABI.
      // - by the time we have a working register allocator we will probably change the ABI
      //   and fix the above.
      // - we don't have a way yet to request operands on stack but the base line compiler
      //   will leave the operands on the stack with Any().
      locations->SetInAt(1, Location::Any());
      locations->SetOut(Location::SameAsFirstInput());
      // Needed for imul on 32bits with 64bits output.
      locations->AddTemp(Location::RegisterLocation(EAX));
      locations->AddTemp(Location::RegisterLocation(EDX));
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::SameAsFirstInput());
      break;
    }

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}

// Emits the multiplication. The long case synthesizes a 64x64->64 multiply
// from three 32-bit multiplies (see the formula in the comments below); it
// clobbers EAX/EDX, which were reserved as temps in the locations above.
void InstructionCodeGeneratorX86::VisitMul(HMul* mul) {
  LocationSummary* locations = mul->GetLocations();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  DCHECK(first.Equals(locations->Out()));

  switch (mul->GetResultType()) {
    case Primitive::kPrimInt: {
      if (second.IsRegister()) {
        __ imull(first.AsRegister<Register>(), second.AsRegister<Register>());
      } else if (second.IsConstant()) {
        Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());
        __ imull(first.AsRegister<Register>(), imm);
      } else {
        DCHECK(second.IsStackSlot());
        __ imull(first.AsRegister<Register>(), Address(ESP, second.GetStackIndex()));
      }
      break;
    }

    case Primitive::kPrimLong: {
      DCHECK(second.IsDoubleStackSlot());

      Register in1_hi = first.AsRegisterPairHigh<Register>();
      Register in1_lo = first.AsRegisterPairLow<Register>();
      Address in2_hi(ESP, second.GetHighStackIndex(kX86WordSize));
      Address in2_lo(ESP, second.GetStackIndex());
      Register eax = locations->GetTemp(0).AsRegister<Register>();
      Register edx = locations->GetTemp(1).AsRegister<Register>();

      DCHECK_EQ(EAX, eax);
      DCHECK_EQ(EDX, edx);

      // input: in1 - 64 bits, in2 - 64 bits
      // output: in1
      // formula: in1.hi : in1.lo = (in1.lo * in2.hi + in1.hi * in2.lo)* 2^32 + in1.lo * in2.lo
      // parts: in1.hi = in1.lo * in2.hi + in1.hi * in2.lo + (in1.lo * in2.lo)[63:32]
      // parts: in1.lo = (in1.lo * in2.lo)[31:0]

      __ movl(eax, in2_hi);
      // eax <- in1.lo * in2.hi
      __ imull(eax, in1_lo);
      // in1.hi <- in1.hi * in2.lo
      __ imull(in1_hi, in2_lo);
      // in1.hi <- in1.lo * in2.hi + in1.hi * in2.lo
      __ addl(in1_hi, eax);
      // move in1_lo to eax to prepare for double precision
      __ movl(eax, in1_lo);
      // edx:eax <- in1.lo * in2.lo
      __ mull(in2_lo);
      // in1.hi <- in2.hi * in1.lo + in2.lo * in1.hi + (in1.lo * in2.lo)[63:32]
      __ addl(in1_hi, edx);
      // in1.lo <- (in1.lo * in2.lo)[31:0];
      __ movl(in1_lo, eax);

      break;
    }

    case Primitive::kPrimFloat: {
      __ mulss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      __ mulsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
      break;
    }

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}

// Pushes `source` onto the x87 FP stack. Values already on the native stack
// are loaded directly; otherwise they are first spilled to a scratch slot at
// `temp_offset`. `stack_adjustment` compensates ESP-relative offsets for any
// space the caller has already reserved below ESP.
void InstructionCodeGeneratorX86::PushOntoFPStack(Location source, uint32_t temp_offset,
                                                  uint32_t stack_adjustment, bool is_float) {
  if (source.IsStackSlot()) {
    DCHECK(is_float);
    __ flds(Address(ESP, source.GetStackIndex() + stack_adjustment));
  } else if (source.IsDoubleStackSlot()) {
    DCHECK(!is_float);
    __ fldl(Address(ESP,
source.GetStackIndex() + stack_adjustment));
  } else {
    // Write the value to the temporary location on the stack and load to FP stack.
    if (is_float) {
      Location stack_temp = Location::StackSlot(temp_offset);
      codegen_->Move32(stack_temp, source);
      __ flds(Address(ESP, temp_offset));
    } else {
      Location stack_temp = Location::DoubleStackSlot(temp_offset);
      codegen_->Move64(stack_temp, source);
      __ fldl(Address(ESP, temp_offset));
    }
  }
}

// Computes the floating-point remainder with the x87 FPREM instruction.
// FPREM performs partial argument reduction, so it must be repeated until the
// FPU status flag C2 reads 0 (reduction complete).
void InstructionCodeGeneratorX86::GenerateRemFP(HRem *rem) {
  Primitive::Type type = rem->GetResultType();
  bool is_float = type == Primitive::kPrimFloat;
  size_t elem_size = Primitive::ComponentSize(type);
  LocationSummary* locations = rem->GetLocations();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  Location out = locations->Out();

  // Create stack space for 2 elements.
  // TODO: enhance register allocator to ask for stack temporaries.
  __ subl(ESP, Immediate(2 * elem_size));

  // Load the values to the FP stack in reverse order, using temporaries if needed.
  PushOntoFPStack(second, elem_size, 2 * elem_size, is_float);
  PushOntoFPStack(first, 0, 2 * elem_size, is_float);

  // Loop doing FPREM until we stabilize.
  Label retry;
  __ Bind(&retry);
  __ fprem();

  // Move FP status to AX.
  __ fstsw();

  // And see if the argument reduction is complete. This is signaled by the
  // C2 FPU flag bit set to 0.
  __ andl(EAX, Immediate(kC2ConditionMask));
  __ j(kNotEqual, &retry);

  // We have settled on the final value. Retrieve it into an XMM register.
  // Store FP top of stack to real stack.
  if (is_float) {
    __ fsts(Address(ESP, 0));
  } else {
    __ fstl(Address(ESP, 0));
  }

  // Pop the 2 items from the FP stack.
  __ fucompp();

  // Load the value from the stack into an XMM register.
  DCHECK(out.IsFpuRegister()) << out;
  if (is_float) {
    __ movss(out.AsFpuRegister<XmmRegister>(), Address(ESP, 0));
  } else {
    __ movsd(out.AsFpuRegister<XmmRegister>(), Address(ESP, 0));
  }

  // And remove the temporary stack space we allocated.
  __ addl(ESP, Immediate(2 * elem_size));
}

// Shared code generation for integral HDiv and HRem. Int uses cdq/idivl with
// a slow path for the divisor -1 (see below); long calls the pLdiv/pLmod
// runtime entry points.
void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());

  LocationSummary* locations = instruction->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  bool is_div = instruction->IsDiv();

  switch (instruction->GetResultType()) {
    case Primitive::kPrimInt: {
      Register second_reg = second.AsRegister<Register>();
      DCHECK_EQ(EAX, first.AsRegister<Register>());
      DCHECK_EQ(is_div ? EAX : EDX, out.AsRegister<Register>());

      SlowPathCodeX86* slow_path =
          new (GetGraph()->GetArena()) DivRemMinusOneSlowPathX86(out.AsRegister<Register>(),
                                                                 is_div);
      codegen_->AddSlowPath(slow_path);

      // 0x80000000/-1 triggers an arithmetic exception!
      // Dividing by -1 is actually negation and -0x80000000 = 0x80000000 so
      // it's safe to just use negl instead of more complex comparisons.

      __ cmpl(second_reg, Immediate(-1));
      __ j(kEqual, slow_path->GetEntryLabel());

      // edx:eax <- sign-extended of eax
      __ cdq();
      // eax = quotient, edx = remainder
      __ idivl(second_reg);

      __ Bind(slow_path->GetExitLabel());
      break;
    }

    case Primitive::kPrimLong: {
      InvokeRuntimeCallingConvention calling_convention;
      DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>());
      DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>());
      DCHECK_EQ(EAX, out.AsRegisterPairLow<Register>());
      DCHECK_EQ(EDX, out.AsRegisterPairHigh<Register>());

      if (is_div) {
        __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pLdiv)));
      } else {
        __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pLmod)));
      }
      uint32_t dex_pc = is_div
          ? instruction->AsDiv()->GetDexPc()
          : instruction->AsRem()->GetDexPc();
      codegen_->RecordPcInfo(instruction, dex_pc);

      break;
    }

    default:
      LOG(FATAL) << "Unexpected type for GenerateDivRemIntegral " << instruction->GetResultType();
  }
}

// Location setup for HDiv. Int pins the dividend to EAX (idivl requirement)
// and reserves EDX, which idivl clobbers with the remainder; long marks the
// summary as a call and uses the runtime calling convention.
void LocationsBuilderX86::VisitDiv(HDiv* div) {
  LocationSummary::CallKind call_kind = div->GetResultType() == Primitive::kPrimLong
      ? LocationSummary::kCall
      : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);

  switch (div->GetResultType()) {
    case Primitive::kPrimInt: {
      locations->SetInAt(0, Location::RegisterLocation(EAX));
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::SameAsFirstInput());
      // Intel uses edx:eax as the dividend.
      locations->AddTemp(Location::RegisterLocation(EDX));
      break;
    }
    case Primitive::kPrimLong: {
      InvokeRuntimeCallingConvention calling_convention;
      locations->SetInAt(0, Location::RegisterPairLocation(
          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
      locations->SetInAt(1, Location::RegisterPairLocation(
          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
      // Runtime helper puts the result in EAX, EDX.
      locations->SetOut(Location::RegisterPairLocation(EAX, EDX));
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::SameAsFirstInput());
      break;
    }

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}

// Emits the division; integral types go through GenerateDivRemIntegral.
void InstructionCodeGeneratorX86::VisitDiv(HDiv* div) {
  LocationSummary* locations = div->GetLocations();
  Location out = locations->Out();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);

  switch (div->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GenerateDivRemIntegral(div);
      break;
    }

    case Primitive::kPrimFloat: {
      DCHECK(first.Equals(out));
      __ divss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      DCHECK(first.Equals(out));
      __ divsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
      break;
    }

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}

// Location setup for HRem; see GenerateDivRemIntegral and GenerateRemFP for
// the register constraints each case must satisfy.
void LocationsBuilderX86::VisitRem(HRem* rem) {
  Primitive::Type type = rem->GetResultType();
  LocationSummary* locations =
      new (GetGraph()->GetArena())
LocationSummary(rem, LocationSummary::kNoCall); 2293 2294 switch (type) { 2295 case Primitive::kPrimInt: { 2296 locations->SetInAt(0, Location::RegisterLocation(EAX)); 2297 locations->SetInAt(1, Location::RequiresRegister()); 2298 locations->SetOut(Location::RegisterLocation(EDX)); 2299 break; 2300 } 2301 case Primitive::kPrimLong: { 2302 InvokeRuntimeCallingConvention calling_convention; 2303 locations->SetInAt(0, Location::RegisterPairLocation( 2304 calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1))); 2305 locations->SetInAt(1, Location::RegisterPairLocation( 2306 calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3))); 2307 // Runtime helper puts the result in EAX, EDX. 2308 locations->SetOut(Location::RegisterPairLocation(EAX, EDX)); 2309 break; 2310 } 2311 case Primitive::kPrimDouble: 2312 case Primitive::kPrimFloat: { 2313 locations->SetInAt(0, Location::Any()); 2314 locations->SetInAt(1, Location::Any()); 2315 locations->SetOut(Location::RequiresFpuRegister()); 2316 locations->AddTemp(Location::RegisterLocation(EAX)); 2317 break; 2318 } 2319 2320 default: 2321 LOG(FATAL) << "Unexpected rem type " << type; 2322 } 2323} 2324 2325void InstructionCodeGeneratorX86::VisitRem(HRem* rem) { 2326 Primitive::Type type = rem->GetResultType(); 2327 switch (type) { 2328 case Primitive::kPrimInt: 2329 case Primitive::kPrimLong: { 2330 GenerateDivRemIntegral(rem); 2331 break; 2332 } 2333 case Primitive::kPrimFloat: 2334 case Primitive::kPrimDouble: { 2335 GenerateRemFP(rem); 2336 break; 2337 } 2338 default: 2339 LOG(FATAL) << "Unexpected rem type " << type; 2340 } 2341} 2342 2343void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) { 2344 LocationSummary* locations = 2345 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); 2346 switch (instruction->GetType()) { 2347 case Primitive::kPrimInt: { 2348 locations->SetInAt(0, Location::Any()); 2349 break; 2350 } 2351 case 
Primitive::kPrimLong: { 2352 locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0))); 2353 if (!instruction->IsConstant()) { 2354 locations->AddTemp(Location::RequiresRegister()); 2355 } 2356 break; 2357 } 2358 default: 2359 LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType(); 2360 } 2361 if (instruction->HasUses()) { 2362 locations->SetOut(Location::SameAsFirstInput()); 2363 } 2364} 2365 2366void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction) { 2367 SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathX86(instruction); 2368 codegen_->AddSlowPath(slow_path); 2369 2370 LocationSummary* locations = instruction->GetLocations(); 2371 Location value = locations->InAt(0); 2372 2373 switch (instruction->GetType()) { 2374 case Primitive::kPrimInt: { 2375 if (value.IsRegister()) { 2376 __ testl(value.AsRegister<Register>(), value.AsRegister<Register>()); 2377 __ j(kEqual, slow_path->GetEntryLabel()); 2378 } else if (value.IsStackSlot()) { 2379 __ cmpl(Address(ESP, value.GetStackIndex()), Immediate(0)); 2380 __ j(kEqual, slow_path->GetEntryLabel()); 2381 } else { 2382 DCHECK(value.IsConstant()) << value; 2383 if (value.GetConstant()->AsIntConstant()->GetValue() == 0) { 2384 __ jmp(slow_path->GetEntryLabel()); 2385 } 2386 } 2387 break; 2388 } 2389 case Primitive::kPrimLong: { 2390 if (value.IsRegisterPair()) { 2391 Register temp = locations->GetTemp(0).AsRegister<Register>(); 2392 __ movl(temp, value.AsRegisterPairLow<Register>()); 2393 __ orl(temp, value.AsRegisterPairHigh<Register>()); 2394 __ j(kEqual, slow_path->GetEntryLabel()); 2395 } else { 2396 DCHECK(value.IsConstant()) << value; 2397 if (value.GetConstant()->AsLongConstant()->GetValue() == 0) { 2398 __ jmp(slow_path->GetEntryLabel()); 2399 } 2400 } 2401 break; 2402 } 2403 default: 2404 LOG(FATAL) << "Unexpected type for HDivZeroCheck" << instruction->GetType(); 2405 } 2406} 2407 2408void 
LocationsBuilderX86::HandleShift(HBinaryOperation* op) {
  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());

  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);

  switch (op->GetResultType()) {
    case Primitive::kPrimInt: {
      locations->SetInAt(0, Location::RequiresRegister());
      // The shift count needs to be in CL.
      locations->SetInAt(1, Location::ByteRegisterOrConstant(ECX, op->InputAt(1)));
      locations->SetOut(Location::SameAsFirstInput());
      break;
    }
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      // The shift count needs to be in CL.
      locations->SetInAt(1, Location::RegisterLocation(ECX));
      locations->SetOut(Location::SameAsFirstInput());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected op type " << op->GetResultType();
  }
}

// Emits shl/shr/ushr. Int shifts use the native instruction (constant counts
// are masked to the 0-31 range); long shifts are delegated to the
// Generate*Long helpers below.
void InstructionCodeGeneratorX86::HandleShift(HBinaryOperation* op) {
  DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());

  LocationSummary* locations = op->GetLocations();
  Location first = locations->InAt(0);
  Location second = locations->InAt(1);
  DCHECK(first.Equals(locations->Out()));

  switch (op->GetResultType()) {
    case Primitive::kPrimInt: {
      Register first_reg = first.AsRegister<Register>();
      if (second.IsRegister()) {
        Register second_reg = second.AsRegister<Register>();
        DCHECK_EQ(ECX, second_reg);
        if (op->IsShl()) {
          __ shll(first_reg, second_reg);
        } else if (op->IsShr()) {
          __ sarl(first_reg, second_reg);
        } else {
          __ shrl(first_reg, second_reg);
        }
      } else {
        Immediate imm(second.GetConstant()->AsIntConstant()->GetValue() & kMaxIntShiftValue);
        if (op->IsShl()) {
          __ shll(first_reg, imm);
        } else if (op->IsShr()) {
          __ sarl(first_reg, imm);
        } else {
          __ shrl(first_reg, imm);
        }
      }
      break;
    }
    case Primitive::kPrimLong: {
      Register second_reg = second.AsRegister<Register>();
      DCHECK_EQ(ECX, second_reg);
      if (op->IsShl()) {
        GenerateShlLong(first, second_reg);
      } else if (op->IsShr()) {
        GenerateShrLong(first, second_reg);
      } else {
        GenerateUShrLong(first, second_reg);
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected op type " << op->GetResultType();
  }
}

// 64-bit left shift. x86 shift instructions use the count modulo 32, so after
// the shld/shll pair, counts >= 32 (bit 5 of the shifter set) need a fix-up:
// the shifted low word becomes the high word and the low word becomes zero.
void InstructionCodeGeneratorX86::GenerateShlLong(const Location& loc, Register shifter) {
  Label done;
  __ shld(loc.AsRegisterPairHigh<Register>(), loc.AsRegisterPairLow<Register>(), shifter);
  __ shll(loc.AsRegisterPairLow<Register>(), shifter);
  __ testl(shifter, Immediate(32));
  __ j(kEqual, &done);
  __ movl(loc.AsRegisterPairHigh<Register>(), loc.AsRegisterPairLow<Register>());
  __ movl(loc.AsRegisterPairLow<Register>(), Immediate(0));
  __ Bind(&done);
}

// 64-bit arithmetic right shift; for counts >= 32 the high word moves to the
// low word and the high word is filled with the sign (sarl by 31).
void InstructionCodeGeneratorX86::GenerateShrLong(const Location& loc, Register shifter) {
  Label done;
  __ shrd(loc.AsRegisterPairLow<Register>(), loc.AsRegisterPairHigh<Register>(), shifter);
  __ sarl(loc.AsRegisterPairHigh<Register>(), shifter);
  __ testl(shifter, Immediate(32));
  __ j(kEqual, &done);
  __ movl(loc.AsRegisterPairLow<Register>(), loc.AsRegisterPairHigh<Register>());
  __ sarl(loc.AsRegisterPairHigh<Register>(), Immediate(31));
  __ Bind(&done);
}

// 64-bit logical right shift; for counts >= 32 the high word moves to the
// low word and the high word becomes zero.
void InstructionCodeGeneratorX86::GenerateUShrLong(const Location& loc, Register shifter) {
  Label done;
  __ shrd(loc.AsRegisterPairLow<Register>(), loc.AsRegisterPairHigh<Register>(), shifter);
  __ shrl(loc.AsRegisterPairHigh<Register>(), shifter);
  __ testl(shifter, Immediate(32));
  __ j(kEqual, &done);
  __ movl(loc.AsRegisterPairLow<Register>(), loc.AsRegisterPairHigh<Register>());
  __ movl(loc.AsRegisterPairHigh<Register>(), Immediate(0));
  __ Bind(&done);
}

void
LocationsBuilderX86::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void InstructionCodeGeneratorX86::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void LocationsBuilderX86::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void InstructionCodeGeneratorX86::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void LocationsBuilderX86::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void InstructionCodeGeneratorX86::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

// HNewInstance calls the runtime; the result comes back in EAX and the
// arguments (type index, current method) go in the runtime convention
// registers, reserved here as temps.
void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  locations->SetOut(Location::RegisterLocation(EAX));
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}

void InstructionCodeGeneratorX86::VisitNewInstance(HNewInstance* instruction) {
  InvokeRuntimeCallingConvention calling_convention;
  // Arguments: arg0 = type index, arg1 = referring method.
  codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
  __ movl(calling_convention.GetRegisterAt(0), Immediate(instruction->GetTypeIndex()));

  __ fs()->call(
      Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocObjectWithAccessCheck)));

  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
  DCHECK(!codegen_->IsLeafMethod());
}

// HNewArray also calls the runtime; the array length is passed as input 0 in
// the second runtime-convention register.
void LocationsBuilderX86::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  locations->SetOut(Location::RegisterLocation(EAX));
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}

void InstructionCodeGeneratorX86::VisitNewArray(HNewArray* instruction) {
  InvokeRuntimeCallingConvention calling_convention;
  // Arguments: arg0 = type index, arg1 = length (input 0), arg2 = method.
  codegen_->LoadCurrentMethod(calling_convention.GetRegisterAt(2));
  __ movl(calling_convention.GetRegisterAt(0), Immediate(instruction->GetTypeIndex()));

  __ fs()->call(
      Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocArrayWithAccessCheck)));

  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
  DCHECK(!codegen_->IsLeafMethod());
}

// Parameters live in the caller's frame, so stack-based parameter locations
// are rebased past this method's own frame.
void LocationsBuilderX86::VisitParameterValue(HParameterValue* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
  if (location.IsStackSlot()) {
    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  } else if (location.IsDoubleStackSlot()) {
    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  }
  locations->SetOut(location);
}

// No code: the value is already in its location on entry.
void InstructionCodeGeneratorX86::VisitParameterValue(HParameterValue* instruction) {
  UNUSED(instruction);
}

void LocationsBuilderX86::VisitNot(HNot* not_) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::SameAsFirstInput());
}

// Bitwise NOT, operating in place on the (pair of) register(s).
void InstructionCodeGeneratorX86::VisitNot(HNot* not_) {
  LocationSummary* locations = not_->GetLocations();
  Location in = locations->InAt(0);
  Location out = locations->Out();
  DCHECK(in.Equals(out));
  switch
(not_->InputAt(0)->GetType()) {
    case Primitive::kPrimInt:
      __ notl(out.AsRegister<Register>());
      break;

    case Primitive::kPrimLong:
      // NOT of a 64-bit value is NOT of each 32-bit half independently.
      __ notl(out.AsRegisterPairLow<Register>());
      __ notl(out.AsRegisterPairHigh<Register>());
      break;

    default:
      LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
  }
}

// Location setup for HCompare (three-way comparison producing -1/0/1).
void LocationsBuilderX86::VisitCompare(HCompare* compare) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
  switch (compare->InputAt(0)->GetType()) {
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      // TODO: we set any here but we don't handle constants
      locations->SetInAt(1, Location::Any());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType();
  }
}

// Emits the three-way compare. Longs compare high words (signed), then low
// words (unsigned, via kBelow); floats/doubles use ucomiss/ucomisd and route
// NaN (unordered) to greater or less according to the instruction's bias.
void InstructionCodeGeneratorX86::VisitCompare(HCompare* compare) {
  LocationSummary* locations = compare->GetLocations();
  Register out = locations->Out().AsRegister<Register>();
  Location left = locations->InAt(0);
  Location right = locations->InAt(1);

  Label less, greater, done;
  switch (compare->InputAt(0)->GetType()) {
    case Primitive::kPrimLong: {
      if (right.IsRegisterPair()) {
        __ cmpl(left.AsRegisterPairHigh<Register>(), right.AsRegisterPairHigh<Register>());
      } else {
        DCHECK(right.IsDoubleStackSlot());
        __ cmpl(left.AsRegisterPairHigh<Register>(),
                Address(ESP, right.GetHighStackIndex(kX86WordSize)));
      }
      __ j(kLess, &less);  // Signed compare.
      __ j(kGreater, &greater);  // Signed compare.
      if (right.IsRegisterPair()) {
        __ cmpl(left.AsRegisterPairLow<Register>(), right.AsRegisterPairLow<Register>());
      } else {
        DCHECK(right.IsDoubleStackSlot());
        __ cmpl(left.AsRegisterPairLow<Register>(), Address(ESP, right.GetStackIndex()));
      }
      break;
    }
    case Primitive::kPrimFloat: {
      __ ucomiss(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>());
      __ j(kUnordered, compare->IsGtBias() ? &greater : &less);
      break;
    }
    case Primitive::kPrimDouble: {
      __ ucomisd(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>());
      __ j(kUnordered, compare->IsGtBias() ? &greater : &less);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected type for compare operation " << compare->InputAt(0)->GetType();
  }
  // movl does not touch the flags set by the comparison above.
  __ movl(out, Immediate(0));
  __ j(kEqual, &done);
  __ j(kBelow, &less);  // kBelow is for CF (unsigned & floats).

  __ Bind(&greater);
  __ movl(out, Immediate(1));
  __ jmp(&done);

  __ Bind(&less);
  __ movl(out, Immediate(-1));

  __ Bind(&done);
}

void LocationsBuilderX86::VisitPhi(HPhi* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    locations->SetInAt(i, Location::Any());
  }
  locations->SetOut(Location::Any());
}

// Phis are resolved by the register allocator / parallel moves; reaching here
// at code generation time is a bug.
void InstructionCodeGeneratorX86::VisitPhi(HPhi* instruction) {
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorX86::GenerateMemoryBarrier(MemBarrierKind kind) {
  /*
   * According to the JSR-133 Cookbook, for x86 only StoreLoad/AnyAny barriers need memory fence.
   * All other barriers (LoadAny, AnyStore, StoreStore) are nops due to the x86 memory model.
2721 * For those cases, all we need to ensure is that there is a scheduling barrier in place. 2722 */ 2723 switch (kind) { 2724 case MemBarrierKind::kAnyAny: { 2725 __ mfence(); 2726 break; 2727 } 2728 case MemBarrierKind::kAnyStore: 2729 case MemBarrierKind::kLoadAny: 2730 case MemBarrierKind::kStoreStore: { 2731 // nop 2732 break; 2733 } 2734 default: 2735 LOG(FATAL) << "Unexpected memory barrier " << kind; 2736 } 2737} 2738 2739 2740void CodeGeneratorX86::MarkGCCard(Register temp, Register card, Register object, Register value) { 2741 Label is_null; 2742 __ testl(value, value); 2743 __ j(kEqual, &is_null); 2744 __ fs()->movl(card, Address::Absolute(Thread::CardTableOffset<kX86WordSize>().Int32Value())); 2745 __ movl(temp, object); 2746 __ shrl(temp, Immediate(gc::accounting::CardTable::kCardShift)); 2747 __ movb(Address(temp, card, TIMES_1, 0), 2748 X86ManagedRegister::FromCpuRegister(card).AsByteRegister()); 2749 __ Bind(&is_null); 2750} 2751 2752void LocationsBuilderX86::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) { 2753 DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet()); 2754 LocationSummary* locations = 2755 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); 2756 locations->SetInAt(0, Location::RequiresRegister()); 2757 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); 2758 2759 if (field_info.IsVolatile() && (field_info.GetFieldType() == Primitive::kPrimLong)) { 2760 // Long values can be loaded atomically into an XMM using movsd. 2761 // So we use an XMM register as a temp to achieve atomicity (first load the temp into the XMM 2762 // and then copy the XMM into the output 32bits at a time). 
    locations->AddTemp(Location::RequiresFpuRegister());
  }
}

// Emits the load for an instance/static field get. `base` holds the object
// (or declaring class for statics); the PC of the first potentially-faulting
// load is recorded so an implicit null check can map the fault address.
void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction,
                                                 const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location out = locations->Out();
  bool is_volatile = field_info.IsVolatile();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  switch (field_type) {
    case Primitive::kPrimBoolean: {
      __ movzxb(out.AsRegister<Register>(), Address(base, offset));
      break;
    }

    case Primitive::kPrimByte: {
      __ movsxb(out.AsRegister<Register>(), Address(base, offset));
      break;
    }

    case Primitive::kPrimShort: {
      __ movsxw(out.AsRegister<Register>(), Address(base, offset));
      break;
    }

    case Primitive::kPrimChar: {
      __ movzxw(out.AsRegister<Register>(), Address(base, offset));
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ movl(out.AsRegister<Register>(), Address(base, offset));
      break;
    }

    case Primitive::kPrimLong: {
      if (is_volatile) {
        // Volatile 64-bit load: a single movsd is atomic; the XMM temp is
        // then split into the output register pair. The null-check PC is
        // recorded right after the movsd since that is the faulting insn.
        XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
        __ movsd(temp, Address(base, offset));
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        __ movd(out.AsRegisterPairLow<Register>(), temp);
        __ psrlq(temp, Immediate(32));
        __ movd(out.AsRegisterPairHigh<Register>(), temp);
      } else {
        // Non-volatile: two 32-bit loads; record the PC after the first.
        __ movl(out.AsRegisterPairLow<Register>(), Address(base, offset));
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        __ movl(out.AsRegisterPairHigh<Register>(), Address(base, kX86WordSize + offset));
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ movss(out.AsFpuRegister<XmmRegister>(), Address(base, offset));
      break;
    }

    case Primitive::kPrimDouble: {
      __ movsd(out.AsFpuRegister<XmmRegister>(), Address(base, offset));
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Longs are handled in the switch.
  if (field_type != Primitive::kPrimLong) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (is_volatile) {
    GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
  }
}

// Common location building for instance/static field sets.
void LocationsBuilderX86::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());

  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  bool is_volatile = field_info.IsVolatile();
  Primitive::Type field_type = field_info.GetFieldType();
  bool is_byte_type = (field_type == Primitive::kPrimBoolean)
      || (field_type == Primitive::kPrimByte);

  // The register allocator does not support multiple
  // inputs that die at entry with one in a specific register.
  if (is_byte_type) {
    // Ensure the value is in a byte register.
    locations->SetInAt(1, Location::RegisterLocation(EAX));
  } else {
    locations->SetInAt(1, Location::RequiresRegister());
  }
  // Temporary registers for the write barrier.
  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    locations->AddTemp(Location::RequiresRegister());
    // Ensure the card is in a byte register.
    locations->AddTemp(Location::RegisterLocation(ECX));
  } else if (is_volatile && (field_type == Primitive::kPrimLong)) {
    // 64bits value can be atomically written to an address with movsd and an XMM register.
    // We need two XMM registers because there's no easier way to (bit) copy a register pair
    // into a single XMM register (we copy each pair part into the XMMs and then interleave them).
    // NB: We could make the register allocator understand fp_reg <-> core_reg moves but given the
    // isolated cases when we need this it isn't worth adding the extra complexity.
    locations->AddTemp(Location::RequiresFpuRegister());
    locations->AddTemp(Location::RequiresFpuRegister());
  }
}

// Emits the store for an instance/static field set, including the volatile
// memory barriers and the GC write barrier for reference stores.
void InstructionCodeGeneratorX86::HandleFieldSet(HInstruction* instruction,
                                                 const FieldInfo& field_info) {
  DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());

  LocationSummary* locations = instruction->GetLocations();
  Register base = locations->InAt(0).AsRegister<Register>();
  Location value = locations->InAt(1);
  bool is_volatile = field_info.IsVolatile();
  Primitive::Type field_type = field_info.GetFieldType();
  uint32_t offset = field_info.GetFieldOffset().Uint32Value();

  if (is_volatile) {
    GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
  }

  switch (field_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      __ movb(Address(base, offset), value.AsRegister<ByteRegister>());
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      __ movw(Address(base, offset), value.AsRegister<Register>());
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      __ movl(Address(base, offset), value.AsRegister<Register>());
      break;
    }

    case Primitive::kPrimLong: {
      if (is_volatile) {
        // Volatile 64-bit store: interleave the register pair into one XMM
        // (movd low, movd high, punpckldq) and write it atomically with a
        // single movsd; the null-check PC is recorded after that store.
        XmmRegister temp1 = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
        XmmRegister temp2 = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
        __ movd(temp1, value.AsRegisterPairLow<Register>());
        __ movd(temp2, value.AsRegisterPairHigh<Register>());
        __ punpckldq(temp1, temp2);
        __ movsd(Address(base, offset), temp1);
        codegen_->MaybeRecordImplicitNullCheck(instruction);
      } else {
        __ movl(Address(base, offset), value.AsRegisterPairLow<Register>());
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        __ movl(Address(base, kX86WordSize + offset), value.AsRegisterPairHigh<Register>());
      }
      break;
    }

    case Primitive::kPrimFloat: {
      __ movss(Address(base, offset), value.AsFpuRegister<XmmRegister>());
      break;
    }

    case Primitive::kPrimDouble: {
      __ movsd(Address(base, offset), value.AsFpuRegister<XmmRegister>());
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << field_type;
      UNREACHABLE();
  }

  // Longs are handled in the switch.
  if (field_type != Primitive::kPrimLong) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }

  if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
    Register temp = locations->GetTemp(0).AsRegister<Register>();
    Register card = locations->GetTemp(1).AsRegister<Register>();
    codegen_->MarkGCCard(temp, card, base, value.AsRegister<Register>());
  }

  if (is_volatile) {
    GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
  }
}

void LocationsBuilderX86::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorX86::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderX86::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorX86::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void
LocationsBuilderX86::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorX86::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderX86::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorX86::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderX86::VisitNullCheck(HNullCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  // Implicit null checks dereference the object, so it must be in a register;
  // explicit checks can compare any location against zero.
  Location loc = codegen_->GetCompilerOptions().GetImplicitNullChecks()
      ? Location::RequiresRegister()
      : Location::Any();
  locations->SetInAt(0, loc);
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

// Implicit null check: a dummy load from the object; if it is null, the load
// faults and the recorded PC lets the runtime raise NullPointerException.
void InstructionCodeGeneratorX86::GenerateImplicitNullCheck(HNullCheck* instruction) {
  if (codegen_->CanMoveNullCheckToUser(instruction)) {
    // A following instruction's memory access will act as the null check.
    return;
  }
  LocationSummary* locations = instruction->GetLocations();
  Location obj = locations->InAt(0);

  __ testl(EAX, Address(obj.AsRegister<Register>(), 0));
  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}

// Explicit null check: compare against zero and branch to a throwing slow
// path on equality.
void InstructionCodeGeneratorX86::GenerateExplicitNullCheck(HNullCheck* instruction) {
  SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathX86(instruction);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = instruction->GetLocations();
  Location obj = locations->InAt(0);

  if (obj.IsRegister()) {
    __ cmpl(obj.AsRegister<Register>(), Immediate(0));
  } else if (obj.IsStackSlot()) {
    __ cmpl(Address(ESP, obj.GetStackIndex()), Immediate(0));
  } else {
    // A constant input can only be the null constant: always throw.
    DCHECK(obj.IsConstant()) << obj;
    DCHECK_EQ(obj.GetConstant()->AsIntConstant()->GetValue(), 0);
    __ jmp(slow_path->GetEntryLabel());
    return;
  }
  __ j(kEqual, slow_path->GetEntryLabel());
}

void InstructionCodeGeneratorX86::VisitNullCheck(HNullCheck* instruction) {
  if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
    GenerateImplicitNullCheck(instruction);
  } else {
    GenerateExplicitNullCheck(instruction);
  }
}

void LocationsBuilderX86::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

// Emits the element load for an array get. Constant indices fold into the
// displacement; otherwise a scaled-index addressing mode is used.
void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);

  Primitive::Type type = instruction->GetType();
  switch (type) {
    case Primitive::kPrimBoolean: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        __ movzxb(out, Address(obj,
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
      } else {
        __ movzxb(out, Address(obj, index.AsRegister<Register>(), TIMES_1, data_offset));
      }
      break;
    }

    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        __ movsxb(out, Address(obj,
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
      } else {
        __ movsxb(out, Address(obj, index.AsRegister<Register>(), TIMES_1, data_offset));
      }
      break;
    }

    case Primitive::kPrimShort: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        __ movsxw(out, Address(obj,
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
      } else {
        __ movsxw(out, Address(obj, index.AsRegister<Register>(), TIMES_2, data_offset));
      }
      break;
    }

    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        __ movzxw(out, Address(obj,
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
      } else {
        __ movzxw(out, Address(obj, index.AsRegister<Register>(), TIMES_2, data_offset));
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
      Register out = locations->Out().AsRegister<Register>();
      if (index.IsConstant()) {
        __ movl(out, Address(obj,
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
      } else {
        __ movl(out, Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset));
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      Location out = locations->Out();
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        // Record the PC after the first load only: that is the one that can
        // fault on a null array.
        __ movl(out.AsRegisterPairLow<Register>(), Address(obj, offset));
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        __ movl(out.AsRegisterPairHigh<Register>(), Address(obj, offset + kX86WordSize));
      } else {
        __ movl(out.AsRegisterPairLow<Register>(),
                Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset));
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        __ movl(out.AsRegisterPairHigh<Register>(),
                Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize));
      }
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      LOG(FATAL) << "Unimplemented register type " << type;
      UNREACHABLE();
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
      UNREACHABLE();
  }

  if (type != Primitive::kPrimLong) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}

void LocationsBuilderX86::VisitArraySet(HArraySet* instruction) {
  Primitive::Type value_type = instruction->GetComponentType();
  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());

  DCHECK(kFollowsQuickABI);
  // With both value and index in registers there are not enough free
  // registers left for the write-barrier temps, so fall back to the runtime.
  bool not_enough_registers = needs_write_barrier
      && !instruction->GetValue()->IsConstant()
      && !instruction->GetIndex()->IsConstant();
  bool needs_runtime_call = instruction->NeedsTypeCheck() || not_enough_registers;

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction,
      needs_runtime_call ?
LocationSummary::kCall : LocationSummary::kNoCall);

  if (needs_runtime_call) {
    // Inputs go in the runtime calling convention for the pAputObject call.
    InvokeRuntimeCallingConvention calling_convention;
    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
    locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
  } else {
    bool is_byte_type = (value_type == Primitive::kPrimBoolean)
        || (value_type == Primitive::kPrimByte);
    // We need the inputs to be different than the output in case of long operation.
    // In case of a byte operation, the register allocator does not support multiple
    // inputs that die at entry with one in a specific register.
    locations->SetInAt(0, Location::RequiresRegister());
    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
    if (is_byte_type) {
      // Ensure the value is in a byte register.
      locations->SetInAt(2, Location::ByteRegisterOrConstant(EAX, instruction->InputAt(2)));
    } else {
      locations->SetInAt(2, Location::RegisterOrConstant(instruction->InputAt(2)));
    }
    // Temporary registers for the write barrier.
    if (needs_write_barrier) {
      locations->AddTemp(Location::RequiresRegister());
      // Ensure the card is in a byte register.
      locations->AddTemp(Location::RegisterLocation(ECX));
    }
  }
}

// Emits the element store for an array set. Reference stores that need a type
// check go through the pAputObject runtime entrypoint; all other cases store
// inline and mark the GC card when required.
void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Location index = locations->InAt(1);
  Location value = locations->InAt(2);
  Primitive::Type value_type = instruction->GetComponentType();
  bool needs_runtime_call = locations->WillCall();
  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());

  switch (value_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        if (value.IsRegister()) {
          __ movb(Address(obj, offset), value.AsRegister<ByteRegister>());
        } else {
          __ movb(Address(obj, offset),
                  Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
        }
      } else {
        if (value.IsRegister()) {
          __ movb(Address(obj, index.AsRegister<Register>(), TIMES_1, data_offset),
                  value.AsRegister<ByteRegister>());
        } else {
          __ movb(Address(obj, index.AsRegister<Register>(), TIMES_1, data_offset),
                  Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
        }
      }
      codegen_->MaybeRecordImplicitNullCheck(instruction);
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        if (value.IsRegister()) {
          __ movw(Address(obj, offset), value.AsRegister<Register>());
        } else {
          __ movw(Address(obj, offset),
                  Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
        }
      } else {
        if (value.IsRegister()) {
          __ movw(Address(obj, index.AsRegister<Register>(), TIMES_2, data_offset),
                  value.AsRegister<Register>());
        } else {
          __ movw(Address(obj, index.AsRegister<Register>(), TIMES_2, data_offset),
                  Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
        }
      }
      codegen_->MaybeRecordImplicitNullCheck(instruction);
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      if (!needs_runtime_call) {
        uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
        if (index.IsConstant()) {
          size_t offset =
              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
          if (value.IsRegister()) {
            __ movl(Address(obj, offset), value.AsRegister<Register>());
          } else {
            DCHECK(value.IsConstant()) << value;
            __ movl(Address(obj, offset),
                    Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
          }
        } else {
          DCHECK(index.IsRegister()) << index;
          if (value.IsRegister()) {
            __ movl(Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset),
                    value.AsRegister<Register>());
          } else {
            DCHECK(value.IsConstant()) << value;
            __ movl(Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset),
                    Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
          }
        }
        codegen_->MaybeRecordImplicitNullCheck(instruction);

        if (needs_write_barrier) {
          Register temp = locations->GetTemp(0).AsRegister<Register>();
          Register card = locations->GetTemp(1).AsRegister<Register>();
          codegen_->MarkGCCard(temp, card, obj, value.AsRegister<Register>());
        }
      } else {
        // Type-checked reference store: delegate to the runtime, which also
        // handles null check, bounds check and the write barrier.
        DCHECK_EQ(value_type, Primitive::kPrimNot);
        DCHECK(!codegen_->IsLeafMethod());
        __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAputObject)));
        codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      if (index.IsConstant()) {
        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        if (value.IsRegisterPair()) {
          // Record the PC after the first store only: it is the faulting one.
          __ movl(Address(obj, offset), value.AsRegisterPairLow<Register>());
          codegen_->MaybeRecordImplicitNullCheck(instruction);
          __ movl(Address(obj, offset + kX86WordSize), value.AsRegisterPairHigh<Register>());
        } else {
          DCHECK(value.IsConstant());
          int64_t val = value.GetConstant()->AsLongConstant()->GetValue();
          __ movl(Address(obj, offset), Immediate(Low32Bits(val)));
          codegen_->MaybeRecordImplicitNullCheck(instruction);
          __ movl(Address(obj, offset + kX86WordSize), Immediate(High32Bits(val)));
        }
      } else {
        if (value.IsRegisterPair()) {
          __ movl(Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset),
                  value.AsRegisterPairLow<Register>());
          codegen_->MaybeRecordImplicitNullCheck(instruction);
          __ movl(Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize),
                  value.AsRegisterPairHigh<Register>());
        } else {
          DCHECK(value.IsConstant());
          int64_t val = value.GetConstant()->AsLongConstant()->GetValue();
          __ movl(Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset),
                  Immediate(Low32Bits(val)));
          codegen_->MaybeRecordImplicitNullCheck(instruction);
          __ movl(Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize),
                  Immediate(High32Bits(val)));
        }
      }
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
      UNREACHABLE();
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " <<
instruction->GetType();
      UNREACHABLE();
  }
}

void LocationsBuilderX86::VisitArrayLength(HArrayLength* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  // NOTE(review): other visitors in this file do not call SetLocations
  // explicitly (they appear to rely on the LocationSummary constructor);
  // confirm whether this call is redundant before removing it.
  instruction->SetLocations(locations);
}

// Loads the array length field; the load doubles as the implicit null check.
void InstructionCodeGeneratorX86::VisitArrayLength(HArrayLength* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
  Register obj = locations->InAt(0).AsRegister<Register>();
  Register out = locations->Out().AsRegister<Register>();
  __ movl(out, Address(obj, offset));
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}

void LocationsBuilderX86::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorX86::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathX86(
      instruction, locations->InAt(0), locations->InAt(1));
  codegen_->AddSlowPath(slow_path);

  Register index = locations->InAt(0).AsRegister<Register>();
  Register length = locations->InAt(1).AsRegister<Register>();

  // Unsigned compare: index >= length also catches negative indices.
  __ cmpl(index, length);
  __ j(kAboveEqual, slow_path->GetEntryLabel());
}

void LocationsBuilderX86::VisitTemporary(HTemporary* temp) {
  temp->SetLocations(nullptr);
}

void InstructionCodeGeneratorX86::VisitTemporary(HTemporary* temp) {
  // Nothing to do, this is driven by the code generator.
  UNUSED(temp);
}

void LocationsBuilderX86::VisitParallelMove(HParallelMove* instruction) {
  UNUSED(instruction);
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorX86::VisitParallelMove(HParallelMove* instruction) {
  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}

void LocationsBuilderX86::VisitSuspendCheck(HSuspendCheck* instruction) {
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}

void InstructionCodeGeneratorX86::VisitSuspendCheck(HSuspendCheck* instruction) {
  HBasicBlock* block = instruction->GetBlock();
  if (block->GetLoopInformation() != nullptr) {
    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
    // The back edge will generate the suspend check.
    return;
  }
  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
    // The goto will generate the suspend check.
    return;
  }
  GenerateSuspendCheck(instruction, nullptr);
}

// Emits the suspend-check test. With a successor the fast path jumps straight
// to it and the slow path is entered by fall-through; without one, the slow
// path returns to the bound return label.
void InstructionCodeGeneratorX86::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                       HBasicBlock* successor) {
  SuspendCheckSlowPathX86* slow_path =
      new (GetGraph()->GetArena()) SuspendCheckSlowPathX86(instruction, successor);
  codegen_->AddSlowPath(slow_path);
  // Test the thread's flags word (via the fs: thread register); any set flag
  // means the runtime must be called.
  __ fs()->cmpw(Address::Absolute(
      Thread::ThreadFlagsOffset<kX86WordSize>().Int32Value()), Immediate(0));
  if (successor == nullptr) {
    __ j(kNotEqual, slow_path->GetEntryLabel());
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ j(kEqual, codegen_->GetLabelOf(successor));
    __ jmp(slow_path->GetEntryLabel());
  }
}

X86Assembler* ParallelMoveResolverX86::GetAssembler() const {
  return codegen_->GetAssembler();
}

// Moves a 32-bit value between two stack slots via a scratch register.
void ParallelMoveResolverX86::MoveMemoryToMemory(int dst, int src) {
  ScratchRegisterScope ensure_scratch(
      this, kNoRegister, EAX, codegen_->GetNumberOfCoreRegisters());
  // If the scratch register had to be spilled, ESP moved down by one word.
  int stack_offset = ensure_scratch.IsSpilled() ?
kX86WordSize : 0;
  __ movl(static_cast<Register>(ensure_scratch.GetRegister()), Address(ESP, src + stack_offset));
  __ movl(Address(ESP, dst + stack_offset), static_cast<Register>(ensure_scratch.GetRegister()));
}

// Emits one pending move of the parallel-move graph.
void ParallelMoveResolverX86::EmitMove(size_t index) {
  MoveOperands* move = moves_.Get(index);
  Location source = move->GetSource();
  Location destination = move->GetDestination();

  if (source.IsRegister()) {
    if (destination.IsRegister()) {
      __ movl(destination.AsRegister<Register>(), source.AsRegister<Register>());
    } else {
      DCHECK(destination.IsStackSlot());
      __ movl(Address(ESP, destination.GetStackIndex()), source.AsRegister<Register>());
    }
  } else if (source.IsStackSlot()) {
    if (destination.IsRegister()) {
      __ movl(destination.AsRegister<Register>(), Address(ESP, source.GetStackIndex()));
    } else {
      DCHECK(destination.IsStackSlot());
      MoveMemoryToMemory(destination.GetStackIndex(),
                         source.GetStackIndex());
    }
  } else if (source.IsConstant()) {
    HIntConstant* instruction = source.GetConstant()->AsIntConstant();
    Immediate imm(instruction->AsIntConstant()->GetValue());
    if (destination.IsRegister()) {
      __ movl(destination.AsRegister<Register>(), imm);
    } else {
      __ movl(Address(ESP, destination.GetStackIndex()), imm);
    }
  } else {
    LOG(FATAL) << "Unimplemented move: " << destination << " <- " << source;
  }
}

// Swaps a register with a stack slot using a scratch register.
void ParallelMoveResolverX86::Exchange(Register reg, int mem) {
  // Never suggest `reg` itself as the scratch register.
  Register suggested_scratch = reg == EAX ? EBX : EAX;
  ScratchRegisterScope ensure_scratch(
      this, reg, suggested_scratch, codegen_->GetNumberOfCoreRegisters());

  // If the scratch register had to be spilled, ESP moved down by one word.
  int stack_offset = ensure_scratch.IsSpilled() ? kX86WordSize : 0;
  __ movl(static_cast<Register>(ensure_scratch.GetRegister()), Address(ESP, mem + stack_offset));
  __ movl(Address(ESP, mem + stack_offset), reg);
  __ movl(reg, static_cast<Register>(ensure_scratch.GetRegister()));
}

// Swaps two stack slots using two scratch registers.
void ParallelMoveResolverX86::Exchange(int mem1, int mem2) {
  ScratchRegisterScope ensure_scratch1(
      this, kNoRegister, EAX, codegen_->GetNumberOfCoreRegisters());

  Register suggested_scratch = ensure_scratch1.GetRegister() == EAX ? EBX : EAX;
  ScratchRegisterScope ensure_scratch2(
      this, ensure_scratch1.GetRegister(), suggested_scratch, codegen_->GetNumberOfCoreRegisters());

  // Each spilled scratch register shifts ESP down by one word.
  int stack_offset = ensure_scratch1.IsSpilled() ? kX86WordSize : 0;
  stack_offset += ensure_scratch2.IsSpilled() ? kX86WordSize : 0;
  __ movl(static_cast<Register>(ensure_scratch1.GetRegister()), Address(ESP, mem1 + stack_offset));
  __ movl(static_cast<Register>(ensure_scratch2.GetRegister()), Address(ESP, mem2 + stack_offset));
  __ movl(Address(ESP, mem2 + stack_offset), static_cast<Register>(ensure_scratch1.GetRegister()));
  __ movl(Address(ESP, mem1 + stack_offset), static_cast<Register>(ensure_scratch2.GetRegister()));
}

// Emits one pending swap of the parallel-move graph.
void ParallelMoveResolverX86::EmitSwap(size_t index) {
  MoveOperands* move = moves_.Get(index);
  Location source = move->GetSource();
  Location destination = move->GetDestination();

  if (source.IsRegister() && destination.IsRegister()) {
    __ xchgl(destination.AsRegister<Register>(), source.AsRegister<Register>());
  } else if (source.IsRegister() && destination.IsStackSlot()) {
    Exchange(source.AsRegister<Register>(), destination.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsRegister()) {
    Exchange(destination.AsRegister<Register>(), source.GetStackIndex());
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
    Exchange(destination.GetStackIndex(), source.GetStackIndex());
3518 } else { 3519 LOG(FATAL) << "Unimplemented"; 3520 } 3521} 3522 3523void ParallelMoveResolverX86::SpillScratch(int reg) { 3524 __ pushl(static_cast<Register>(reg)); 3525} 3526 3527void ParallelMoveResolverX86::RestoreScratch(int reg) { 3528 __ popl(static_cast<Register>(reg)); 3529} 3530 3531void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) { 3532 LocationSummary::CallKind call_kind = cls->CanCallRuntime() 3533 ? LocationSummary::kCallOnSlowPath 3534 : LocationSummary::kNoCall; 3535 LocationSummary* locations = 3536 new (GetGraph()->GetArena()) LocationSummary(cls, call_kind); 3537 locations->SetOut(Location::RequiresRegister()); 3538} 3539 3540void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) { 3541 Register out = cls->GetLocations()->Out().AsRegister<Register>(); 3542 if (cls->IsReferrersClass()) { 3543 DCHECK(!cls->CanCallRuntime()); 3544 DCHECK(!cls->MustGenerateClinitCheck()); 3545 codegen_->LoadCurrentMethod(out); 3546 __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value())); 3547 } else { 3548 DCHECK(cls->CanCallRuntime()); 3549 codegen_->LoadCurrentMethod(out); 3550 __ movl(out, Address(out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value())); 3551 __ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()))); 3552 3553 SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86( 3554 cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck()); 3555 codegen_->AddSlowPath(slow_path); 3556 __ testl(out, out); 3557 __ j(kEqual, slow_path->GetEntryLabel()); 3558 if (cls->MustGenerateClinitCheck()) { 3559 GenerateClassInitializationCheck(slow_path, out); 3560 } else { 3561 __ Bind(slow_path->GetExitLabel()); 3562 } 3563 } 3564} 3565 3566void LocationsBuilderX86::VisitClinitCheck(HClinitCheck* check) { 3567 LocationSummary* locations = 3568 new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath); 3569 locations->SetInAt(0, 
Location::RequiresRegister()); 3570 if (check->HasUses()) { 3571 locations->SetOut(Location::SameAsFirstInput()); 3572 } 3573} 3574 3575void InstructionCodeGeneratorX86::VisitClinitCheck(HClinitCheck* check) { 3576 // We assume the class to not be null. 3577 SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86( 3578 check->GetLoadClass(), check, check->GetDexPc(), true); 3579 codegen_->AddSlowPath(slow_path); 3580 GenerateClassInitializationCheck(slow_path, 3581 check->GetLocations()->InAt(0).AsRegister<Register>()); 3582} 3583 3584void InstructionCodeGeneratorX86::GenerateClassInitializationCheck( 3585 SlowPathCodeX86* slow_path, Register class_reg) { 3586 __ cmpl(Address(class_reg, mirror::Class::StatusOffset().Int32Value()), 3587 Immediate(mirror::Class::kStatusInitialized)); 3588 __ j(kLess, slow_path->GetEntryLabel()); 3589 __ Bind(slow_path->GetExitLabel()); 3590 // No need for memory fence, thanks to the X86 memory model. 3591} 3592 3593void LocationsBuilderX86::VisitLoadString(HLoadString* load) { 3594 LocationSummary* locations = 3595 new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath); 3596 locations->SetOut(Location::RequiresRegister()); 3597} 3598 3599void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) { 3600 SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86(load); 3601 codegen_->AddSlowPath(slow_path); 3602 3603 Register out = load->GetLocations()->Out().AsRegister<Register>(); 3604 codegen_->LoadCurrentMethod(out); 3605 __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value())); 3606 __ movl(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value())); 3607 __ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex()))); 3608 __ testl(out, out); 3609 __ j(kEqual, slow_path->GetEntryLabel()); 3610 __ Bind(slow_path->GetExitLabel()); 3611} 3612 3613void 
LocationsBuilderX86::VisitLoadException(HLoadException* load) { 3614 LocationSummary* locations = 3615 new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall); 3616 locations->SetOut(Location::RequiresRegister()); 3617} 3618 3619void InstructionCodeGeneratorX86::VisitLoadException(HLoadException* load) { 3620 Address address = Address::Absolute(Thread::ExceptionOffset<kX86WordSize>().Int32Value()); 3621 __ fs()->movl(load->GetLocations()->Out().AsRegister<Register>(), address); 3622 __ fs()->movl(address, Immediate(0)); 3623} 3624 3625void LocationsBuilderX86::VisitThrow(HThrow* instruction) { 3626 LocationSummary* locations = 3627 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall); 3628 InvokeRuntimeCallingConvention calling_convention; 3629 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); 3630} 3631 3632void InstructionCodeGeneratorX86::VisitThrow(HThrow* instruction) { 3633 __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pDeliverException))); 3634 codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); 3635} 3636 3637void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) { 3638 LocationSummary::CallKind call_kind = instruction->IsClassFinal() 3639 ? 
LocationSummary::kNoCall 3640 : LocationSummary::kCallOnSlowPath; 3641 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); 3642 locations->SetInAt(0, Location::RequiresRegister()); 3643 locations->SetInAt(1, Location::Any()); 3644 locations->SetOut(Location::RequiresRegister()); 3645} 3646 3647void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) { 3648 LocationSummary* locations = instruction->GetLocations(); 3649 Register obj = locations->InAt(0).AsRegister<Register>(); 3650 Location cls = locations->InAt(1); 3651 Register out = locations->Out().AsRegister<Register>(); 3652 uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); 3653 Label done, zero; 3654 SlowPathCodeX86* slow_path = nullptr; 3655 3656 // Return 0 if `obj` is null. 3657 // TODO: avoid this check if we know obj is not null. 3658 __ testl(obj, obj); 3659 __ j(kEqual, &zero); 3660 __ movl(out, Address(obj, class_offset)); 3661 // Compare the class of `obj` with `cls`. 3662 if (cls.IsRegister()) { 3663 __ cmpl(out, cls.AsRegister<Register>()); 3664 } else { 3665 DCHECK(cls.IsStackSlot()) << cls; 3666 __ cmpl(out, Address(ESP, cls.GetStackIndex())); 3667 } 3668 3669 if (instruction->IsClassFinal()) { 3670 // Classes must be equal for the instanceof to succeed. 3671 __ j(kNotEqual, &zero); 3672 __ movl(out, Immediate(1)); 3673 __ jmp(&done); 3674 } else { 3675 // If the classes are not equal, we go into a slow path. 
3676 DCHECK(locations->OnlyCallsOnSlowPath()); 3677 slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86( 3678 instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc()); 3679 codegen_->AddSlowPath(slow_path); 3680 __ j(kNotEqual, slow_path->GetEntryLabel()); 3681 __ movl(out, Immediate(1)); 3682 __ jmp(&done); 3683 } 3684 __ Bind(&zero); 3685 __ movl(out, Immediate(0)); 3686 if (slow_path != nullptr) { 3687 __ Bind(slow_path->GetExitLabel()); 3688 } 3689 __ Bind(&done); 3690} 3691 3692void LocationsBuilderX86::VisitCheckCast(HCheckCast* instruction) { 3693 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary( 3694 instruction, LocationSummary::kCallOnSlowPath); 3695 locations->SetInAt(0, Location::RequiresRegister()); 3696 locations->SetInAt(1, Location::Any()); 3697 locations->AddTemp(Location::RequiresRegister()); 3698} 3699 3700void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) { 3701 LocationSummary* locations = instruction->GetLocations(); 3702 Register obj = locations->InAt(0).AsRegister<Register>(); 3703 Location cls = locations->InAt(1); 3704 Register temp = locations->GetTemp(0).AsRegister<Register>(); 3705 uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); 3706 SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86( 3707 instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc()); 3708 codegen_->AddSlowPath(slow_path); 3709 3710 // TODO: avoid this check if we know obj is not null. 3711 __ testl(obj, obj); 3712 __ j(kEqual, slow_path->GetExitLabel()); 3713 __ movl(temp, Address(obj, class_offset)); 3714 3715 // Compare the class of `obj` with `cls`. 
3716 if (cls.IsRegister()) { 3717 __ cmpl(temp, cls.AsRegister<Register>()); 3718 } else { 3719 DCHECK(cls.IsStackSlot()) << cls; 3720 __ cmpl(temp, Address(ESP, cls.GetStackIndex())); 3721 } 3722 3723 __ j(kNotEqual, slow_path->GetEntryLabel()); 3724 __ Bind(slow_path->GetExitLabel()); 3725} 3726 3727void LocationsBuilderX86::VisitMonitorOperation(HMonitorOperation* instruction) { 3728 LocationSummary* locations = 3729 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall); 3730 InvokeRuntimeCallingConvention calling_convention; 3731 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); 3732} 3733 3734void InstructionCodeGeneratorX86::VisitMonitorOperation(HMonitorOperation* instruction) { 3735 __ fs()->call(Address::Absolute(instruction->IsEnter() 3736 ? QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pLockObject) 3737 : QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pUnlockObject))); 3738 codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); 3739} 3740 3741void LocationsBuilderX86::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); } 3742void LocationsBuilderX86::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); } 3743void LocationsBuilderX86::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); } 3744 3745void LocationsBuilderX86::HandleBitwiseOperation(HBinaryOperation* instruction) { 3746 LocationSummary* locations = 3747 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); 3748 DCHECK(instruction->GetResultType() == Primitive::kPrimInt 3749 || instruction->GetResultType() == Primitive::kPrimLong); 3750 locations->SetInAt(0, Location::RequiresRegister()); 3751 locations->SetInAt(1, Location::Any()); 3752 locations->SetOut(Location::SameAsFirstInput()); 3753} 3754 3755void InstructionCodeGeneratorX86::VisitAnd(HAnd* instruction) { 3756 HandleBitwiseOperation(instruction); 3757} 3758 3759void InstructionCodeGeneratorX86::VisitOr(HOr* 
instruction) { 3760 HandleBitwiseOperation(instruction); 3761} 3762 3763void InstructionCodeGeneratorX86::VisitXor(HXor* instruction) { 3764 HandleBitwiseOperation(instruction); 3765} 3766 3767void InstructionCodeGeneratorX86::HandleBitwiseOperation(HBinaryOperation* instruction) { 3768 LocationSummary* locations = instruction->GetLocations(); 3769 Location first = locations->InAt(0); 3770 Location second = locations->InAt(1); 3771 DCHECK(first.Equals(locations->Out())); 3772 3773 if (instruction->GetResultType() == Primitive::kPrimInt) { 3774 if (second.IsRegister()) { 3775 if (instruction->IsAnd()) { 3776 __ andl(first.AsRegister<Register>(), second.AsRegister<Register>()); 3777 } else if (instruction->IsOr()) { 3778 __ orl(first.AsRegister<Register>(), second.AsRegister<Register>()); 3779 } else { 3780 DCHECK(instruction->IsXor()); 3781 __ xorl(first.AsRegister<Register>(), second.AsRegister<Register>()); 3782 } 3783 } else if (second.IsConstant()) { 3784 if (instruction->IsAnd()) { 3785 __ andl(first.AsRegister<Register>(), 3786 Immediate(second.GetConstant()->AsIntConstant()->GetValue())); 3787 } else if (instruction->IsOr()) { 3788 __ orl(first.AsRegister<Register>(), 3789 Immediate(second.GetConstant()->AsIntConstant()->GetValue())); 3790 } else { 3791 DCHECK(instruction->IsXor()); 3792 __ xorl(first.AsRegister<Register>(), 3793 Immediate(second.GetConstant()->AsIntConstant()->GetValue())); 3794 } 3795 } else { 3796 if (instruction->IsAnd()) { 3797 __ andl(first.AsRegister<Register>(), Address(ESP, second.GetStackIndex())); 3798 } else if (instruction->IsOr()) { 3799 __ orl(first.AsRegister<Register>(), Address(ESP, second.GetStackIndex())); 3800 } else { 3801 DCHECK(instruction->IsXor()); 3802 __ xorl(first.AsRegister<Register>(), Address(ESP, second.GetStackIndex())); 3803 } 3804 } 3805 } else { 3806 DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong); 3807 if (second.IsRegisterPair()) { 3808 if (instruction->IsAnd()) { 3809 __ 
andl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>()); 3810 __ andl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>()); 3811 } else if (instruction->IsOr()) { 3812 __ orl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>()); 3813 __ orl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>()); 3814 } else { 3815 DCHECK(instruction->IsXor()); 3816 __ xorl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>()); 3817 __ xorl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>()); 3818 } 3819 } else { 3820 if (instruction->IsAnd()) { 3821 __ andl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex())); 3822 __ andl(first.AsRegisterPairHigh<Register>(), 3823 Address(ESP, second.GetHighStackIndex(kX86WordSize))); 3824 } else if (instruction->IsOr()) { 3825 __ orl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex())); 3826 __ orl(first.AsRegisterPairHigh<Register>(), 3827 Address(ESP, second.GetHighStackIndex(kX86WordSize))); 3828 } else { 3829 DCHECK(instruction->IsXor()); 3830 __ xorl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex())); 3831 __ xorl(first.AsRegisterPairHigh<Register>(), 3832 Address(ESP, second.GetHighStackIndex(kX86WordSize))); 3833 } 3834 } 3835 } 3836} 3837 3838} // namespace x86 3839} // namespace art 3840