context_x86.cc revision 41b175aba41c9365a1c53b8a1afbd17129c87c14
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "context_x86.h"

#include "base/bit_utils.h"
#include "mirror/art_method-inl.h"
#include "quick/quick_method_frame_info.h"

namespace art {
namespace x86 {

static constexpr uintptr_t gZero = 0;

void X86Context::Reset() {
  std::fill_n(gprs_, arraysize(gprs_), nullptr);
  std::fill_n(fprs_, arraysize(fprs_), nullptr);
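  // ESP is the one register that always has backing storage (the esp_ member), so it can be read
  // and written through the context even before FillCalleeSaves() points the other entries at
  // stack slots.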
  gprs_[ESP] = &esp_;
  // Initialize registers with easy to spot debug values.
  esp_ = X86Context::kBadGprBase + ESP;
  eip_ = X86Context::kBadGprBase + kNumberOfCpuRegisters;
}

void X86Context::FillCalleeSaves(const StackVisitor& fr) {
  mirror::ArtMethod* method = fr.GetMethod();
  const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
  int spill_pos = 0;

  // Core registers come first, from the highest down to the lowest.
  uint32_t core_regs =
      frame_info.CoreSpillMask() & ~(static_cast<uint32_t>(-1) << kNumberOfCpuRegisters);
  DCHECK_EQ(1, POPCOUNT(frame_info.CoreSpillMask() & ~core_regs));  // Return address spill.
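  // spill_pos indexes the callee-save area at the top of the frame: index 0 is expected to resolve
  // (via StackVisitor::CalleeSaveAddress) to the spill slot just below the return address, index 1
  // to the slot below that, and so on. For a refs-only callee-save frame, for example, the core
  // mask typically holds EBP, ESI and EDI plus the return-address bit, so the loop below visits
  // EDI, ESI and EBP at spill_pos 0, 1 and 2.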
  for (uint32_t core_reg : HighToLowBits(core_regs)) {
    gprs_[core_reg] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
    ++spill_pos;
  }
  DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()) - 1);

  // FP registers come second, from the highest down to the lowest.
  uint32_t fp_regs = frame_info.FpSpillMask();
  DCHECK_EQ(0u, fp_regs & (static_cast<uint32_t>(-1) << kNumberOfFloatRegisters));
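  // Each spilled XMM value is 64 bits wide and so occupies two consecutive 32-bit stack slots;
  // record the address of each half separately so that DoLongJump() can reassemble the full value.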
  for (uint32_t fp_reg : HighToLowBits(fp_regs)) {
    // Two void* per XMM register.
    fprs_[2 * fp_reg] = fr.CalleeSaveAddress(spill_pos + 1, frame_info.FrameSizeInBytes());
    fprs_[2 * fp_reg + 1] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
    spill_pos += 2;
  }
  DCHECK_EQ(spill_pos,
            POPCOUNT(frame_info.CoreSpillMask()) - 1 + 2 * POPCOUNT(frame_info.FpSpillMask()));
}

void X86Context::SmashCallerSaves() {
  // This needs to be 0 because we want a null/zero return value.
  gprs_[EAX] = const_cast<uintptr_t*>(&gZero);
  gprs_[EDX] = const_cast<uintptr_t*>(&gZero);
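  // ECX and EBX carry no return value; leaving them null makes DoLongJump() load the kBadGprBase
  // debug pattern instead of whatever location the context previously pointed at.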
  gprs_[ECX] = nullptr;
  gprs_[EBX] = nullptr;
  memset(&fprs_[0], '\0', sizeof(fprs_));
}

void X86Context::SetGPR(uint32_t reg, uintptr_t value) {
  CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
  DCHECK(IsAccessibleGPR(reg));
  CHECK_NE(gprs_[reg], &gZero);
  *gprs_[reg] = value;
}

void X86Context::SetFPR(uint32_t reg, uintptr_t value) {
  CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFloatRegisters));
  DCHECK(IsAccessibleFPR(reg));
  CHECK_NE(fprs_[reg], reinterpret_cast<const uint32_t*>(&gZero));
  *fprs_[reg] = value;
}

void X86Context::DoLongJump() {
#if defined(__i386__)
  // Array of GPR values, filled from the context backwards for the long jump pop. We add one
  // extra slot at the top for the stack pointer, which popal does not restore.
  volatile uintptr_t gprs[kNumberOfCpuRegisters + 1];
  for (size_t i = 0; i < kNumberOfCpuRegisters; ++i) {
    gprs[kNumberOfCpuRegisters - i - 1] =
        gprs_[i] != nullptr ? *gprs_[i] : X86Context::kBadGprBase + i;
  }
  uint32_t fprs[kNumberOfFloatRegisters];
  for (size_t i = 0; i < kNumberOfFloatRegisters; ++i) {
    fprs[i] = fprs_[i] != nullptr ? *fprs_[i] : X86Context::kBadFprBase + i;
  }
  // Point the saved stack pointer one word lower and store eip_ there, so that the final ret
  // pops eip_ and jumps to it.
  uintptr_t esp = gprs[kNumberOfCpuRegisters - ESP - 1] - sizeof(intptr_t);
  gprs[kNumberOfCpuRegisters] = esp;
  *(reinterpret_cast<uintptr_t*>(esp)) = eip_;
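  // The buffer now looks like this (lowest address first):
  //   gprs[0] .. gprs[kNumberOfCpuRegisters - 1]: register values in reverse enum order, which is
  //       the order popal restores them in (EDI, ESI, EBP, <ESP slot skipped>, EBX, EDX, ECX, EAX).
  //   gprs[kNumberOfCpuRegisters]: the target ESP minus one word; the word it points at holds
  //       eip_, so the trailing ret transfers control there.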
  __asm__ __volatile__(
      "movl %1, %%ebx\n\t"          // Address base of FPRs.
      "movsd 0(%%ebx), %%xmm0\n\t"  // Load up XMM0-XMM7.
      "movsd 8(%%ebx), %%xmm1\n\t"
      "movsd 16(%%ebx), %%xmm2\n\t"
      "movsd 24(%%ebx), %%xmm3\n\t"
      "movsd 32(%%ebx), %%xmm4\n\t"
      "movsd 40(%%ebx), %%xmm5\n\t"
      "movsd 48(%%ebx), %%xmm6\n\t"
      "movsd 56(%%ebx), %%xmm7\n\t"
      "movl %0, %%esp\n\t"  // ESP points to gprs.
      "popal\n\t"           // Load all registers except ESP and EIP with values in gprs.
      "popl %%esp\n\t"      // Load the new stack pointer from the extra slot.
      "ret\n\t"             // Pop eip_ from the slot written above and jump to it.
      :  // output.
      : "g"(&gprs[0]), "g"(&fprs[0])  // input.
      :);  // clobber.
#else
  UNIMPLEMENTED(FATAL);
#endif
  UNREACHABLE();
}

}  // namespace x86
}  // namespace art