/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Mterp entry point and support functions.
 */
#include "interpreter/interpreter_common.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "mterp.h"
#include "jit/jit.h"
#include "jit/jit_instrumentation.h"
#include "debugger.h"

namespace art {
namespace interpreter {
/*
 * Verify some constants used by the mterp interpreter.
 */
void CheckMterpAsmConstants() {
  /*
   * If we're using computed goto instruction transitions, make sure
   * none of the handlers overflows the 128-byte limit.  This won't tell
   * us which one did, but if any one is too big the total size will
   * overflow.
   */
  const int width = 128;
  int interp_size = (uintptr_t) artMterpAsmInstructionEnd -
                    (uintptr_t) artMterpAsmInstructionStart;
  if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
    LOG(art::FATAL) << "ERROR: unexpected asm interp size " << interp_size
                    << " (did an instruction handler exceed " << width << " bytes?)";
  }
}

void InitMterpTls(Thread* self) {
  self->SetMterpDefaultIBase(artMterpAsmInstructionStart);
  self->SetMterpAltIBase(artMterpAsmAltInstructionStart);
  self->SetMterpCurrentIBase(TraceExecutionEnabled() ?
                             artMterpAsmAltInstructionStart :
                             artMterpAsmInstructionStart);
}

/*
 * Find the matching case.  Returns the offset to the handler instructions.
 *
 * Returns 3 if we don't find a match (it's the size of the sparse-switch
 * instruction).
 */
extern "C" int32_t MterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal) {
  const int kInstrLen = 3;
  uint16_t size;
  const int32_t* keys;
  const int32_t* entries;

  /*
   * Sparse switch data format:
   *  ushort ident = 0x0200   magic value
   *  ushort size             number of entries in the table; > 0
   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (2+size*4) 16-bit code units.
   */

  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kSparseSwitchSignature));

  size = *switchData++;

  /* The keys are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  keys = reinterpret_cast<const int32_t*>(switchData);

  /* The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  entries = keys + size;

  /*
   * Binary-search through the array of keys, which are guaranteed to
   * be sorted low-to-high.
   */
  int lo = 0;
  int hi = size - 1;
  while (lo <= hi) {
    int mid = (lo + hi) >> 1;

    int32_t foundVal = keys[mid];
    if (testVal < foundVal) {
      hi = mid - 1;
    } else if (testVal > foundVal) {
      lo = mid + 1;
    } else {
      return entries[mid];
    }
  }
  return kInstrLen;
}

extern "C" int32_t MterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal) {
  const int kInstrLen = 3;

  /*
   * Packed switch data format:
   *  ushort ident = 0x0100   magic value
   *  ushort size             number of entries in the table
   *  int first_key           first (and lowest) switch case value
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (4+size*2) 16-bit code units.
   */
  uint16_t signature = *switchData++;
  DCHECK_EQ(signature, static_cast<uint16_t>(art::Instruction::kPackedSwitchSignature));

  uint16_t size = *switchData++;

  int32_t firstKey = *switchData++;
  firstKey |= (*switchData++) << 16;

  int index = testVal - firstKey;
  if (index < 0 || index >= size) {
    return kInstrLen;
  }

  /*
   * The entries are guaranteed to be aligned on a 32-bit boundary;
   * we can treat them as a native int array.
   */
  const int32_t* entries = reinterpret_cast<const int32_t*>(switchData);
  return entries[index];
}

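/*
 * Returns true when mterp can no longer handle execution and control should
 * transfer to the reference C++ interpreter, i.e. when non-JIT profiling
 * instrumentation is active or a debugger is attached.
 */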
extern "C" bool MterpShouldSwitchInterpreters()
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const instrumentation::Instrumentation* const instrumentation =
      Runtime::Current()->GetInstrumentation();
  return instrumentation->NonJitProfilingActive() || Dbg::IsDebuggerActive();
}

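/*
 * The MterpInvoke* helpers below are thin wrappers around DoInvoke, called
 * from the assembly handlers for the corresponding invoke opcodes.  Each
 * returns false when the invoke has left an exception pending, which the
 * assembly code treats as a signal to take the exception path.
 */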
extern "C" bool MterpInvokeVirtual(Thread* self, ShadowFrame* shadow_frame,
                                   uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kVirtual, false, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" bool MterpInvokeSuper(Thread* self, ShadowFrame* shadow_frame,
                                 uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kSuper, false, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" bool MterpInvokeInterface(Thread* self, ShadowFrame* shadow_frame,
                                     uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kInterface, false, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" bool MterpInvokeDirect(Thread* self, ShadowFrame* shadow_frame,
                                  uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kDirect, false, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" bool MterpInvokeStatic(Thread* self, ShadowFrame* shadow_frame,
                                  uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kStatic, false, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" bool MterpInvokeVirtualRange(Thread* self, ShadowFrame* shadow_frame,
                                        uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kVirtual, true, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" bool MterpInvokeSuperRange(Thread* self, ShadowFrame* shadow_frame,
                                      uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kSuper, true, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" bool MterpInvokeInterfaceRange(Thread* self, ShadowFrame* shadow_frame,
                                          uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kInterface, true, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" bool MterpInvokeDirectRange(Thread* self, ShadowFrame* shadow_frame,
                                       uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kDirect, true, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" bool MterpInvokeStaticRange(Thread* self, ShadowFrame* shadow_frame,
                                       uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvoke<kStatic, true, false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" bool MterpInvokeVirtualQuick(Thread* self, ShadowFrame* shadow_frame,
                                        uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvokeVirtualQuick<false>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" bool MterpInvokeVirtualQuickRange(Thread* self, ShadowFrame* shadow_frame,
                                             uint16_t* dex_pc_ptr, uint16_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  JValue* result_register = shadow_frame->GetResultRegister();
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoInvokeVirtualQuick<true>(
      self, *shadow_frame, inst, inst_data, result_register);
}

extern "C" void MterpThreadFenceForConstructor() {
  QuasiAtomic::ThreadFenceForConstructor();
}

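/*
 * The two resolution helpers below return true if resolution failed and an
 * exception is pending, and false on success.
 */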
extern "C" bool MterpConstString(uint32_t index, uint32_t tgt_vreg, ShadowFrame* shadow_frame,
                                 Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  String* s = ResolveString(self, *shadow_frame, index);
  if (UNLIKELY(s == nullptr)) {
    return true;
  }
  shadow_frame->SetVRegReference(tgt_vreg, s);
  return false;
}

extern "C" bool MterpConstClass(uint32_t index, uint32_t tgt_vreg, ShadowFrame* shadow_frame,
                                Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  Class* c = ResolveVerifyAndClinit(index, shadow_frame->GetMethod(), self, false, false);
  if (UNLIKELY(c == nullptr)) {
    return true;
  }
  shadow_frame->SetVRegReference(tgt_vreg, c);
  return false;
}

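/*
 * Handles check-cast.  Returns true if an exception was thrown, either
 * because the class could not be resolved or because the cast is invalid;
 * returns false otherwise.
 */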
extern "C" bool MterpCheckCast(uint32_t index, StackReference<mirror::Object>* vreg_addr,
                               art::ArtMethod* method, Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  Class* c = ResolveVerifyAndClinit(index, method, self, false, false);
  if (UNLIKELY(c == nullptr)) {
    return true;
  }
  // Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
  Object* obj = vreg_addr->AsMirrorPtr();
  if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
    ThrowClassCastException(c, obj->GetClass());
    return true;
  }
  return false;
}

extern "C" bool MterpInstanceOf(uint32_t index, StackReference<mirror::Object>* vreg_addr,
                                art::ArtMethod* method, Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  Class* c = ResolveVerifyAndClinit(index, method, self, false, false);
  if (UNLIKELY(c == nullptr)) {
    return false;  // Caller will check for pending exception.  Return value unimportant.
  }
  // Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
  Object* obj = vreg_addr->AsMirrorPtr();
  return (obj != nullptr) && obj->InstanceOf(c);
}

extern "C" bool MterpFillArrayData(Object* obj, const Instruction::ArrayDataPayload* payload)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  return FillArrayData(obj, payload);
}

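/*
 * Handles new-instance.  java.lang.String is special-cased: string instances
 * must be allocated via String::Alloc (here as an empty string), not through
 * the generic AllocObjectFromCode path.
 */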
extern "C" bool MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint32_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  Object* obj = nullptr;
  Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame->GetMethod(),
                                    self, false, false);
  if (LIKELY(c != nullptr)) {
    if (UNLIKELY(c->IsStringClass())) {
      gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
      mirror::SetStringCountVisitor visitor(0);
      obj = String::Alloc<true>(self, 0, allocator_type, visitor);
    } else {
      obj = AllocObjectFromCode<false, true>(
        inst->VRegB_21c(), shadow_frame->GetMethod(), self,
        Runtime::Current()->GetHeap()->GetCurrentAllocator());
    }
  }
  if (UNLIKELY(obj == nullptr)) {
    return false;
  }
  obj->GetClass()->AssertInitializedOrInitializingInThread(self);
  shadow_frame->SetVRegReference(inst->VRegA_21c(inst_data), obj);
  return true;
}

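/*
 * Reference stores are routed through the C++ helpers below so they pick up
 * the GC write barriers (and, for aput-object, the assignability check) that
 * the shared Do* helpers and checked array setters provide.
 */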
extern "C" bool MterpSputObject(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
                                uint32_t inst_data, Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, false, false>
      (self, *shadow_frame, inst, inst_data);
}

extern "C" bool MterpIputObject(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
                                uint32_t inst_data, Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, false, false>
      (self, *shadow_frame, inst, inst_data);
}

extern "C" bool MterpIputObjectQuick(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
                                     uint32_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoIPutQuick<Primitive::kPrimNot, false>(*shadow_frame, inst, inst_data);
}

extern "C" bool MterpAputObject(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
                                uint32_t inst_data)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  Object* a = shadow_frame->GetVRegReference(inst->VRegB_23x());
  if (UNLIKELY(a == nullptr)) {
    return false;
  }
  int32_t index = shadow_frame->GetVReg(inst->VRegC_23x());
  Object* val = shadow_frame->GetVRegReference(inst->VRegA_23x(inst_data));
  ObjectArray<Object>* array = a->AsObjectArray<Object>();
  if (array->CheckIsValidIndex(index) && array->CheckAssignable(val)) {
    array->SetWithoutChecks<false>(index, val);
    return true;
  }
  return false;
}

extern "C" bool MterpFilledNewArray(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
                                    Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFilledNewArray<false, false, false>(inst, *shadow_frame, self,
                                               shadow_frame->GetResultRegister());
}

extern "C" bool MterpFilledNewArrayRange(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
                                         Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  return DoFilledNewArray<true, false, false>(inst, *shadow_frame, self,
                                              shadow_frame->GetResultRegister());
}

extern "C" bool MterpNewArray(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
                              uint32_t inst_data, Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  int32_t length = shadow_frame->GetVReg(inst->VRegB_22c(inst_data));
  Object* obj = AllocArrayFromCode<false, true>(
      inst->VRegC_22c(), length, shadow_frame->GetMethod(), self,
      Runtime::Current()->GetHeap()->GetCurrentAllocator());
  if (UNLIKELY(obj == nullptr)) {
    return false;
  }
  shadow_frame->SetVRegReference(inst->VRegA_22c(inst_data), obj);
  return true;
}

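/*
 * Called when an exception is pending.  Locates the catch handler, if any,
 * within the current method.  Returns false if no handler was found and the
 * frame must be unwound; otherwise updates the dex pc to the handler and
 * returns true.
 */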
extern "C" bool MterpHandleException(Thread* self, ShadowFrame* shadow_frame)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  DCHECK(self->IsExceptionPending());
  const instrumentation::Instrumentation* const instrumentation =
      Runtime::Current()->GetInstrumentation();
  uint32_t found_dex_pc = FindNextInstructionFollowingException(self, *shadow_frame,
                                                                shadow_frame->GetDexPC(),
                                                                instrumentation);
  if (found_dex_pc == DexFile::kDexNoIndex) {
    return false;
  }
  // OK - we can deal with it.  Update and continue.
  shadow_frame->SetDexPC(found_dex_pc);
  return true;
}

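/*
 * Per-instruction sanity check, run from the alternate handler base (e.g.
 * when instruction tracing is enabled).  Verifies the expected exception
 * state and emits the execution trace for the instruction about to execute.
 */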
extern "C" void MterpCheckBefore(Thread* self, ShadowFrame* shadow_frame)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  if (inst->Opcode(inst_data) == Instruction::MOVE_EXCEPTION) {
    self->AssertPendingException();
  } else {
    self->AssertNoPendingException();
  }
  TraceExecution(*shadow_frame, inst, shadow_frame->GetDexPC());
}

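/*
 * The MterpLog* helpers below exist purely for debugging: the assembly
 * handlers call them to record why an exception was raised or why execution
 * is falling back to the reference interpreter.
 */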
extern "C" void MterpLogDivideByZeroException(Thread* self, ShadowFrame* shadow_frame)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "DivideByZero: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogArrayIndexException(Thread* self, ShadowFrame* shadow_frame)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "ArrayIndex: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogNegativeArraySizeException(Thread* self, ShadowFrame* shadow_frame)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "NegativeArraySize: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogNoSuchMethodException(Thread* self, ShadowFrame* shadow_frame)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "NoSuchMethod: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogExceptionThrownException(Thread* self, ShadowFrame* shadow_frame)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "ExceptionThrown: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogNullObjectException(Thread* self, ShadowFrame* shadow_frame)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "NullObject: " << inst->Opcode(inst_data);
}

extern "C" void MterpLogFallback(Thread* self, ShadowFrame* shadow_frame)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "Fallback: " << inst->Opcode(inst_data) << ", Exception Pending?: "
            << self->IsExceptionPending();
}

extern "C" void MterpLogOSR(Thread* self, ShadowFrame* shadow_frame, int32_t offset)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  LOG(INFO) << "OSR: " << inst->Opcode(inst_data) << ", offset = " << offset;
}

extern "C" void MterpLogSuspendFallback(Thread* self, ShadowFrame* shadow_frame, uint32_t flags)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  UNUSED(self);
  const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
  uint16_t inst_data = inst->Fetch16(0);
  if (flags & kCheckpointRequest) {
    LOG(INFO) << "Checkpoint fallback: " << inst->Opcode(inst_data);
  } else if (flags & kSuspendRequest) {
    LOG(INFO) << "Suspend fallback: " << inst->Opcode(inst_data);
  }
}

extern "C" bool MterpSuspendCheck(Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  self->AllowThreadSuspension();
  return MterpShouldSwitchInterpreters();
}

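/*
 * Quick-path field setters and object getters called directly from assembly.
 * The setters return 0 on success and -1 on failure; the getters return null
 * with an exception pending when the object is null or the index is out of
 * bounds.
 */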
extern "C" int artSet64IndirectStaticFromMterp(uint32_t field_idx, ArtMethod* referrer,
                                               uint64_t* new_value, Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  ScopedQuickEntrypointChecks sqec(self);
  ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t));
  if (LIKELY(field != nullptr)) {
    // Compiled code can't use transactional mode.
    field->Set64<false>(field->GetDeclaringClass(), *new_value);
    return 0;  // success
  }
  field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int64_t));
  if (LIKELY(field != nullptr)) {
    // Compiled code can't use transactional mode.
    field->Set64<false>(field->GetDeclaringClass(), *new_value);
    return 0;  // success
  }
  return -1;  // failure
}

extern "C" int artSet8InstanceFromMterp(uint32_t field_idx, mirror::Object* obj, uint8_t new_value,
                                        ArtMethod* referrer)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int8_t));
  if (LIKELY(field != nullptr && obj != nullptr)) {
    Primitive::Type type = field->GetTypeAsPrimitiveType();
    if (type == Primitive::kPrimBoolean) {
      field->SetBoolean<false>(obj, new_value);
    } else {
      DCHECK_EQ(Primitive::kPrimByte, type);
      field->SetByte<false>(obj, new_value);
    }
    return 0;  // success
  }
  return -1;  // failure
}

extern "C" int artSet16InstanceFromMterp(uint32_t field_idx, mirror::Object* obj,
                                         uint16_t new_value, ArtMethod* referrer)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
                                  sizeof(int16_t));
  if (LIKELY(field != nullptr && obj != nullptr)) {
    Primitive::Type type = field->GetTypeAsPrimitiveType();
    if (type == Primitive::kPrimChar) {
      field->SetChar<false>(obj, new_value);
    } else {
      DCHECK_EQ(Primitive::kPrimShort, type);
      field->SetShort<false>(obj, new_value);
    }
    return 0;  // success
  }
  return -1;  // failure
}

extern "C" int artSet32InstanceFromMterp(uint32_t field_idx, mirror::Object* obj,
                                         uint32_t new_value, ArtMethod* referrer)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
                                  sizeof(int32_t));
  if (LIKELY(field != nullptr && obj != nullptr)) {
    field->Set32<false>(obj, new_value);
    return 0;  // success
  }
  return -1;  // failure
}

extern "C" int artSet64InstanceFromMterp(uint32_t field_idx, mirror::Object* obj,
                                         uint64_t* new_value, ArtMethod* referrer)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
                                  sizeof(int64_t));
  if (LIKELY(field != nullptr && obj != nullptr)) {
    field->Set64<false>(obj, *new_value);
    return 0;  // success
  }
  return -1;  // failure
}

extern "C" int artSetObjInstanceFromMterp(uint32_t field_idx, mirror::Object* obj,
                                          mirror::Object* new_value, ArtMethod* referrer)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
                                  sizeof(mirror::HeapReference<mirror::Object>));
  if (LIKELY(field != nullptr && obj != nullptr)) {
    field->SetObj<false>(obj, new_value);
    return 0;  // success
  }
  return -1;  // failure
}

extern "C" mirror::Object* artAGetObjectFromMterp(mirror::Object* arr, int32_t index)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  if (UNLIKELY(arr == nullptr)) {
    ThrowNullPointerExceptionFromInterpreter();
    return nullptr;
  }
  ObjectArray<Object>* array = arr->AsObjectArray<Object>();
  if (LIKELY(array->CheckIsValidIndex(index))) {
    return array->GetWithoutChecks(index);
  } else {
    return nullptr;
  }
}

extern "C" mirror::Object* artIGetObjectFromMterp(mirror::Object* obj, uint32_t field_offset)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  if (UNLIKELY(obj == nullptr)) {
    ThrowNullPointerExceptionFromInterpreter();
    return nullptr;
  }
  return obj->GetFieldObject<mirror::Object>(MemberOffset(field_offset));
}

/*
 * Create a hotness_countdown based on the current method hotness_count and profiling
 * mode.  In short, determine how many hotness events we hit before reporting back
 * to the full instrumentation via MterpAddHotnessBatch.  Called once on entry to the method,
 * and regenerated following batch updates.
 */
extern "C" int MterpSetUpHotnessCountdown(ArtMethod* method, ShadowFrame* shadow_frame)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  uint16_t hotness_count = method->GetCounter();
  int32_t countdown_value = jit::kJitHotnessDisabled;
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit != nullptr) {
    jit::JitInstrumentationCache* cache = jit->GetInstrumentationCache();
    int32_t warm_threshold = cache->WarmMethodThreshold();
    int32_t hot_threshold = cache->HotMethodThreshold();
    int32_t osr_threshold = cache->OSRMethodThreshold();
    if (hotness_count < warm_threshold) {
      countdown_value = warm_threshold - hotness_count;
    } else if (hotness_count < hot_threshold) {
      countdown_value = hot_threshold - hotness_count;
    } else if (hotness_count < osr_threshold) {
      countdown_value = osr_threshold - hotness_count;
    } else {
      countdown_value = jit::kJitCheckForOSR;
    }
  }
  /*
   * The actual hotness threshold may exceed the range of our int16_t countdown value.  This is
   * not a problem, though.  We can just break it down into smaller chunks.
   */
  countdown_value = std::min(countdown_value,
                             static_cast<int32_t>(std::numeric_limits<int16_t>::max()));
  shadow_frame->SetCachedHotnessCountdown(countdown_value);
  shadow_frame->SetHotnessCountdown(countdown_value);
  return countdown_value;
}

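/*
 * Example: with a warm-method threshold of 500 and a current hotness count of
 * 120, the countdown is set to 380; once the cached countdown is exhausted,
 * the handlers report the accumulated samples via MterpAddHotnessBatch below.
 */
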
/*
 * Report a batch of hotness events to the instrumentation and then return the
 * new countdown value indicating when we should next report.
 */
extern "C" int16_t MterpAddHotnessBatch(ArtMethod* method,
                                        ShadowFrame* shadow_frame,
                                        Thread* self)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit != nullptr) {
    int16_t count = shadow_frame->GetCachedHotnessCountdown() - shadow_frame->GetHotnessCountdown();
    jit->GetInstrumentationCache()->AddSamples(self, method, count);
  }
  return MterpSetUpHotnessCountdown(method, shadow_frame);
}

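/*
 * Profiles a branch and, for backward branches (offset <= 0), credits a
 * hotness sample and checks whether on-stack replacement should occur.
 */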
// TUNING: Unused by arm/arm64.  Remove when x86/x86_64/mips/mips64 mterps support batch updates.
extern "C" bool MterpProfileBranch(Thread* self, ShadowFrame* shadow_frame, int32_t offset)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  ArtMethod* method = shadow_frame->GetMethod();
  JValue* result = shadow_frame->GetResultRegister();
  uint32_t dex_pc = shadow_frame->GetDexPC();
  jit::Jit* jit = Runtime::Current()->GetJit();
  if ((jit != nullptr) && (offset <= 0)) {
    jit->GetInstrumentationCache()->AddSamples(self, method, 1);
  }
  int16_t countdown_value = MterpSetUpHotnessCountdown(method, shadow_frame);
  if (countdown_value == jit::kJitCheckForOSR) {
    return jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, result);
  } else {
    return false;
  }
}

extern "C" bool MterpMaybeDoOnStackReplacement(Thread* self,
                                               ShadowFrame* shadow_frame,
                                               int32_t offset)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  ArtMethod* method = shadow_frame->GetMethod();
  JValue* result = shadow_frame->GetResultRegister();
  uint32_t dex_pc = shadow_frame->GetDexPC();
  // Assumes caller has already determined that an OSR check is appropriate.
  return jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, result);
}

}  // namespace interpreter
}  // namespace art