1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_ISOLATE_H_
6#define V8_ISOLATE_H_
7
8#include <memory>
9#include <queue>
10
11#include "include/v8-debug.h"
12#include "src/allocation.h"
13#include "src/base/atomicops.h"
14#include "src/builtins/builtins.h"
15#include "src/contexts.h"
16#include "src/date.h"
17#include "src/execution.h"
18#include "src/frames.h"
19#include "src/futex-emulation.h"
20#include "src/global-handles.h"
21#include "src/handles.h"
22#include "src/heap/heap.h"
23#include "src/messages.h"
24#include "src/regexp/regexp-stack.h"
25#include "src/runtime/runtime.h"
26#include "src/zone/zone.h"
27
28namespace v8 {
29
30namespace base {
31class RandomNumberGenerator;
32}
33
34namespace internal {
35
36class AccessCompilerData;
37class AddressToIndexHashMap;
38class AstStringConstants;
39class BasicBlockProfiler;
40class Bootstrapper;
41class CancelableTaskManager;
42class CallInterfaceDescriptorData;
43class CodeAgingHelper;
44class CodeEventDispatcher;
45class CodeGenerator;
46class CodeRange;
47class CodeStubDescriptor;
48class CodeTracer;
49class CompilationCache;
50class CompilerDispatcher;
51class CompilationStatistics;
52class ContextSlotCache;
53class Counters;
54class CpuFeatures;
55class CpuProfiler;
56class DeoptimizerData;
57class DescriptorLookupCache;
58class Deserializer;
59class EmptyStatement;
60class ExternalCallbackScope;
61class ExternalReferenceTable;
62class Factory;
63class HandleScopeImplementer;
64class HeapObjectToIndexHashMap;
65class HeapProfiler;
66class HStatistics;
67class HTracer;
68class InlineRuntimeFunctionsTable;
69class InnerPointerToCodeCache;
70class Logger;
71class MaterializedObjectStore;
72class OptimizingCompileDispatcher;
73class RegExpStack;
74class RuntimeProfiler;
75class SaveContext;
76class StatsTable;
77class StringTracker;
78class StubCache;
79class SweeperThread;
80class ThreadManager;
81class ThreadState;
82class ThreadVisitor;  // Defined in v8threads.h
83class UnicodeCache;
84template <StateTag Tag> class VMState;
85
86// 'void function pointer', used to roundtrip the
87// ExternalReference::ExternalReferenceRedirector since we can not include
88// assembler.h, where it is defined, here.
89typedef void* ExternalReferenceRedirectorPointer();
90
91
92class Debug;
93class PromiseOnStack;
94class Redirection;
95class Simulator;
96
97namespace interpreter {
98class Interpreter;
99}
100
// If |isolate| has a scheduled exception, promote it to a pending exception
// and return the resulting failure value from the enclosing
// (Object*-returning) function.
#define RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate)    \
  do {                                                    \
    Isolate* __isolate__ = (isolate);                     \
    if (__isolate__->has_scheduled_exception()) {         \
      return __isolate__->PromoteScheduledException();    \
    }                                                     \
  } while (false)

// Macros for MaybeHandle.

// If |isolate| has a scheduled exception, promote it to a pending exception
// and return |value| from the enclosing function.
#define RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, value) \
  do {                                                      \
    Isolate* __isolate__ = (isolate);                       \
    if (__isolate__->has_scheduled_exception()) {           \
      __isolate__->PromoteScheduledException();             \
      return value;                                         \
    }                                                       \
  } while (false)

// As RETURN_VALUE_IF_SCHEDULED_EXCEPTION, returning an empty MaybeHandle<T>.
#define RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, T) \
  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, MaybeHandle<T>())

// Evaluates |call| (a MaybeHandle-returning expression). On success, returns
// the dereferenced result handle; on failure, returns the heap's exception
// sentinel (a pending exception must already be set).
#define RETURN_RESULT_OR_FAILURE(isolate, call)     \
  do {                                              \
    Handle<Object> __result__;                      \
    Isolate* __isolate__ = (isolate);               \
    if (!(call).ToHandle(&__result__)) {            \
      DCHECK(__isolate__->has_pending_exception()); \
      return __isolate__->heap()->exception();      \
    }                                               \
    return *__result__;                             \
  } while (false)

// Assigns the result of |call| to |dst|; if |call| yielded an empty
// MaybeHandle, returns |value| from the enclosing function instead.
#define ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value)  \
  do {                                                               \
    if (!(call).ToHandle(&dst)) {                                    \
      DCHECK((isolate)->has_pending_exception());                    \
      return value;                                                  \
    }                                                                \
  } while (false)

// As ASSIGN_RETURN_ON_EXCEPTION_VALUE, returning the heap's exception
// sentinel (for Object*-returning functions).
#define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call)          \
  do {                                                                  \
    Isolate* __isolate__ = (isolate);                                   \
    ASSIGN_RETURN_ON_EXCEPTION_VALUE(__isolate__, dst, call,            \
                                     __isolate__->heap()->exception()); \
  } while (false)

// As ASSIGN_RETURN_ON_EXCEPTION_VALUE, returning an empty MaybeHandle<T>.
#define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T)  \
  ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>())

// Throws the error created by |call| (a factory error-constructor invocation)
// and returns an empty MaybeHandle<T> from the enclosing function.
#define THROW_NEW_ERROR(isolate, call, T)                       \
  do {                                                          \
    Isolate* __isolate__ = (isolate);                           \
    return __isolate__->Throw<T>(__isolate__->factory()->call); \
  } while (false)

// As THROW_NEW_ERROR, for Object*-returning functions.
#define THROW_NEW_ERROR_RETURN_FAILURE(isolate, call)         \
  do {                                                        \
    Isolate* __isolate__ = (isolate);                         \
    return __isolate__->Throw(*__isolate__->factory()->call); \
  } while (false)

// If |call| produced an empty (null) MaybeHandle, returns |value| from the
// enclosing function (a pending exception must already be set).
#define RETURN_ON_EXCEPTION_VALUE(isolate, call, value)            \
  do {                                                             \
    if ((call).is_null()) {                                        \
      DCHECK((isolate)->has_pending_exception());                  \
      return value;                                                \
    }                                                              \
  } while (false)
171
// If |call| produced an empty (null) MaybeHandle, returns the heap's
// exception sentinel from the enclosing (Object*-returning) function.
// NOTE: The do/while must NOT end with a semicolon: like the sibling macros
// above, this macro has to expand to a single statement so that
// "if (x) RETURN_FAILURE_ON_EXCEPTION(...); else ..." compiles. The previous
// version had a stray trailing ';' that produced an empty second statement.
#define RETURN_FAILURE_ON_EXCEPTION(isolate, call)               \
  do {                                                           \
    Isolate* __isolate__ = (isolate);                            \
    RETURN_ON_EXCEPTION_VALUE(__isolate__, call,                 \
                              __isolate__->heap()->exception()); \
  } while (false)
178
// As RETURN_ON_EXCEPTION_VALUE, returning an empty MaybeHandle<T>.
#define RETURN_ON_EXCEPTION(isolate, call, T)  \
  RETURN_ON_EXCEPTION_VALUE(isolate, call, MaybeHandle<T>())
181
182
// X-macro listing the per-isolate addresses exposed through
// Isolate::AddressId and Isolate::get_address_from_id():
// C(CamelCaseName, hacker_style_name).
#define FOR_EACH_ISOLATE_ADDRESS_NAME(C)                \
  C(Handler, handler)                                   \
  C(CEntryFP, c_entry_fp)                               \
  C(CFunction, c_function)                              \
  C(Context, context)                                   \
  C(PendingException, pending_exception)                \
  C(PendingHandlerContext, pending_handler_context)     \
  C(PendingHandlerCode, pending_handler_code)           \
  C(PendingHandlerOffset, pending_handler_offset)       \
  C(PendingHandlerFP, pending_handler_fp)               \
  C(PendingHandlerSP, pending_handler_sp)               \
  C(ExternalCaughtException, external_caught_exception) \
  C(JSEntrySP, js_entry_sp)
196
// Runs |body| as a for-loop (|init| declares the loop variable, |limit_check|
// is the loop condition, |increment| advances it) while opening a fresh
// HandleScope every 1024 iterations, so long-running loops do not accumulate
// an unbounded number of handles in a single scope.
// The |isolate| argument is parenthesized on expansion, consistent with the
// other macros in this file, so expression arguments expand safely.
#define FOR_WITH_HANDLE_SCOPE(isolate, loop_var_type, init, loop_var,      \
                              limit_check, increment, body)                \
  do {                                                                     \
    loop_var_type init;                                                    \
    loop_var_type for_with_handle_limit = loop_var;                        \
    Isolate* for_with_handle_isolate = (isolate);                          \
    while (limit_check) {                                                  \
      for_with_handle_limit += 1024;                                       \
      HandleScope loop_scope(for_with_handle_isolate);                     \
      for (; limit_check && loop_var < for_with_handle_limit; increment) { \
        body                                                               \
      }                                                                    \
    }                                                                      \
  } while (false)
211
212// Platform-independent, reliable thread identifier.
213class ThreadId {
214 public:
215  // Creates an invalid ThreadId.
216  ThreadId() { base::NoBarrier_Store(&id_, kInvalidId); }
217
218  ThreadId& operator=(const ThreadId& other) {
219    base::NoBarrier_Store(&id_, base::NoBarrier_Load(&other.id_));
220    return *this;
221  }
222
223  // Returns ThreadId for current thread.
224  static ThreadId Current() { return ThreadId(GetCurrentThreadId()); }
225
226  // Returns invalid ThreadId (guaranteed not to be equal to any thread).
227  static ThreadId Invalid() { return ThreadId(kInvalidId); }
228
229  // Compares ThreadIds for equality.
230  INLINE(bool Equals(const ThreadId& other) const) {
231    return base::NoBarrier_Load(&id_) == base::NoBarrier_Load(&other.id_);
232  }
233
234  // Checks whether this ThreadId refers to any thread.
235  INLINE(bool IsValid() const) {
236    return base::NoBarrier_Load(&id_) != kInvalidId;
237  }
238
239  // Converts ThreadId to an integer representation
240  // (required for public API: V8::V8::GetCurrentThreadId).
241  int ToInteger() const { return static_cast<int>(base::NoBarrier_Load(&id_)); }
242
243  // Converts ThreadId to an integer representation
244  // (required for public API: V8::V8::TerminateExecution).
245  static ThreadId FromInteger(int id) { return ThreadId(id); }
246
247 private:
248  static const int kInvalidId = -1;
249
250  explicit ThreadId(int id) { base::NoBarrier_Store(&id_, id); }
251
252  static int AllocateThreadId();
253
254  V8_EXPORT_PRIVATE static int GetCurrentThreadId();
255
256  base::Atomic32 id_;
257
258  static base::Atomic32 highest_thread_id_;
259
260  friend class Isolate;
261};
262
263
// Defines a trivial inline setter/getter pair for a member named |name|_.
#define FIELD_ACCESSOR(type, name)                 \
  inline void set_##name(type v) { name##_ = v; }  \
  inline type name() const { return name##_; }
267
268
// Per-thread state of an isolate: exception bookkeeping, stack markers and
// handler chains. Fields are accessed by address from generated code (see
// THREAD_LOCAL_TOP_ADDRESS in Isolate), so member order/layout matters.
class ThreadLocalTop BASE_EMBEDDED {
 public:
  // Does early low-level initialization that does not depend on the
  // isolate being present.
  ThreadLocalTop();

  // Initialize the thread data.
  void Initialize();

  // Get the top C++ try catch handler or NULL if none are registered.
  //
  // This method is not guaranteed to return an address that can be
  // used for comparison with addresses into the JS stack.  If such an
  // address is needed, use try_catch_handler_address.
  FIELD_ACCESSOR(v8::TryCatch*, try_catch_handler)

  // Get the address of the top C++ try catch handler or NULL if
  // none are registered.
  //
  // This method always returns an address that can be compared to
  // pointers into the JavaScript stack.  When running on actual
  // hardware, try_catch_handler_address and TryCatchHandler return
  // the same pointer.  When running on a simulator with a separate JS
  // stack, try_catch_handler_address returns a JS stack address that
  // corresponds to the place on the JS stack where the C++ handler
  // would have been if the stack were not separate.
  Address try_catch_handler_address() {
    return reinterpret_cast<Address>(
        v8::TryCatch::JSStackComparableAddress(try_catch_handler()));
  }

  // Releases this thread's resources; called from
  // Isolate::FreeThreadResources().
  void Free();

  // The isolate this thread-local state belongs to.
  Isolate* isolate_;
  // The context where the current execution method is created and for variable
  // lookups.
  Context* context_;
  // Id of the thread owning this state.
  ThreadId thread_id_;
  // The exception currently being propagated, if any.
  Object* pending_exception_;

  // Communication channel between Isolate::FindHandler and the CEntryStub.
  Context* pending_handler_context_;
  Code* pending_handler_code_;
  intptr_t pending_handler_offset_;
  Address pending_handler_fp_;
  Address pending_handler_sp_;

  // Communication channel between Isolate::Throw and message consumers.
  bool rethrowing_message_;
  Object* pending_message_obj_;

  // Use a separate value for scheduled exceptions to preserve the
  // invariants that hold about pending_exception.  We may want to
  // unify them later.
  Object* scheduled_exception_;
  // Whether the pending exception was (or will be) caught by an external
  // v8::TryCatch handler.
  bool external_caught_exception_;
  // Innermost SaveContext scope entered on this thread, if any.
  SaveContext* save_context_;

  // Stack.
  Address c_entry_fp_;  // the frame pointer of the top c entry frame
  Address handler_;     // try-blocks are chained through the stack
  Address c_function_;  // C function that was called at c entry.

  // Throwing an exception may cause a Promise rejection.  For this purpose
  // we keep track of a stack of nested promises and the corresponding
  // try-catch handlers.
  PromiseOnStack* promise_on_stack_;

#ifdef USE_SIMULATOR
  Simulator* simulator_;
#endif

  Address js_entry_sp_;  // the stack pointer of the bottom JS entry frame
  // the external callback we're currently in
  ExternalCallbackScope* external_callback_scope_;
  StateTag current_vm_state_;

  // Call back function to report unsafe JS accesses.
  v8::FailedAccessCheckCallback failed_access_check_callback_;

 private:
  void InitializeInternal();

  v8::TryCatch* try_catch_handler_;
};
354
355
#if USE_SIMULATOR

// Scalar Isolate fields that exist only in simulator builds:
// V(type, name, initial_value).
#define ISOLATE_INIT_SIMULATOR_LIST(V)                    \
  V(bool, simulator_initialized, false)                   \
  V(base::CustomMatcherHashMap*, simulator_i_cache, NULL) \
  V(Redirection*, simulator_redirection, NULL)
#else

// Non-simulator builds: the list is empty.
#define ISOLATE_INIT_SIMULATOR_LIST(V)

#endif
367
368
#ifdef DEBUG

// Array-valued Isolate fields that exist only in debug builds (code and
// comment statistics): V(element_type, name, array_length).
#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)               \
  V(CommentStatistic, paged_space_comments_statistics, \
    CommentStatistic::kMaxComments + 1)                \
  V(int, code_kind_statistics, AbstractCode::NUMBER_OF_KINDS)
#else

#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)

#endif

// Fixed-size array fields of Isolate: V(element_type, name, array_length).
// Accessors are generated via GLOBAL_ARRAY_ACCESSOR below.
#define ISOLATE_INIT_ARRAY_LIST(V)                                             \
  /* SerializerDeserializer state. */                                          \
  V(int32_t, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
  V(int, bad_char_shift_table, kUC16AlphabetSize)                              \
  V(int, good_suffix_shift_table, (kBMMaxShift + 1))                           \
  V(int, suffix_table, (kBMMaxShift + 1))                                      \
  ISOLATE_INIT_DEBUG_ARRAY_LIST(V)

// List of HeapObjects, used as the element type of the
// string_stream_debug_object_cache Isolate field below.
typedef List<HeapObject*> DebugObjectCache;
390
// Scalar fields of Isolate: V(type, name, initial_value). Each entry gets a
// storage slot plus a getter/setter pair via GLOBAL_ACCESSOR below.
#define ISOLATE_INIT_LIST(V)                                                  \
  /* Assembler state. */                                                      \
  V(FatalErrorCallback, exception_behavior, nullptr)                          \
  V(OOMErrorCallback, oom_behavior, nullptr)                                  \
  V(LogEventCallback, event_logger, nullptr)                                  \
  V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
  V(AllowWasmCompileCallback, allow_wasm_compile_callback, nullptr)           \
  V(AllowWasmInstantiateCallback, allow_wasm_instantiate_callback, nullptr)   \
  V(ExternalReferenceRedirectorPointer*, external_reference_redirector,       \
    nullptr)                                                                  \
  /* State for Relocatable. */                                                \
  V(Relocatable*, relocatable_top, nullptr)                                   \
  V(DebugObjectCache*, string_stream_debug_object_cache, nullptr)             \
  V(Object*, string_stream_current_security_token, nullptr)                   \
  V(ExternalReferenceTable*, external_reference_table, nullptr)               \
  V(intptr_t*, api_external_references, nullptr)                              \
  V(AddressToIndexHashMap*, external_reference_map, nullptr)                  \
  V(HeapObjectToIndexHashMap*, root_index_map, nullptr)                       \
  V(int, pending_microtask_count, 0)                                          \
  V(HStatistics*, hstatistics, nullptr)                                       \
  V(CompilationStatistics*, turbo_statistics, nullptr)                        \
  V(HTracer*, htracer, nullptr)                                               \
  V(CodeTracer*, code_tracer, nullptr)                                        \
  V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu)                           \
  V(PromiseRejectCallback, promise_reject_callback, nullptr)                  \
  V(const v8::StartupData*, snapshot_blob, nullptr)                           \
  V(int, code_and_metadata_size, 0)                                           \
  V(int, bytecode_and_metadata_size, 0)                                       \
  /* true if being profiled. Causes collection of extra compile info. */      \
  V(bool, is_profiling, false)                                                \
  /* true if a trace is being formatted through Error.prepareStackTrace. */   \
  V(bool, formatting_stack_trace, false)                                      \
  /* Perform side effect checks on function call and API callbacks. */        \
  V(bool, needs_side_effect_check, false)                                     \
  ISOLATE_INIT_SIMULATOR_LIST(V)
426
// Defines an inline setter/getter pair forwarding to the |name|_ field of
// thread_local_top_.
#define THREAD_LOCAL_TOP_ACCESSOR(type, name)                        \
  inline void set_##name(type v) { thread_local_top_.name##_ = v; }  \
  inline type name() const { return thread_local_top_.name##_; }

// Defines an accessor exposing the address of the |name|_ field of
// thread_local_top_.
#define THREAD_LOCAL_TOP_ADDRESS(type, name) \
  type* name##_address() { return &thread_local_top_.name##_; }
433
434
435class Isolate {
436  // These forward declarations are required to make the friend declarations in
437  // PerIsolateThreadData work on some older versions of gcc.
438  class ThreadDataTable;
439  class EntryStackItem;
440 public:
441  ~Isolate();
442
  // A thread has a PerIsolateThreadData instance for each isolate that it has
  // entered. That instance is allocated when the isolate is initially entered
  // and reused on subsequent entries.
  class PerIsolateThreadData {
   public:
    PerIsolateThreadData(Isolate* isolate, ThreadId thread_id)
        : isolate_(isolate),
          thread_id_(thread_id),
          stack_limit_(0),
          thread_state_(NULL),
#if USE_SIMULATOR
          simulator_(NULL),
#endif
          next_(NULL),
          prev_(NULL) { }
    ~PerIsolateThreadData();
    Isolate* isolate() const { return isolate_; }
    ThreadId thread_id() const { return thread_id_; }

    FIELD_ACCESSOR(uintptr_t, stack_limit)
    FIELD_ACCESSOR(ThreadState*, thread_state)

#if USE_SIMULATOR
    FIELD_ACCESSOR(Simulator*, simulator)
#endif

    // True if this instance belongs to the given (isolate, thread) pair.
    bool Matches(Isolate* isolate, ThreadId thread_id) const {
      return isolate_ == isolate && thread_id_.Equals(thread_id);
    }

   private:
    Isolate* isolate_;
    ThreadId thread_id_;
    uintptr_t stack_limit_;
    ThreadState* thread_state_;

#if USE_SIMULATOR
    Simulator* simulator_;
#endif

    // Doubly-linked-list links; maintained externally (ThreadDataTable and
    // Isolate are friends).
    PerIsolateThreadData* next_;
    PerIsolateThreadData* prev_;

    friend class Isolate;
    friend class ThreadDataTable;
    friend class EntryStackItem;

    DISALLOW_COPY_AND_ASSIGN(PerIsolateThreadData);
  };
492
493
  // One k<CamelName>Address enumerator per entry of
  // FOR_EACH_ISOLATE_ADDRESS_NAME; used as the key for
  // get_address_from_id().
  enum AddressId {
#define DECLARE_ENUM(CamelName, hacker_name) k##CamelName##Address,
    FOR_EACH_ISOLATE_ADDRESS_NAME(DECLARE_ENUM)
#undef DECLARE_ENUM
    kIsolateAddressCount
  };
500
501  static void InitializeOncePerProcess();
502
  // Returns the PerIsolateThreadData for the current thread (or NULL if one is
  // not currently set).
  static PerIsolateThreadData* CurrentPerIsolateThreadData() {
    return reinterpret_cast<PerIsolateThreadData*>(
        base::Thread::GetThreadLocal(per_isolate_thread_data_key_));
  }

  // Returns the isolate inside which the current thread is running.
  // DCHECKs (debug builds only) that the TLS key has been created and that
  // the current thread has entered an isolate.
  INLINE(static Isolate* Current()) {
    DCHECK(base::NoBarrier_Load(&isolate_key_created_) == 1);
    Isolate* isolate = reinterpret_cast<Isolate*>(
        base::Thread::GetExistingThreadLocal(isolate_key_));
    DCHECK(isolate != NULL);
    return isolate;
  }
518
519  // Usually called by Init(), but can be called early e.g. to allow
520  // testing components that require logging but not the whole
521  // isolate.
522  //
523  // Safe to call more than once.
524  void InitializeLoggingAndCounters();
525
526  bool Init(Deserializer* des);
527
528  // True if at least one thread Enter'ed this isolate.
529  bool IsInUse() { return entry_stack_ != NULL; }
530
531  // Destroys the non-default isolates.
532  // Sets default isolate into "has_been_disposed" state rather then destroying,
533  // for legacy API reasons.
534  void TearDown();
535
536  void ReleaseManagedObjects();
537
538  static void GlobalTearDown();
539
540  void ClearSerializerData();
541
542  // Find the PerThread for this particular (isolate, thread) combination
543  // If one does not yet exist, return null.
544  PerIsolateThreadData* FindPerThreadDataForThisThread();
545
546  // Find the PerThread for given (isolate, thread) combination
547  // If one does not yet exist, return null.
548  PerIsolateThreadData* FindPerThreadDataForThread(ThreadId thread_id);
549
550  // Discard the PerThread for this particular (isolate, thread) combination
551  // If one does not yet exist, no-op.
552  void DiscardPerThreadDataForThisThread();
553
554  // Returns the key used to store the pointer to the current isolate.
555  // Used internally for V8 threads that do not execute JavaScript but still
556  // are part of the domain of an isolate (like the context switcher).
557  static base::Thread::LocalStorageKey isolate_key() {
558    return isolate_key_;
559  }
560
561  // Returns the key used to store process-wide thread IDs.
562  static base::Thread::LocalStorageKey thread_id_key() {
563    return thread_id_key_;
564  }
565
566  static base::Thread::LocalStorageKey per_isolate_thread_data_key();
567
568  // Mutex for serializing access to break control structures.
569  base::RecursiveMutex* break_access() { return &break_access_; }
570
571  Address get_address_from_id(AddressId id);
572
573  // Access to top context (where the current function object was created).
574  Context* context() { return thread_local_top_.context_; }
575  inline void set_context(Context* context);
576  Context** context_address() { return &thread_local_top_.context_; }
577
578  THREAD_LOCAL_TOP_ACCESSOR(SaveContext*, save_context)
579
580  // Access to current thread id.
581  THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id)
582
583  // Interface to pending exception.
584  inline Object* pending_exception();
585  inline void set_pending_exception(Object* exception_obj);
586  inline void clear_pending_exception();
587
588  THREAD_LOCAL_TOP_ADDRESS(Object*, pending_exception)
589
590  inline bool has_pending_exception();
591
592  THREAD_LOCAL_TOP_ADDRESS(Context*, pending_handler_context)
593  THREAD_LOCAL_TOP_ADDRESS(Code*, pending_handler_code)
594  THREAD_LOCAL_TOP_ADDRESS(intptr_t, pending_handler_offset)
595  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_fp)
596  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_sp)
597
598  THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)
599
600  v8::TryCatch* try_catch_handler() {
601    return thread_local_top_.try_catch_handler();
602  }
603  bool* external_caught_exception_address() {
604    return &thread_local_top_.external_caught_exception_;
605  }
606
607  THREAD_LOCAL_TOP_ADDRESS(Object*, scheduled_exception)
608
609  inline void clear_pending_message();
610  Address pending_message_obj_address() {
611    return reinterpret_cast<Address>(&thread_local_top_.pending_message_obj_);
612  }
613
614  inline Object* scheduled_exception();
615  inline bool has_scheduled_exception();
616  inline void clear_scheduled_exception();
617
618  bool IsJavaScriptHandlerOnTop(Object* exception);
619  bool IsExternalHandlerOnTop(Object* exception);
620
621  inline bool is_catchable_by_javascript(Object* exception);
622  inline bool is_catchable_by_wasm(Object* exception);
623
624  // JS execution stack (see frames.h).
625  static Address c_entry_fp(ThreadLocalTop* thread) {
626    return thread->c_entry_fp_;
627  }
628  static Address handler(ThreadLocalTop* thread) { return thread->handler_; }
629  Address c_function() { return thread_local_top_.c_function_; }
630
631  inline Address* c_entry_fp_address() {
632    return &thread_local_top_.c_entry_fp_;
633  }
634  inline Address* handler_address() { return &thread_local_top_.handler_; }
635  inline Address* c_function_address() {
636    return &thread_local_top_.c_function_;
637  }
638
639  // Bottom JS entry.
640  Address js_entry_sp() {
641    return thread_local_top_.js_entry_sp_;
642  }
643  inline Address* js_entry_sp_address() {
644    return &thread_local_top_.js_entry_sp_;
645  }
646
647  // Returns the global object of the current context. It could be
648  // a builtin object, or a JS global object.
649  inline Handle<JSGlobalObject> global_object();
650
651  // Returns the global proxy object of the current context.
652  inline Handle<JSObject> global_proxy();
653
654  static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
655  void FreeThreadResources() { thread_local_top_.Free(); }
656
657  // This method is called by the api after operations that may throw
658  // exceptions.  If an exception was thrown and not handled by an external
659  // handler the exception is scheduled to be rethrown when we return to running
660  // JavaScript code.  If an exception is scheduled true is returned.
661  bool OptionalRescheduleException(bool is_bottom_call);
662
663  // Push and pop a promise and the current try-catch handler.
664  void PushPromise(Handle<JSObject> promise);
665  void PopPromise();
666
667  // Return the relevant Promise that a throw/rejection pertains to, based
668  // on the contents of the Promise stack
669  Handle<Object> GetPromiseOnStackOnThrow();
670
671  // Heuristically guess whether a Promise is handled by user catch handler
672  bool PromiseHasUserDefinedRejectHandler(Handle<Object> promise);
673
674  class ExceptionScope {
675   public:
676    // Scope currently can only be used for regular exceptions,
677    // not termination exception.
678    inline explicit ExceptionScope(Isolate* isolate);
679    inline ~ExceptionScope();
680
681   private:
682    Isolate* isolate_;
683    Handle<Object> pending_exception_;
684  };
685
686  void SetCaptureStackTraceForUncaughtExceptions(
687      bool capture,
688      int frame_limit,
689      StackTrace::StackTraceOptions options);
690
691  void SetAbortOnUncaughtExceptionCallback(
692      v8::Isolate::AbortOnUncaughtExceptionCallback callback);
693
694  enum PrintStackMode { kPrintStackConcise, kPrintStackVerbose };
695  void PrintCurrentStackTrace(FILE* out);
696  void PrintStack(StringStream* accumulator,
697                  PrintStackMode mode = kPrintStackVerbose);
698  void PrintStack(FILE* out, PrintStackMode mode = kPrintStackVerbose);
699  Handle<String> StackTraceString();
700  NO_INLINE(void PushStackTraceAndDie(unsigned int magic, void* ptr1,
701                                      void* ptr2, unsigned int magic2));
702  Handle<JSArray> CaptureCurrentStackTrace(
703      int frame_limit,
704      StackTrace::StackTraceOptions options);
705  Handle<Object> CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
706                                         FrameSkipMode mode,
707                                         Handle<Object> caller);
708  MaybeHandle<JSReceiver> CaptureAndSetDetailedStackTrace(
709      Handle<JSReceiver> error_object);
710  MaybeHandle<JSReceiver> CaptureAndSetSimpleStackTrace(
711      Handle<JSReceiver> error_object, FrameSkipMode mode,
712      Handle<Object> caller);
713  Handle<JSArray> GetDetailedStackTrace(Handle<JSObject> error_object);
714
715  // Returns if the given context may access the given global object. If
716  // the result is false, the pending exception is guaranteed to be
717  // set.
718  bool MayAccess(Handle<Context> accessing_context, Handle<JSObject> receiver);
719
720  void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
721  void ReportFailedAccessCheck(Handle<JSObject> receiver);
722
723  // Exception throwing support. The caller should use the result
724  // of Throw() as its return value.
725  Object* Throw(Object* exception, MessageLocation* location = NULL);
726  Object* ThrowIllegalOperation();
727
  // Handle-based convenience wrapper around Throw(Object*, ...): throws
  // |exception| and returns an empty MaybeHandle<T>, so callers can write
  // "return isolate->Throw<T>(...)" from MaybeHandle-returning code.
  template <typename T>
  MUST_USE_RESULT MaybeHandle<T> Throw(Handle<Object> exception,
                                       MessageLocation* location = NULL) {
    Throw(*exception, location);
    return MaybeHandle<T>();
  }
734
735  // Re-throw an exception.  This involves no error reporting since error
736  // reporting was handled when the exception was thrown originally.
737  Object* ReThrow(Object* exception);
738
739  // Find the correct handler for the current pending exception. This also
740  // clears and returns the current pending exception.
741  Object* UnwindAndFindHandler();
742
743  // Tries to predict whether an exception will be caught. Note that this can
744  // only produce an estimate, because it is undecidable whether a finally
  // clause will consume or re-throw an exception.
  enum CatchType {
    NOT_CAUGHT,
    CAUGHT_BY_JAVASCRIPT,
    CAUGHT_BY_EXTERNAL,
    CAUGHT_BY_DESUGARING,
    CAUGHT_BY_PROMISE,
    CAUGHT_BY_ASYNC_AWAIT
  };
  // Predict which kind of handler, if any, will catch the exception that is
  // about to be thrown, by inspecting the current stack of handlers.
  CatchType PredictExceptionCatcher();

  // Remember |exception| so it can be re-thrown when execution resumes.
  void ScheduleThrow(Object* exception);
  // Re-set pending message, script and positions reported to the TryCatch
  // back to the TLS for re-use when rethrowing.
  void RestorePendingMessageFromTryCatch(v8::TryCatch* handler);
  // Un-schedule an exception that was caught by a TryCatch handler.
  void CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler);
  void ReportPendingMessages();
  // Return pending location if any or unfilled structure.
  MessageLocation GetMessageLocation();

  // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
  Object* PromoteScheduledException();

  // Attempts to compute the current source location, storing the
  // result in the target out parameter. The source location is attached to a
  // Message object as the location which should be shown to the user. It's
  // typically the top-most meaningful location on the stack.
  bool ComputeLocation(MessageLocation* target);
  bool ComputeLocationFromException(MessageLocation* target,
                                    Handle<Object> exception);
  bool ComputeLocationFromStackTrace(MessageLocation* target,
                                     Handle<Object> exception);

  // Create a JSMessageObject for |exception| at |location|, suitable for
  // reporting through the message listeners.
  Handle<JSMessageObject> CreateMessage(Handle<Object> exception,
                                        MessageLocation* location);

  // Out of resource exception helpers.
  Object* StackOverflow();
  Object* TerminateExecution();
  void CancelTerminateExecution();

  // Queue |callback| (with |data|) for invocation at the next API interrupt
  // check; see InvokeApiInterruptCallbacks for the draining side.
  void RequestInterrupt(InterruptCallback callback, void* data);
  void InvokeApiInterruptCallbacks();

  // Administration: visit all roots held by the isolate (optionally for a
  // specific thread's archived state).
  void Iterate(ObjectVisitor* v);
  void Iterate(ObjectVisitor* v, ThreadLocalTop* t);
  char* Iterate(ObjectVisitor* v, char* t);
  void IterateThread(ThreadVisitor* v, char* t);

  // Returns the current native context.
  inline Handle<Context> native_context();
  inline Context* raw_native_context();

  // Returns the native context of the calling JavaScript code.  That
  // is, the native context of the top-most JavaScript frame.
  Handle<Context> GetCallingNativeContext();

  void RegisterTryCatchHandler(v8::TryCatch* that);
  void UnregisterTryCatchHandler(v8::TryCatch* that);

  // Save/restore the isolate's per-thread state into/from a linear buffer
  // when switching threads; both return the advanced buffer pointer.
  char* ArchiveThread(char* to);
  char* RestoreThread(char* from);

  static const int kUC16AlphabetSize = 256;  // See StringSearchBase.
  static const int kBMMaxShift = 250;        // See StringSearchBase.
812
  // Accessors.
  // Each ISOLATE_INIT_LIST entry expands to a getter/setter pair. The DCHECK
  // verifies that the field offset recorded at initialization (in the debug
  // offset table below) matches this compilation unit's view of the layout,
  // guarding against inconsistent Isolate layouts across translation units.
#define GLOBAL_ACCESSOR(type, name, initialvalue)                       \
  inline type name() const {                                            \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);        \
    return name##_;                                                     \
  }                                                                     \
  inline void set_##name(type value) {                                  \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);        \
    name##_ = value;                                                    \
  }
  ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
#undef GLOBAL_ACCESSOR

  // Same offset check for array-typed entries; returns the array's base.
#define GLOBAL_ARRAY_ACCESSOR(type, name, length)                       \
  inline type* name() {                                                 \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);        \
    return &(name##_)[0];                                               \
  }
  ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
#undef GLOBAL_ARRAY_ACCESSOR

  // Typed accessors for each slot of the current native context.
#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
  inline Handle<type> name();                            \
  inline bool is_##name(type* value);
  NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR
839
  Bootstrapper* bootstrapper() { return bootstrapper_; }
  Counters* counters() {
    // Call InitializeLoggingAndCounters() if logging is needed before
    // the isolate is fully initialized.
    DCHECK(counters_ != NULL);
    return counters_;
  }
  RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
  CompilationCache* compilation_cache() { return compilation_cache_; }
  Logger* logger() {
    // Call InitializeLoggingAndCounters() if logging is needed before
    // the isolate is fully initialized.
    DCHECK(logger_ != NULL);
    return logger_;
  }
  StackGuard* stack_guard() { return &stack_guard_; }
  Heap* heap() { return &heap_; }
  StatsTable* stats_table();
  StubCache* load_stub_cache() { return load_stub_cache_; }
  StubCache* store_stub_cache() { return store_stub_cache_; }
  CodeAgingHelper* code_aging_helper() { return code_aging_helper_; }
  DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
  bool deoptimizer_lazy_throw() const { return deoptimizer_lazy_throw_; }
  void set_deoptimizer_lazy_throw(bool value) {
    deoptimizer_lazy_throw_ = value;
  }
  ThreadLocalTop* thread_local_top() { return &thread_local_top_; }
  MaterializedObjectStore* materialized_object_store() {
    return materialized_object_store_;
  }

  ContextSlotCache* context_slot_cache() {
    return context_slot_cache_;
  }

  DescriptorLookupCache* descriptor_lookup_cache() {
    return descriptor_lookup_cache_;
  }

  HandleScopeData* handle_scope_data() { return &handle_scope_data_; }

  HandleScopeImplementer* handle_scope_implementer() {
    DCHECK(handle_scope_implementer_);
    return handle_scope_implementer_;
  }

  UnicodeCache* unicode_cache() {
    return unicode_cache_;
  }

  InnerPointerToCodeCache* inner_pointer_to_code_cache() {
    return inner_pointer_to_code_cache_;
  }

  GlobalHandles* global_handles() { return global_handles_; }

  EternalHandles* eternal_handles() { return eternal_handles_; }

  ThreadManager* thread_manager() { return thread_manager_; }

  unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
    return &jsregexp_uncanonicalize_;
  }

  unibrow::Mapping<unibrow::CanonicalizationRange>* jsregexp_canonrange() {
    return &jsregexp_canonrange_;
  }

  RuntimeState* runtime_state() { return &runtime_state_; }

  Builtins* builtins() { return &builtins_; }

  unibrow::Mapping<unibrow::Ecma262Canonicalize>*
      regexp_macro_assembler_canonicalize() {
    return &regexp_macro_assembler_canonicalize_;
  }

  RegExpStack* regexp_stack() { return regexp_stack_; }

  // Running total of regexp code bytes emitted; IncreaseTotal... takes an int
  // delta and accumulates it into the size_t total.
  size_t total_regexp_code_generated() { return total_regexp_code_generated_; }
  void IncreaseTotalRegexpCodeGenerated(int size) {
    total_regexp_code_generated_ += size;
  }

  List<int>* regexp_indices() { return &regexp_indices_; }

  // Note: intentionally returns the same mapping table as
  // regexp_macro_assembler_canonicalize() above.
  unibrow::Mapping<unibrow::Ecma262Canonicalize>*
      interp_canonicalize_mapping() {
    return &regexp_macro_assembler_canonicalize_;
  }

  Debug* debug() { return debug_; }

  bool* is_profiling_address() { return &is_profiling_; }
  CodeEventDispatcher* code_event_dispatcher() const {
    return code_event_dispatcher_.get();
  }
  HeapProfiler* heap_profiler() const { return heap_profiler_; }

#ifdef DEBUG
  HistogramInfo* heap_histograms() { return heap_histograms_; }

  JSObject::SpillInformation* js_spill_information() {
    return &js_spill_information_;
  }
#endif

  // The Isolate doubles as its Factory; the cast assumes Factory adds no
  // data members of its own (see src/factory.h).
  Factory* factory() { return reinterpret_cast<Factory*>(this); }
948
  static const int kJSRegexpStaticOffsetsVectorSize = 128;

  THREAD_LOCAL_TOP_ACCESSOR(ExternalCallbackScope*, external_callback_scope)

  THREAD_LOCAL_TOP_ACCESSOR(StateTag, current_vm_state)

  // Embedder-controlled data slots; slot count is fixed by
  // Internals::kNumIsolateDataSlots (include/v8.h).
  void SetData(uint32_t slot, void* data) {
    DCHECK(slot < Internals::kNumIsolateDataSlots);
    embedder_data_[slot] = data;
  }
  void* GetData(uint32_t slot) {
    DCHECK(slot < Internals::kNumIsolateDataSlots);
    return embedder_data_[slot];
  }

  bool serializer_enabled() const { return serializer_enabled_; }
  bool snapshot_available() const {
    return snapshot_blob_ != NULL && snapshot_blob_->raw_size != 0;
  }

  bool IsDead() { return has_fatal_error_; }
  void SignalFatalError() { has_fatal_error_ = true; }

  bool use_crankshaft();

  bool initialized_from_snapshot() { return initialized_from_snapshot_; }

  bool NeedsSourcePositionsForProfiling() const;

  bool IsCodeCoverageEnabled();
  void SetCodeCoverageList(Object* value);

  // Milliseconds elapsed since this isolate finished initializing.
  double time_millis_since_init() {
    return heap_.MonotonicallyIncreasingTimeInMs() - time_millis_at_init_;
  }

  DateCache* date_cache() {
    return date_cache_;
  }

  // Install a new date cache, deleting the previous one unless the caller
  // passed the currently installed cache back in.
  void set_date_cache(DateCache* date_cache) {
    if (date_cache != date_cache_) {
      delete date_cache_;
    }
    date_cache_ = date_cache;
  }

  Map* get_initial_js_array_map(ElementsKind kind);

  // Values stored in protector cells; see the Is*Intact / Invalidate*
  // methods below.
  static const int kProtectorValid = 1;
  static const int kProtectorInvalid = 0;

  bool IsFastArrayConstructorPrototypeChainIntact();
  inline bool IsArraySpeciesLookupChainIntact();
  bool IsIsConcatSpreadableLookupChainIntact();
  bool IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver);
  inline bool IsStringLengthOverflowIntact();
  inline bool IsArrayIteratorLookupChainIntact();

  // Avoid deopt loops if fast Array Iterators migrate to slow Array Iterators.
  inline bool IsFastArrayIterationIntact();

  // Make sure we do check for neutered array buffers.
  inline bool IsArrayBufferNeuteringIntact();

  // On intent to set an element in object, make sure that appropriate
  // notifications occur if the set is on the elements of the array or
  // object prototype. Also ensure that changes to prototype chain between
  // Array and Object fire notifications.
  void UpdateArrayProtectorOnSetElement(Handle<JSObject> object);
  void UpdateArrayProtectorOnSetLength(Handle<JSObject> object) {
    UpdateArrayProtectorOnSetElement(object);
  }
  void UpdateArrayProtectorOnSetPrototype(Handle<JSObject> object) {
    UpdateArrayProtectorOnSetElement(object);
  }
  void UpdateArrayProtectorOnNormalizeElements(Handle<JSObject> object) {
    UpdateArrayProtectorOnSetElement(object);
  }
  void InvalidateArraySpeciesProtector();
  void InvalidateIsConcatSpreadableProtector();
  void InvalidateStringLengthOverflowProtector();
  void InvalidateArrayIteratorProtector();
  void InvalidateArrayBufferNeuteringProtector();

  // Returns true if array is the initial array prototype in any native context.
  bool IsAnyInitialArrayPrototype(Handle<JSArray> array);

  V8_EXPORT_PRIVATE CallInterfaceDescriptorData* call_descriptor_data(
      int index);

  AccessCompilerData* access_compiler_data() { return access_compiler_data_; }

  void IterateDeferredHandles(ObjectVisitor* visitor);
  void LinkDeferredHandles(DeferredHandles* deferred_handles);
  void UnlinkDeferredHandles(DeferredHandles* deferred_handles);

#ifdef DEBUG
  bool IsDeferredHandle(Object** location);
#endif  // DEBUG

  bool concurrent_recompilation_enabled() {
    // Thread is only available with flag enabled.
    DCHECK(optimizing_compile_dispatcher_ == NULL ||
           FLAG_concurrent_recompilation);
    return optimizing_compile_dispatcher_ != NULL;
  }
1056
  OptimizingCompileDispatcher* optimizing_compile_dispatcher() {
    return optimizing_compile_dispatcher_;
  }

  int id() const { return static_cast<int>(id_); }

  HStatistics* GetHStatistics();
  CompilationStatistics* GetTurboStatistics();
  HTracer* GetHTracer();
  CodeTracer* GetCodeTracer();

  void DumpAndResetCompilationStats();

  FunctionEntryHook function_entry_hook() { return function_entry_hook_; }
  void set_function_entry_hook(FunctionEntryHook function_entry_hook) {
    function_entry_hook_ = function_entry_hook;
  }

  void* stress_deopt_count_address() { return &stress_deopt_count_; }

  V8_EXPORT_PRIVATE base::RandomNumberGenerator* random_number_generator();

  // Generates a random number that is non-zero when masked
  // with the provided mask.
  int GenerateIdentityHash(uint32_t mask);

  // Given an address occupied by a live code object, return that object.
  Code* FindCodeObject(Address a);

  // Hands out optimization ids; wraps back to zero once the next id would no
  // longer fit in a Smi.
  int NextOptimizationId() {
    int id = next_optimization_id_++;
    if (!Smi::IsValid(next_optimization_id_)) {
      next_optimization_id_ = 0;
    }
    return id;
  }

  // Embedder callbacks fired around calls and microtask checkpoints.
  void AddCallCompletedCallback(CallCompletedCallback callback);
  void RemoveCallCompletedCallback(CallCompletedCallback callback);
  void FireCallCompletedCallback();

  void AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
  void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
  inline void FireBeforeCallEnteredCallback();

  void AddMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
  void RemoveMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
  void FireMicrotasksCompletedCallback();

  void SetPromiseRejectCallback(PromiseRejectCallback callback);
  void ReportPromiseReject(Handle<JSObject> promise, Handle<Object> value,
                           v8::PromiseRejectEvent event);

  // Run promise jobs; results and any thrown exception are reported through
  // the out parameters.
  void PromiseReactionJob(Handle<PromiseReactionJobInfo> info,
                          MaybeHandle<Object>* result,
                          MaybeHandle<Object>* maybe_exception);
  void PromiseResolveThenableJob(Handle<PromiseResolveThenableJobInfo> info,
                                 MaybeHandle<Object>* result,
                                 MaybeHandle<Object>* maybe_exception);
  void EnqueueMicrotask(Handle<Object> microtask);
  void RunMicrotasks();
  bool IsRunningMicrotasks() const { return is_running_microtasks_; }

  Handle<Symbol> SymbolFor(Heap::RootListIndex dictionary_index,
                           Handle<String> name, bool private_symbol);

  void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
  void CountUsage(v8::Isolate::UseCounterFeature feature);

  BasicBlockProfiler* GetOrCreateBasicBlockProfiler();
  BasicBlockProfiler* basic_block_profiler() { return basic_block_profiler_; }

  std::string GetTurboCfgFileName();

#if TRACE_MAPS
  int GetNextUniqueSharedFunctionInfoId() { return next_unique_sfi_id_++; }
#endif

  // Address of the combined "promise hook set or debugger active" flag,
  // exposed for direct access by generated code.
  Address promise_hook_or_debug_is_active_address() {
    return reinterpret_cast<Address>(&promise_hook_or_debug_is_active_);
  }

  void DebugStateUpdated();

  void SetPromiseHook(PromiseHook hook);
  void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
                      Handle<Object> parent);

  // Support for dynamically disabling tail call elimination.
  Address is_tail_call_elimination_enabled_address() {
    return reinterpret_cast<Address>(&is_tail_call_elimination_enabled_);
  }
  bool is_tail_call_elimination_enabled() const {
    return is_tail_call_elimination_enabled_;
  }
  void SetTailCallEliminationEnabled(bool enabled);
1153
  void AddDetachedContext(Handle<Context> context);
  void CheckDetachedContextsAfterGC();

  List<Object*>* partial_snapshot_cache() { return &partial_snapshot_cache_; }

  void set_array_buffer_allocator(v8::ArrayBuffer::Allocator* allocator) {
    array_buffer_allocator_ = allocator;
  }
  v8::ArrayBuffer::Allocator* array_buffer_allocator() const {
    return array_buffer_allocator_;
  }

  FutexWaitListNode* futex_wait_list_node() { return &futex_wait_list_node_; }

  CancelableTaskManager* cancelable_task_manager() {
    return cancelable_task_manager_;
  }

  const AstStringConstants* ast_string_constants() const {
    return ast_string_constants_;
  }

  interpreter::Interpreter* interpreter() const { return interpreter_; }

  AccountingAllocator* allocator() { return allocator_; }

  CompilerDispatcher* compiler_dispatcher() const {
    return compiler_dispatcher_;
  }

  // Clear all optimized code stored in native contexts.
  void ClearOSROptimizedCode();

  // Ensure that a particular optimized code is evicted.
  void EvictOSROptimizedCode(Code* code, const char* reason);

  bool IsInAnyContext(Object* object, uint32_t index);

  // RAIL (Response/Animation/Idle/Load) performance mode, settable by the
  // embedder; see RAILModeName below for the mode strings.
  void SetRAILMode(RAILMode rail_mode);

  RAILMode rail_mode() { return rail_mode_.Value(); }

  double LoadStartTimeMs();

  void IsolateInForegroundNotification();

  void IsolateInBackgroundNotification();

  bool IsIsolateInBackground() { return is_isolate_in_background_; }

  PRINTF_FORMAT(2, 3) void PrintWithTimestamp(const char* format, ...);

#ifdef USE_SIMULATOR
  base::Mutex* simulator_i_cache_mutex() { return &simulator_i_cache_mutex_; }
#endif

  void set_allow_atomics_wait(bool set) { allow_atomics_wait_ = set; }
  bool allow_atomics_wait() { return allow_atomics_wait_; }

  // List of native heap values allocated by the runtime as part of its
  // implementation that must be freed at isolate deinit.
  class ManagedObjectFinalizer final {
   public:
    typedef void (*Deleter)(void*);
    // Invokes the registered deleter on the managed value.
    void Dispose() { deleter_(value_); }

   private:
    friend class Isolate;

    ManagedObjectFinalizer() {
      // Sanity check: the object's address must coincide with value_'s, as
      // required by the "value_ must be the first member" invariant below.
      DCHECK_EQ(reinterpret_cast<void*>(this),
                reinterpret_cast<void*>(&value_));
    }

    // value_ must be the first member
    void* value_ = nullptr;
    Deleter deleter_ = nullptr;
    ManagedObjectFinalizer* prev_ = nullptr;
    ManagedObjectFinalizer* next_ = nullptr;
  };

  // Register a native value for destruction at isolate teardown.
  ManagedObjectFinalizer* RegisterForReleaseAtTeardown(
      void* value, ManagedObjectFinalizer::Deleter deleter);

  // Unregister a previously registered value from release at
  // isolate teardown, deleting the ManagedObjectFinalizer.
  // This transfers the responsibility of the previously managed value's
  // deletion to the caller. Pass by pointer, because *finalizer_ptr gets
  // reset to nullptr.
  void UnregisterFromReleaseAtTeardown(ManagedObjectFinalizer** finalizer_ptr);
1245
 protected:
  explicit Isolate(bool enable_serializer);
  bool IsArrayOrObjectPrototype(Object* object);

 private:
  friend struct GlobalState;
  friend struct InitializeGlobalState;

  // These fields are accessed through the API, offsets must be kept in sync
  // with v8::internal::Internals (in include/v8.h) constants. This is also
  // verified in Isolate::Init() using runtime checks.
  void* embedder_data_[Internals::kNumIsolateDataSlots];
  Heap heap_;

  // The per-process lock should be acquired before the ThreadDataTable is
  // modified.
  class ThreadDataTable {
   public:
    ThreadDataTable();
    ~ThreadDataTable();

    PerIsolateThreadData* Lookup(Isolate* isolate, ThreadId thread_id);
    void Insert(PerIsolateThreadData* data);
    void Remove(PerIsolateThreadData* data);
    void RemoveAllThreads(Isolate* isolate);

   private:
    PerIsolateThreadData* list_;
  };

  // These items form a stack synchronously with threads Enter'ing and Exit'ing
  // the Isolate. The top of the stack points to a thread which is currently
  // running the Isolate. When the stack is empty, the Isolate is considered
  // not entered by any thread and can be Disposed.
  // If the same thread enters the Isolate more than once, the entry_count_
  // is incremented rather than a new item pushed to the stack.
  class EntryStackItem {
   public:
    EntryStackItem(PerIsolateThreadData* previous_thread_data,
                   Isolate* previous_isolate,
                   EntryStackItem* previous_item)
        : entry_count(1),
          previous_thread_data(previous_thread_data),
          previous_isolate(previous_isolate),
          previous_item(previous_item) { }

    int entry_count;
    PerIsolateThreadData* previous_thread_data;
    Isolate* previous_isolate;
    EntryStackItem* previous_item;

   private:
    DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
  };

  static base::LazyMutex thread_data_table_mutex_;

  // Process-wide TLS keys for looking up the current isolate/thread data.
  static base::Thread::LocalStorageKey per_isolate_thread_data_key_;
  static base::Thread::LocalStorageKey isolate_key_;
  static base::Thread::LocalStorageKey thread_id_key_;
  static ThreadDataTable* thread_data_table_;

  // A global counter for all generated Isolates, might overflow.
  static base::Atomic32 isolate_counter_;

#if DEBUG
  static base::Atomic32 isolate_key_created_;
#endif
1314
  void Deinit();

  static void SetIsolateThreadLocals(Isolate* isolate,
                                     PerIsolateThreadData* data);

  // Find the PerThread for this particular (isolate, thread) combination.
  // If one does not yet exist, allocate a new one.
  PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();

  // Initializes the current thread to run this Isolate.
  // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
  // at the same time, this should be prevented using external locking.
  void Enter();

  // Exits the current thread. The previously entered Isolate is restored
  // for the thread.
  // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
  // at the same time, this should be prevented using external locking.
  void Exit();

  void InitializeThreadLocal();

  void MarkCompactPrologue(bool is_compacting,
                           ThreadLocalTop* archived_thread_data);
  void MarkCompactEpilogue(bool is_compacting,
                           ThreadLocalTop* archived_thread_data);

  void FillCache();

  // Propagate pending exception message to the v8::TryCatch.
  // If there is no external try-catch or message was successfully propagated,
  // then return true.
  bool PropagatePendingExceptionToExternalTryCatch();

  // Remove per-frame stored materialized objects when we are unwinding
  // the frame.
  void RemoveMaterializedObjectsOnUnwind(StackFrame* frame);

  void RunMicrotasksInternal();

  // Human-readable name for a RAIL mode, used for logging/tracing. The
  // trailing return is unreachable for valid enum values but keeps
  // compilers without exhaustive-switch analysis happy.
  const char* RAILModeName(RAILMode rail_mode) const {
    switch (rail_mode) {
      case PERFORMANCE_RESPONSE:
        return "RESPONSE";
      case PERFORMANCE_ANIMATION:
        return "ANIMATION";
      case PERFORMANCE_IDLE:
        return "IDLE";
      case PERFORMANCE_LOAD:
        return "LOAD";
    }
    return "";
  }

  // TODO(alph): Remove along with the deprecated GetCpuProfiler().
  friend v8::CpuProfiler* v8::Isolate::GetCpuProfiler();
  CpuProfiler* cpu_profiler() const { return cpu_profiler_; }
1372
  base::Atomic32 id_;
  EntryStackItem* entry_stack_;
  int stack_trace_nesting_level_;
  StringStream* incomplete_message_;
  Address isolate_addresses_[kIsolateAddressCount + 1];  // NOLINT
  Bootstrapper* bootstrapper_;
  RuntimeProfiler* runtime_profiler_;
  CompilationCache* compilation_cache_;
  Counters* counters_;
  base::RecursiveMutex break_access_;
  Logger* logger_;
  StackGuard stack_guard_;
  StatsTable* stats_table_;
  StubCache* load_stub_cache_;
  StubCache* store_stub_cache_;
  CodeAgingHelper* code_aging_helper_;
  DeoptimizerData* deoptimizer_data_;
  bool deoptimizer_lazy_throw_;
  MaterializedObjectStore* materialized_object_store_;
  ThreadLocalTop thread_local_top_;
  bool capture_stack_trace_for_uncaught_exceptions_;
  int stack_trace_for_uncaught_exceptions_frame_limit_;
  StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
  ContextSlotCache* context_slot_cache_;
  DescriptorLookupCache* descriptor_lookup_cache_;
  HandleScopeData handle_scope_data_;
  HandleScopeImplementer* handle_scope_implementer_;
  UnicodeCache* unicode_cache_;
  AccountingAllocator* allocator_;
  InnerPointerToCodeCache* inner_pointer_to_code_cache_;
  GlobalHandles* global_handles_;
  EternalHandles* eternal_handles_;
  ThreadManager* thread_manager_;
  RuntimeState runtime_state_;
  Builtins builtins_;
  // Unicode canonicalization tables used by the regexp subsystem.
  unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
  unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
  unibrow::Mapping<unibrow::Ecma262Canonicalize>
      regexp_macro_assembler_canonicalize_;
  RegExpStack* regexp_stack_;
  List<int> regexp_indices_;
  DateCache* date_cache_;
  CallInterfaceDescriptorData* call_descriptor_data_;
  AccessCompilerData* access_compiler_data_;
  base::RandomNumberGenerator* random_number_generator_;
  base::AtomicValue<RAILMode> rail_mode_;
  bool promise_hook_or_debug_is_active_;
  PromiseHook promise_hook_;
  base::Mutex rail_mutex_;
  double load_start_time_ms_;

  // Whether the isolate has been created for snapshotting.
  bool serializer_enabled_;

  // True if fatal error has been signaled for this isolate.
  bool has_fatal_error_;

  // True if this isolate was initialized from a snapshot.
  bool initialized_from_snapshot_;

  // True if ES2015 tail call elimination feature is enabled.
  bool is_tail_call_elimination_enabled_;

  // True if the isolate is in background. This flag is used
  // to prioritize between memory usage and latency.
  bool is_isolate_in_background_;

  // Time stamp at initialization.
  double time_millis_at_init_;

#ifdef DEBUG
  // A static array of histogram info for each type.
  HistogramInfo heap_histograms_[LAST_TYPE + 1];
  JSObject::SpillInformation js_spill_information_;
#endif

  Debug* debug_;
  CpuProfiler* cpu_profiler_;
  HeapProfiler* heap_profiler_;
  std::unique_ptr<CodeEventDispatcher> code_event_dispatcher_;
  FunctionEntryHook function_entry_hook_;

  const AstStringConstants* ast_string_constants_;

  interpreter::Interpreter* interpreter_;

  CompilerDispatcher* compiler_dispatcher_;

  // Pending embedder interrupt callbacks, drained by
  // InvokeApiInterruptCallbacks().
  typedef std::pair<InterruptCallback, void*> InterruptEntry;
  std::queue<InterruptEntry> api_interrupts_queue_;

  // Backing fields for the GLOBAL_ACCESSOR / GLOBAL_ARRAY_ACCESSOR
  // accessors declared above.
#define GLOBAL_BACKING_STORE(type, name, initialvalue)                         \
  type name##_;
  ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
#undef GLOBAL_BACKING_STORE

#define GLOBAL_ARRAY_BACKING_STORE(type, name, length)                         \
  type name##_[length];
  ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_BACKING_STORE)
#undef GLOBAL_ARRAY_BACKING_STORE

#ifdef DEBUG
  // This class is huge and has a number of fields controlled by
  // preprocessor defines. Make sure the offsets of these fields agree
  // between compilation units.
#define ISOLATE_FIELD_OFFSET(type, name, ignored)                              \
  static const intptr_t name##_debug_offset_;
  ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
  ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
#undef ISOLATE_FIELD_OFFSET
#endif

  DeferredHandles* deferred_handles_head_;
  OptimizingCompileDispatcher* optimizing_compile_dispatcher_;

  // Counts deopt points if deopt_every_n_times is enabled.
  unsigned int stress_deopt_count_;

  int next_optimization_id_;

#if TRACE_MAPS
  int next_unique_sfi_id_;
#endif

  // List of callbacks before a Call starts execution.
  List<BeforeCallEnteredCallback> before_call_entered_callbacks_;

  // List of callbacks when a Call completes.
  List<CallCompletedCallback> call_completed_callbacks_;

  // List of callbacks after microtasks were run.
  List<MicrotasksCompletedCallback> microtasks_completed_callbacks_;
  bool is_running_microtasks_;

  v8::Isolate::UseCounterCallback use_counter_callback_;
  BasicBlockProfiler* basic_block_profiler_;

  List<Object*> partial_snapshot_cache_;

  v8::ArrayBuffer::Allocator* array_buffer_allocator_;

  FutexWaitListNode futex_wait_list_node_;

  CancelableTaskManager* cancelable_task_manager_;

  v8::Isolate::AbortOnUncaughtExceptionCallback
      abort_on_uncaught_exception_callback_;

#ifdef USE_SIMULATOR
  base::Mutex simulator_i_cache_mutex_;
#endif

  bool allow_atomics_wait_;

  // Head sentinel of the intrusive list maintained by
  // RegisterForReleaseAtTeardown / UnregisterFromReleaseAtTeardown.
  ManagedObjectFinalizer managed_object_finalizers_list_;

  size_t total_regexp_code_generated_;

  friend class ExecutionAccess;
  friend class HandleScopeImplementer;
  friend class HeapTester;
  friend class OptimizingCompileDispatcher;
  friend class SweeperThread;
  friend class ThreadManager;
  friend class Simulator;
  friend class StackGuard;
  friend class ThreadId;
  friend class v8::Isolate;
  friend class v8::Locker;
  friend class v8::Unlocker;
  friend class v8::SnapshotCreator;
  friend v8::StartupData v8::V8::CreateSnapshotDataBlob(const char*);
  friend v8::StartupData v8::V8::WarmUpSnapshotDataBlob(v8::StartupData,
                                                        const char*);

  DISALLOW_COPY_AND_ASSIGN(Isolate);
};
1550
1551
1552#undef FIELD_ACCESSOR
1553#undef THREAD_LOCAL_TOP_ACCESSOR
1554
1555
1556class PromiseOnStack {
1557 public:
1558  PromiseOnStack(Handle<JSObject> promise, PromiseOnStack* prev)
1559      : promise_(promise), prev_(prev) {}
1560  Handle<JSObject> promise() { return promise_; }
1561  PromiseOnStack* prev() { return prev_; }
1562
1563 private:
1564  Handle<JSObject> promise_;
1565  PromiseOnStack* prev_;
1566};
1567
1568
1569// If the GCC version is 4.1.x or 4.2.x an additional field is added to the
1570// class as a work around for a bug in the generated code found with these
1571// versions of GCC. See V8 issue 122 for details.
class SaveContext BASE_EMBEDDED {
 public:
  // RAII scope that, per its name, preserves the isolate's context across
  // its lifetime; constructor and destructor are defined out of line.
  explicit SaveContext(Isolate* isolate);
  ~SaveContext();

  // The context that was current when this scope was entered.
  Handle<Context> context() { return context_; }
  // The enclosing SaveContext scope, forming a stack.
  SaveContext* prev() { return prev_; }

  // Returns true if this save context is below a given JavaScript frame.
  bool IsBelowFrame(StandardFrame* frame) {
    return (c_entry_fp_ == 0) || (c_entry_fp_ > frame->sp());
  }

 private:
  Isolate* const isolate_;
  Handle<Context> context_;
  SaveContext* const prev_;
  // C entry frame pointer captured at construction; 0 means none was active.
  Address c_entry_fp_;
};
1591
1592
class AssertNoContextChange BASE_EMBEDDED {
#ifdef DEBUG
 public:
  // Debug-only scope: captures the isolate's context at construction and
  // DCHECKs in the destructor that it is unchanged.
  explicit AssertNoContextChange(Isolate* isolate);
  ~AssertNoContextChange() {
    DCHECK(isolate_->context() == *context_);
  }

 private:
  Isolate* isolate_;
  Handle<Context> context_;
#else
 public:
  // In release builds this scope compiles away to nothing.
  explicit AssertNoContextChange(Isolate* isolate) { }
#endif
};
1609
1610
1611class ExecutionAccess BASE_EMBEDDED {
1612 public:
1613  explicit ExecutionAccess(Isolate* isolate) : isolate_(isolate) {
1614    Lock(isolate);
1615  }
1616  ~ExecutionAccess() { Unlock(isolate_); }
1617
1618  static void Lock(Isolate* isolate) { isolate->break_access()->Lock(); }
1619  static void Unlock(Isolate* isolate) { isolate->break_access()->Unlock(); }
1620
1621  static bool TryLock(Isolate* isolate) {
1622    return isolate->break_access()->TryLock();
1623  }
1624
1625 private:
1626  Isolate* isolate_;
1627};
1628
1629
1630// Support for checking for stack-overflows.
1631class StackLimitCheck BASE_EMBEDDED {
1632 public:
1633  explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { }
1634
1635  // Use this to check for stack-overflows in C++ code.
1636  bool HasOverflowed() const {
1637    StackGuard* stack_guard = isolate_->stack_guard();
1638    return GetCurrentStackPosition() < stack_guard->real_climit();
1639  }
1640
1641  // Use this to check for interrupt request in C++ code.
1642  bool InterruptRequested() {
1643    StackGuard* stack_guard = isolate_->stack_guard();
1644    return GetCurrentStackPosition() < stack_guard->climit();
1645  }
1646
1647  // Use this to check for stack-overflow when entering runtime from JS code.
1648  bool JsHasOverflowed(uintptr_t gap = 0) const;
1649
1650 private:
1651  Isolate* isolate_;
1652};
1653
// Convenience macro: if the C++ stack has overflowed, raise a StackOverflow
// exception on |isolate| and return |result_value| from the current function.
#define STACK_CHECK(isolate, result_value) \
  do {                                     \
    StackLimitCheck stack_check(isolate);  \
    if (stack_check.HasOverflowed()) {     \
      isolate->StackOverflow();            \
      return result_value;                 \
    }                                      \
  } while (false)
1662
// Support for temporarily postponing interrupts. When the outermost
// postpone scope is left the interrupts will be re-enabled and any
// interrupts that occurred while in the scope will be taken into
// account.
class PostponeInterruptsScope BASE_EMBEDDED {
 public:
  // |intercept_mask| selects which interrupt flags this scope postpones;
  // by default all interrupts are intercepted.
  PostponeInterruptsScope(Isolate* isolate,
                          int intercept_mask = StackGuard::ALL_INTERRUPTS)
      : stack_guard_(isolate->stack_guard()),
        intercept_mask_(intercept_mask),
        intercepted_flags_(0) {
    // Registers this scope with the stack guard; scopes form a stack.
    stack_guard_->PushPostponeInterruptsScope(this);
  }

  ~PostponeInterruptsScope() {
    stack_guard_->PopPostponeInterruptsScope();
  }

  // Find the bottom-most scope that intercepts this interrupt.
  // Return whether the interrupt has been intercepted.
  bool Intercept(StackGuard::InterruptFlag flag);

 private:
  StackGuard* stack_guard_;
  int intercept_mask_;       // Interrupts this scope is willing to intercept.
  int intercepted_flags_;    // Interrupts actually intercepted while active.
  // Link to the enclosing scope; presumably maintained by StackGuard's
  // Push/Pop calls above (StackGuard is a friend) — confirm in StackGuard.
  PostponeInterruptsScope* prev_;

  friend class StackGuard;
};
1693
1694
1695class CodeTracer final : public Malloced {
1696 public:
1697  explicit CodeTracer(int isolate_id)
1698      : file_(NULL),
1699        scope_depth_(0) {
1700    if (!ShouldRedirect()) {
1701      file_ = stdout;
1702      return;
1703    }
1704
1705    if (FLAG_redirect_code_traces_to == NULL) {
1706      SNPrintF(filename_,
1707               "code-%d-%d.asm",
1708               base::OS::GetCurrentProcessId(),
1709               isolate_id);
1710    } else {
1711      StrNCpy(filename_, FLAG_redirect_code_traces_to, filename_.length());
1712    }
1713
1714    WriteChars(filename_.start(), "", 0, false);
1715  }
1716
1717  class Scope {
1718   public:
1719    explicit Scope(CodeTracer* tracer) : tracer_(tracer) { tracer->OpenFile(); }
1720    ~Scope() { tracer_->CloseFile();  }
1721
1722    FILE* file() const { return tracer_->file(); }
1723
1724   private:
1725    CodeTracer* tracer_;
1726  };
1727
1728  void OpenFile() {
1729    if (!ShouldRedirect()) {
1730      return;
1731    }
1732
1733    if (file_ == NULL) {
1734      file_ = base::OS::FOpen(filename_.start(), "ab");
1735    }
1736
1737    scope_depth_++;
1738  }
1739
1740  void CloseFile() {
1741    if (!ShouldRedirect()) {
1742      return;
1743    }
1744
1745    if (--scope_depth_ == 0) {
1746      fclose(file_);
1747      file_ = NULL;
1748    }
1749  }
1750
1751  FILE* file() const { return file_; }
1752
1753 private:
1754  static bool ShouldRedirect() {
1755    return FLAG_redirect_code_traces;
1756  }
1757
1758  EmbeddedVector<char, 128> filename_;
1759  FILE* file_;
1760  int scope_depth_;
1761};
1762
1763}  // namespace internal
1764}  // namespace v8
1765
1766#endif  // V8_ISOLATE_H_
1767