// heap-inl.h, revision c7cc028aaeedbbfa11c11d0b7b243b3d9e837ed9
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_HEAP_INL_H_
#define V8_HEAP_INL_H_

#include "heap.h"
#include "isolate.h"
#include "list-inl.h"
#include "objects.h"
#include "v8-counters.h"
#include "store-buffer.h"
#include "store-buffer-inl.h"

namespace v8 {
namespace internal {

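// The promotion queue stores (target, size) pairs and grows downwards in
// to-space, below rear_. When the queue would collide with the scavenge
// allocation area (signalled by guard_), the head is relocated and further
// entries go to the emergency stack.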
void PromotionQueue::insert(HeapObject* target, int size) {
  if (emergency_stack_ != NULL) {
    emergency_stack_->Add(Entry(target, size));
    return;
  }

  if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) {
    NewSpacePage* rear_page =
        NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
    ASSERT(!rear_page->prev_page()->is_anchor());
    rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->area_end());
    ActivateGuardIfOnTheSamePage();
  }

  if (guard_) {
    ASSERT(GetHeadPage() ==
           Page::FromAllocationTop(reinterpret_cast<Address>(limit_)));

    if ((rear_ - 2) < limit_) {
      RelocateQueueHead();
      emergency_stack_->Add(Entry(target, size));
      return;
    }
  }

  *(--rear_) = reinterpret_cast<intptr_t>(target);
  *(--rear_) = size;
  // Assert no overflow into live objects.
#ifdef DEBUG
  SemiSpace::AssertValidRange(HEAP->new_space()->top(),
                              reinterpret_cast<Address>(rear_));
#endif
}


void PromotionQueue::ActivateGuardIfOnTheSamePage() {
  guard_ = guard_ ||
      heap_->new_space()->active_space()->current_page()->address() ==
      GetHeadPage()->address();
}


MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> str,
                                          PretenureFlag pretenure) {
  // Check for ASCII first since this is the common case.
  if (String::IsAscii(str.start(), str.length())) {
    // If the string is ASCII, we do not need to convert the characters
    // since UTF8 is backwards compatible with ASCII.
    return AllocateStringFromAscii(str, pretenure);
  }
  // Non-ASCII and we need to decode.
  return AllocateStringFromUtf8Slow(str, pretenure);
}


MaybeObject* Heap::AllocateSymbol(Vector<const char> str,
                                  int chars,
                                  uint32_t hash_field) {
  unibrow::Utf8InputBuffer<> buffer(str.start(),
                                    static_cast<unsigned>(str.length()));
  return AllocateInternalSymbol(&buffer, chars, hash_field);
}


MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
                                       uint32_t hash_field) {
  if (str.length() > SeqAsciiString::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  // Compute map and object size.
  Map* map = ascii_symbol_map();
  int size = SeqAsciiString::SizeFor(str.length());

  // Allocate string.
  Object* result;
  { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
                   ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
                   : old_data_space_->AllocateRaw(size);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // String maps are all immortal immovable objects.
  reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
  // Set length and hash fields of the allocated string.
  String* answer = String::cast(result);
  answer->set_length(str.length());
  answer->set_hash_field(hash_field);

  ASSERT_EQ(size, answer->Size());

  // Fill in the characters.
  memcpy(answer->address() + SeqAsciiString::kHeaderSize,
         str.start(), str.length());

  return answer;
}


MaybeObject* Heap::AllocateTwoByteSymbol(Vector<const uc16> str,
                                         uint32_t hash_field) {
  if (str.length() > SeqTwoByteString::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  // Compute map and object size.
  Map* map = symbol_map();
  int size = SeqTwoByteString::SizeFor(str.length());

  // Allocate string.
  Object* result;
  { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
                   ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
                   : old_data_space_->AllocateRaw(size);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<HeapObject*>(result)->set_map(map);
  // Set length and hash fields of the allocated string.
  String* answer = String::cast(result);
  answer->set_length(str.length());
  answer->set_hash_field(hash_field);

  ASSERT_EQ(size, answer->Size());

  // Fill in the characters.
  memcpy(answer->address() + SeqTwoByteString::kHeaderSize,
         str.start(), str.length() * kUC16Size);

  return answer;
}


MaybeObject* Heap::CopyFixedArray(FixedArray* src) {
  return CopyFixedArrayWithMap(src, src->map());
}


MaybeObject* Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
  return CopyFixedDoubleArrayWithMap(src, src->map());
}


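// Central raw allocation routine. Allocates in the requested space; a
// failed new-space allocation falls through to retry_space only when
// allocation is forced by an AlwaysAllocateScope. Old-generation failures
// set old_gen_exhausted_.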
MaybeObject* Heap::AllocateRaw(int size_in_bytes,
                               AllocationSpace space,
                               AllocationSpace retry_space) {
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  ASSERT(space != NEW_SPACE ||
         retry_space == OLD_POINTER_SPACE ||
         retry_space == OLD_DATA_SPACE ||
         retry_space == LO_SPACE);
#ifdef DEBUG
  if (FLAG_gc_interval >= 0 &&
      !disallow_allocation_failure_ &&
      Heap::allocation_timeout_-- <= 0) {
    return Failure::RetryAfterGC(space);
  }
  isolate_->counters()->objs_since_last_full()->Increment();
  isolate_->counters()->objs_since_last_young()->Increment();
#endif
  MaybeObject* result;
  if (NEW_SPACE == space) {
    result = new_space_.AllocateRaw(size_in_bytes);
    if (always_allocate() && result->IsFailure()) {
      space = retry_space;
    } else {
      return result;
    }
  }

  if (OLD_POINTER_SPACE == space) {
    result = old_pointer_space_->AllocateRaw(size_in_bytes);
  } else if (OLD_DATA_SPACE == space) {
    result = old_data_space_->AllocateRaw(size_in_bytes);
  } else if (CODE_SPACE == space) {
    result = code_space_->AllocateRaw(size_in_bytes);
  } else if (LO_SPACE == space) {
    result = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
  } else if (CELL_SPACE == space) {
    result = cell_space_->AllocateRaw(size_in_bytes);
  } else {
    ASSERT(MAP_SPACE == space);
    result = map_space_->AllocateRaw(size_in_bytes);
  }
  if (result->IsFailure()) old_gen_exhausted_ = true;
  return result;
}


MaybeObject* Heap::NumberFromInt32(
    int32_t value, PretenureFlag pretenure) {
  if (Smi::IsValid(value)) return Smi::FromInt(value);
  // Bypass NumberFromDouble to avoid various redundant checks.
  return AllocateHeapNumber(FastI2D(value), pretenure);
}


MaybeObject* Heap::NumberFromUint32(
    uint32_t value, PretenureFlag pretenure) {
  if (static_cast<int32_t>(value) >= 0 &&
      Smi::IsValid(static_cast<int32_t>(value))) {
    return Smi::FromInt(static_cast<int32_t>(value));
  }
  // Bypass NumberFromDouble to avoid various redundant checks.
  return AllocateHeapNumber(FastUI2D(value), pretenure);
}


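// Releases the external resource of an external string. The resource slot
// is addressed manually via ExternalString::kResourceOffset, and cleared
// so the resource is disposed at most once.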
void Heap::FinalizeExternalString(String* string) {
  ASSERT(string->IsExternalString());
  v8::String::ExternalStringResourceBase** resource_addr =
      reinterpret_cast<v8::String::ExternalStringResourceBase**>(
          reinterpret_cast<byte*>(string) +
          ExternalString::kResourceOffset -
          kHeapObjectTag);

  // Dispose of the C++ object if it has not already been disposed.
  if (*resource_addr != NULL) {
    (*resource_addr)->Dispose();
    *resource_addr = NULL;
  }
}


MaybeObject* Heap::AllocateRawMap() {
#ifdef DEBUG
  isolate_->counters()->objs_since_last_full()->Increment();
  isolate_->counters()->objs_since_last_young()->Increment();
#endif
  MaybeObject* result = map_space_->AllocateRaw(Map::kSize);
  if (result->IsFailure()) old_gen_exhausted_ = true;
#ifdef DEBUG
  if (!result->IsFailure()) {
    // Maps have their own alignment.
    CHECK((reinterpret_cast<intptr_t>(result) & kMapAlignmentMask) ==
          static_cast<intptr_t>(kHeapObjectTag));
  }
#endif
  return result;
}


MaybeObject* Heap::AllocateRawCell() {
#ifdef DEBUG
  isolate_->counters()->objs_since_last_full()->Increment();
  isolate_->counters()->objs_since_last_young()->Increment();
#endif
  MaybeObject* result = cell_space_->AllocateRaw(JSGlobalPropertyCell::kSize);
  if (result->IsFailure()) old_gen_exhausted_ = true;
  return result;
}


bool Heap::InNewSpace(Object* object) {
  bool result = new_space_.Contains(object);
  ASSERT(!result ||                  // Either not in new space
         gc_state_ != NOT_IN_GC ||   // ... or in the middle of GC
         InToSpace(object));         // ... or in to-space (where we allocate).
  return result;
}


bool Heap::InNewSpace(Address addr) {
  return new_space_.Contains(addr);
}


bool Heap::InFromSpace(Object* object) {
  return new_space_.FromSpaceContains(object);
}


bool Heap::InToSpace(Object* object) {
  return new_space_.ToSpaceContains(object);
}


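// Returns true only when incremental marking is stopped and the available
// old-generation space has been exhausted.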
bool Heap::OldGenerationAllocationLimitReached() {
  if (!incremental_marking()->IsStopped()) return false;
  return OldGenerationSpaceAvailable() < 0;
}


bool Heap::ShouldBePromoted(Address old_address, int object_size) {
  // An object should be promoted if:
  // - the object has survived a scavenge operation or
  // - to space is already 25% full.
  NewSpacePage* page = NewSpacePage::FromAddress(old_address);
  Address age_mark = new_space_.age_mark();
  bool below_mark = page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
      (!page->ContainsLimit(age_mark) || old_address < age_mark);
  return below_mark || (new_space_.Size() + object_size) >=
                        (new_space_.EffectiveCapacity() >> 2);
}


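// Store-buffer write barrier: remember slots in old-space objects that may
// now point into new space, so the scavenger can find old-to-new pointers
// without scanning the whole old generation.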
void Heap::RecordWrite(Address address, int offset) {
  if (!InNewSpace(address)) store_buffer_.Mark(address + offset);
}


void Heap::RecordWrites(Address address, int start, int len) {
  if (!InNewSpace(address)) {
    for (int i = 0; i < len; i++) {
      store_buffer_.Mark(address + start + i * kPointerSize);
    }
  }
}


OldSpace* Heap::TargetSpace(HeapObject* object) {
  InstanceType type = object->map()->instance_type();
  AllocationSpace space = TargetSpaceId(type);
  return (space == OLD_POINTER_SPACE)
      ? old_pointer_space_
      : old_data_space_;
}


AllocationSpace Heap::TargetSpaceId(InstanceType type) {
  // Heap numbers and sequential strings are promoted to old data space, all
  // other object types are promoted to old pointer space.  We do not use
  // object->IsHeapNumber() and object->IsSeqString() because we already
  // know that object has the heap object tag.

  // These objects are never allocated in new space.
  ASSERT(type != MAP_TYPE);
  ASSERT(type != CODE_TYPE);
  ASSERT(type != ODDBALL_TYPE);
  ASSERT(type != JS_GLOBAL_PROPERTY_CELL_TYPE);

  if (type < FIRST_NONSTRING_TYPE) {
    // There are four string representations: sequential strings, external
    // strings, cons strings, and sliced strings.
    // Only the latter two contain non-map-word pointers to heap objects.
    return ((type & kIsIndirectStringMask) == kIsIndirectStringTag)
        ? OLD_POINTER_SPACE
        : OLD_DATA_SPACE;
  } else {
    return (type <= LAST_DATA_TYPE) ? OLD_DATA_SPACE : OLD_POINTER_SPACE;
  }
}


void Heap::CopyBlock(Address dst, Address src, int byte_size) {
  CopyWords(reinterpret_cast<Object**>(dst),
            reinterpret_cast<Object**>(src),
            byte_size / kPointerSize);
}


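// Like CopyBlock, but handles overlapping ranges: a forward word-by-word
// copy is safe when dst is below src or beyond the source block; otherwise
// memmove is used.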
void Heap::MoveBlock(Address dst, Address src, int byte_size) {
  ASSERT(IsAligned(byte_size, kPointerSize));

  int size_in_words = byte_size / kPointerSize;

  if ((dst < src) || (dst >= (src + byte_size))) {
    Object** src_slot = reinterpret_cast<Object**>(src);
    Object** dst_slot = reinterpret_cast<Object**>(dst);
    Object** end_slot = src_slot + size_in_words;

    while (src_slot != end_slot) {
      *dst_slot++ = *src_slot++;
    }
  } else {
    memmove(dst, src, byte_size);
  }
}


void Heap::ScavengePointer(HeapObject** p) {
  ScavengeObject(p, *p);
}


void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
  ASSERT(HEAP->InFromSpace(object));

  // We use the first word (where the map pointer usually is) of a heap
  // object to record the forwarding pointer.  A forwarding pointer can
  // point to an old space, the code space, or the to space of the new
  // generation.
  MapWord first_word = object->map_word();

  // If the first word is a forwarding address, the object has already been
  // copied.
  if (first_word.IsForwardingAddress()) {
    HeapObject* dest = first_word.ToForwardingAddress();
    ASSERT(HEAP->InFromSpace(*p));
    *p = dest;
    return;
  }

  // Call the slow part of scavenge object.
  return ScavengeObjectSlow(p, object);
}


bool Heap::CollectGarbage(AllocationSpace space) {
  return CollectGarbage(space, SelectGarbageCollector(space));
}


MaybeObject* Heap::PrepareForCompare(String* str) {
  // Always flatten small strings and force flattening of long strings
  // after we have accumulated a certain amount of unflattened string
  // length that we failed to flatten.
  static const int kMaxAlwaysFlattenLength = 32;
  static const int kFlattenLongThreshold = 16*KB;

  const int length = str->length();
  MaybeObject* obj = str->TryFlatten();
  if (length <= kMaxAlwaysFlattenLength ||
      unflattened_strings_length_ >= kFlattenLongThreshold) {
    return obj;
  }
  if (obj->IsFailure()) {
    unflattened_strings_length_ += length;
  }
  return str;
}


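// Registers a change in memory allocated outside the V8 heap but kept
// alive by heap objects. If the amount added since the last global GC
// exceeds external_allocation_limit_, a full collection is triggered.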
int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
  ASSERT(HasBeenSetUp());
  int amount = amount_of_external_allocated_memory_ + change_in_bytes;
  if (change_in_bytes >= 0) {
    // Avoid overflow.
    if (amount > amount_of_external_allocated_memory_) {
      amount_of_external_allocated_memory_ = amount;
    }
    int amount_since_last_global_gc =
        amount_of_external_allocated_memory_ -
        amount_of_external_allocated_memory_at_last_global_gc_;
    if (amount_since_last_global_gc > external_allocation_limit_) {
      CollectAllGarbage(kNoGCFlags);
    }
  } else {
    // Avoid underflow.
    if (amount >= 0) {
      amount_of_external_allocated_memory_ = amount;
    }
  }
  ASSERT(amount_of_external_allocated_memory_ >= 0);
  return amount_of_external_allocated_memory_;
}


void Heap::SetLastScriptId(Object* last_script_id) {
  roots_[kLastScriptIdRootIndex] = last_script_id;
}


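// Computes the Isolate that owns this Heap without a back pointer. The
// offset of the heap within Isolate is obtained by calling heap() on a
// dummy Isolate placed at address 4, and then subtracted from 'this'.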
Isolate* Heap::isolate() {
  return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) -
      reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4);
}


#ifdef DEBUG
#define GC_GREEDY_CHECK() \
  if (FLAG_gc_greedy) HEAP->GarbageCollectionGreedyCheck()
#else
#define GC_GREEDY_CHECK() { }
#endif

// Calls the FUNCTION_CALL function and retries it up to two more times
// (performing increasingly aggressive garbage collections in between) to
// guarantee that any allocations performed during the call will succeed if
// there's enough memory.

// Warning: Do not use the identifiers __object__, __maybe_object__ or
// __scope__ in a call to this macro.

#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)\
  do {                                                                    \
    GC_GREEDY_CHECK();                                                    \
    MaybeObject* __maybe_object__ = FUNCTION_CALL;                        \
    Object* __object__ = NULL;                                            \
    if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE;            \
    if (__maybe_object__->IsOutOfMemory()) {                              \
      v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_0", true);\
    }                                                                     \
    if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY;                \
    ISOLATE->heap()->CollectGarbage(Failure::cast(__maybe_object__)->     \
                                    allocation_space());                  \
    __maybe_object__ = FUNCTION_CALL;                                     \
    if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE;            \
    if (__maybe_object__->IsOutOfMemory()) {                              \
      v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_1", true);\
    }                                                                     \
    if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY;                \
    ISOLATE->counters()->gc_last_resort_from_handles()->Increment();      \
    ISOLATE->heap()->CollectAllAvailableGarbage();                        \
    {                                                                     \
      AlwaysAllocateScope __scope__;                                      \
      __maybe_object__ = FUNCTION_CALL;                                   \
    }                                                                     \
    if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE;            \
    if (__maybe_object__->IsOutOfMemory() ||                              \
        __maybe_object__->IsRetryAfterGC()) {                             \
      /* TODO(1181417): Fix this. */                                      \
      v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_2", true);\
    }                                                                     \
    RETURN_EMPTY;                                                         \
  } while (false)


#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE)                \
  CALL_AND_RETRY(ISOLATE,                                               \
                 FUNCTION_CALL,                                         \
                 return Handle<TYPE>(TYPE::cast(__object__), ISOLATE),  \
                 return Handle<TYPE>())
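
// Example (a sketch of a typical call site; cf. Factory::NewFixedArray in
// factory.cc, which wraps a raw allocator in a Handle):
//
//   CALL_HEAP_FUNCTION(
//       isolate(),
//       isolate()->heap()->AllocateFixedArray(size, pretenure),
//       FixedArray);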


#define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL) \
  CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, return, return)


#ifdef DEBUG

inline bool Heap::allow_allocation(bool new_state) {
  bool old = allocation_allowed_;
  allocation_allowed_ = new_state;
  return old;
}

#endif


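// External strings are kept in two lists, split by generation, so the
// common scavenge path only has to visit new_space_strings_.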
void ExternalStringTable::AddString(String* string) {
  ASSERT(string->IsExternalString());
  if (heap_->InNewSpace(string)) {
    new_space_strings_.Add(string);
  } else {
    old_space_strings_.Add(string);
  }
}


void ExternalStringTable::Iterate(ObjectVisitor* v) {
  if (!new_space_strings_.is_empty()) {
    Object** start = &new_space_strings_[0];
    v->VisitPointers(start, start + new_space_strings_.length());
  }
  if (!old_space_strings_.is_empty()) {
    Object** start = &old_space_strings_[0];
    v->VisitPointers(start, start + old_space_strings_.length());
  }
}


// Verify() is inline to avoid ifdef-s around its calls in release
// mode.
void ExternalStringTable::Verify() {
#ifdef DEBUG
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    ASSERT(heap_->InNewSpace(new_space_strings_[i]));
    ASSERT(new_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
  }
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
    ASSERT(old_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
  }
#endif
}


void ExternalStringTable::AddOldString(String* string) {
  ASSERT(string->IsExternalString());
  ASSERT(!heap_->InNewSpace(string));
  old_space_strings_.Add(string);
}


void ExternalStringTable::ShrinkNewStrings(int position) {
  new_space_strings_.Rewind(position);
  if (FLAG_verify_heap) {
    Verify();
  }
}


void Heap::ClearInstanceofCache() {
  set_instanceof_cache_function(the_hole_value());
}


Object* Heap::ToBoolean(bool condition) {
  return condition ? true_value() : false_value();
}


void Heap::CompletelyClearInstanceofCache() {
  set_instanceof_cache_map(the_hole_value());
  set_instanceof_cache_function(the_hole_value());
}


MaybeObject* TranscendentalCache::Get(Type type, double input) {
  SubCache* cache = caches_[type];
  if (cache == NULL) {
    caches_[type] = cache = new SubCache(type);
  }
  return cache->Get(input);
}


Address TranscendentalCache::cache_array_address() {
  return reinterpret_cast<Address>(caches_);
}


double TranscendentalCache::SubCache::Calculate(double input) {
  switch (type_) {
    case ACOS:
      return acos(input);
    case ASIN:
      return asin(input);
    case ATAN:
      return atan(input);
    case COS:
      return cos(input);
    case EXP:
      return exp(input);
    case LOG:
      return log(input);
    case SIN:
      return sin(input);
    case TAN:
      return tan(input);
    default:
      return 0.0;  // Never happens.
  }
}


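// Cache lookup hashes the raw bits of the double (through the Converter
// union) into a direct-mapped table; on a miss the result is computed,
// boxed as a HeapNumber, and stored back into the table.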
MaybeObject* TranscendentalCache::SubCache::Get(double input) {
  Converter c;
  c.dbl = input;
  int hash = Hash(c);
  Element e = elements_[hash];
  if (e.in[0] == c.integers[0] &&
      e.in[1] == c.integers[1]) {
    ASSERT(e.output != NULL);
    isolate_->counters()->transcendental_cache_hit()->Increment();
    return e.output;
  }
  double answer = Calculate(input);
  isolate_->counters()->transcendental_cache_miss()->Increment();
  Object* heap_number;
  { MaybeObject* maybe_heap_number =
        isolate_->heap()->AllocateHeapNumber(answer);
    if (!maybe_heap_number->ToObject(&heap_number)) return maybe_heap_number;
  }
  elements_[hash].in[0] = c.integers[0];
  elements_[hash].in[1] = c.integers[1];
  elements_[hash].output = heap_number;
  return heap_number;
}


Heap* _inline_get_heap_() {
  return HEAP;
}


} }  // namespace v8::internal

#endif  // V8_HEAP_INL_H_