utils.h revision 257744e915dfc84d6d07a6b2accf8402d9ffc708
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_UTILS_H_
#define V8_UTILS_H_

#include <stdlib.h>
#include <string.h>

#include "globals.h"
#include "checks.h"
#include "allocation.h"

namespace v8 {
namespace internal {

// ----------------------------------------------------------------------------
// General helper functions

#define IS_POWER_OF_TWO(x) (((x) & ((x) - 1)) == 0)

// Returns true iff x is a power of 2 (or zero). Cannot be used with the
// maximally negative value of the type T (the -1 overflows).
template <typename T>
static inline bool IsPowerOf2(T x) {
  return IS_POWER_OF_TWO(x);
}


// X must be a power of 2.  Returns the number of trailing zeros.
template <typename T>
static inline int WhichPowerOf2(T x) {
  ASSERT(IsPowerOf2(x));
  ASSERT(x != 0);
  if (x < 0) return 31;
  int bits = 0;
#ifdef DEBUG
  int original_x = x;
#endif
  if (x >= 0x10000) {
    bits += 16;
    x >>= 16;
  }
  if (x >= 0x100) {
    bits += 8;
    x >>= 8;
  }
  if (x >= 0x10) {
    bits += 4;
    x >>= 4;
  }
  switch (x) {
    default: UNREACHABLE();
    case 8: bits++;  // Fall through.
    case 4: bits++;  // Fall through.
    case 2: bits++;  // Fall through.
    case 1: break;
  }
  ASSERT_EQ(1 << bits, original_x);
  return bits;
}


// The C++ standard leaves the semantics of '>>' implementation-defined for
// negative signed operands. Most implementations do the right thing, though.
static inline int ArithmeticShiftRight(int x, int s) {
  return x >> s;
}


// Compute the 0-relative offset of some absolute value x of type T.
// This allows conversion of Addresses and integral types into
// 0-relative int offsets.
template <typename T>
static inline intptr_t OffsetFrom(T x) {
  return x - static_cast<T>(0);
}


// Compute the absolute value of type T for some 0-relative offset x.
// This allows conversion of 0-relative int offsets into Addresses and
// integral types.
template <typename T>
static inline T AddressFrom(intptr_t x) {
  return static_cast<T>(static_cast<T>(0) + x);
}


// Return the largest multiple of m which is <= x.
template <typename T>
static inline T RoundDown(T x, int m) {
  ASSERT(IsPowerOf2(m));
  return AddressFrom<T>(OffsetFrom(x) & -m);
}


// Return the smallest multiple of m which is >= x.
template <typename T>
static inline T RoundUp(T x, int m) {
  return RoundDown(x + m - 1, m);
}
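
// Worked example (editorial note, not part of the original header): with m a
// power of two, RoundDown(37, 16) == 32 and RoundUp(37, 16) == 48, while
// already aligned values are returned unchanged, e.g. RoundUp(48, 16) == 48.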


template <typename T>
static int Compare(const T& a, const T& b) {
  if (a == b)
    return 0;
  else if (a < b)
    return -1;
  else
    return 1;
}


template <typename T>
static int PointerValueCompare(const T* a, const T* b) {
  return Compare<T>(*a, *b);
}


// Returns the smallest power of two which is >= x. If you pass in a
// number that is already a power of two, it is returned as is.
// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
// figure 3-3, page 48, where the function is called clp2.
static inline uint32_t RoundUpToPowerOf2(uint32_t x) {
  ASSERT(x <= 0x80000000u);
  x = x - 1;
  x = x | (x >> 1);
  x = x | (x >> 2);
  x = x | (x >> 4);
  x = x | (x >> 8);
  x = x | (x >> 16);
  return x + 1;
}
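
// Worked example (editorial note, not part of the original header): for
// RoundUpToPowerOf2(37), the decrement and the shift-or cascade smear the
// leading bit into all lower positions (36 becomes 63), and the final
// increment yields 64. Exact powers of two are returned unchanged, e.g.
// RoundUpToPowerOf2(32) == 32.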


template <typename T>
static inline bool IsAligned(T value, T alignment) {
  ASSERT(IsPowerOf2(alignment));
  return (value & (alignment - 1)) == 0;
}


// Returns true if (addr + offset) is aligned.
static inline bool IsAddressAligned(Address addr,
                                    intptr_t alignment,
                                    int offset) {
  intptr_t offs = OffsetFrom(addr + offset);
  return IsAligned(offs, alignment);
}


// Returns the maximum of the two parameters.
template <typename T>
static T Max(T a, T b) {
  return a < b ? b : a;
}


// Returns the minimum of the two parameters.
template <typename T>
static T Min(T a, T b) {
  return a < b ? a : b;
}


inline int StrLength(const char* string) {
  size_t length = strlen(string);
  ASSERT(length == static_cast<size_t>(static_cast<int>(length)));
  return static_cast<int>(length);
}


// ----------------------------------------------------------------------------
// BitField is a helper template for encoding and decoding bit fields with
// unsigned content.
template<class T, int shift, int size>
class BitField {
 public:
  // Tells whether the provided value fits into the bit field.
  static bool is_valid(T value) {
    return (static_cast<uint32_t>(value) & ~((1U << (size)) - 1)) == 0;
  }

  // Returns a uint32_t mask of the bit field.
  static uint32_t mask() {
    // To use all bits of a uint32 in a bitfield without compiler warnings we
    // have to compute 2^32 without using a shift count of 32.
    return ((1U << shift) << size) - (1U << shift);
  }

  // Returns a uint32_t with the bit field value encoded.
  static uint32_t encode(T value) {
    ASSERT(is_valid(value));
    return static_cast<uint32_t>(value) << shift;
  }

  // Returns a uint32_t with the bit field value updated.
  static uint32_t update(uint32_t previous, T value) {
    return (previous & ~mask()) | encode(value);
  }

  // Extracts the bit field from the value.
  static T decode(uint32_t value) {
    return static_cast<T>((value & mask()) >> shift);
  }

  // Value for the field with all bits set.
  static T max() {
    return decode(mask());
  }
};
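
// Illustrative usage (editorial sketch, not part of the original header; the
// field names below are hypothetical). Several BitField typedefs can pack
// independent values into one uint32_t:
//
//   typedef BitField<bool, 0, 1> IsNestedField;  // bit 0
//   typedef BitField<int, 1, 3> KindField;       // bits 1..3
//
//   uint32_t packed = IsNestedField::encode(true) | KindField::encode(5);
//   int kind = KindField::decode(packed);        // == 5
//   packed = KindField::update(packed, 2);       // kind is now 2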


// ----------------------------------------------------------------------------
// Hash function.

// Thomas Wang, Integer Hash Functions.
// http://www.concentric.net/~Ttwang/tech/inthash.htm
static inline uint32_t ComputeIntegerHash(uint32_t key) {
  uint32_t hash = key;
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;  // hash = (hash + (hash << 3)) + (hash << 11);
  hash = hash ^ (hash >> 16);
  return hash;
}


static inline uint32_t ComputePointerHash(void* ptr) {
  return ComputeIntegerHash(
      static_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr)));
}


// ----------------------------------------------------------------------------
// Miscellaneous

// A static resource holds a static instance that can be reserved in
// a local scope using an instance of Access.  Attempts to re-reserve
// the instance will cause an error.
template <typename T>
class StaticResource {
 public:
  StaticResource() : is_reserved_(false)  {}

 private:
  template <typename S> friend class Access;
  T instance_;
  bool is_reserved_;
};


// Locally scoped access to a static resource.
template <typename T>
class Access {
 public:
  explicit Access(StaticResource<T>* resource)
    : resource_(resource)
    , instance_(&resource->instance_) {
    ASSERT(!resource->is_reserved_);
    resource->is_reserved_ = true;
  }

  ~Access() {
    resource_->is_reserved_ = false;
    resource_ = NULL;
    instance_ = NULL;
  }

  T* value()  { return instance_; }
  T* operator -> ()  { return instance_; }

 private:
  StaticResource<T>* resource_;
  T* instance_;
};
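
// Illustrative usage (editorial sketch, not part of the original header;
// ScratchBuffer and Fill are hypothetical names). An Access object reserves
// the static instance for the duration of a scope and releases it on exit:
//
//   static StaticResource<ScratchBuffer> scratch_buffer;
//   {
//     Access<ScratchBuffer> scratch(&scratch_buffer);  // asserts if reserved
//     scratch->Fill(0);
//   }  // released here; a new Access may reserve it again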


template <typename T>
class Vector {
 public:
  Vector() : start_(NULL), length_(0) {}
  Vector(T* data, int length) : start_(data), length_(length) {
    ASSERT(length == 0 || (length > 0 && data != NULL));
  }

  static Vector<T> New(int length) {
    return Vector<T>(NewArray<T>(length), length);
  }

  // Returns a vector using the same backing storage as this one,
  // spanning from and including 'from', to but not including 'to'.
  Vector<T> SubVector(int from, int to) {
    ASSERT(to <= length_);
    ASSERT(from < to);
    ASSERT(0 <= from);
    return Vector<T>(start() + from, to - from);
  }

  // Returns the length of the vector.
  int length() const { return length_; }

  // Returns whether or not the vector is empty.
  bool is_empty() const { return length_ == 0; }

  // Returns the pointer to the start of the data in the vector.
  T* start() const { return start_; }

  // Access individual vector elements - checks bounds in debug mode.
  T& operator[](int index) const {
    ASSERT(0 <= index && index < length_);
    return start_[index];
  }

  const T& at(int index) const { return operator[](index); }

  T& first() { return start_[0]; }

  T& last() { return start_[length_ - 1]; }

  // Returns a clone of this vector with a new backing store.
  Vector<T> Clone() const {
    T* result = NewArray<T>(length_);
    for (int i = 0; i < length_; i++) result[i] = start_[i];
    return Vector<T>(result, length_);
  }

  void Sort(int (*cmp)(const T*, const T*)) {
    typedef int (*RawComparer)(const void*, const void*);
    qsort(start(),
          length(),
          sizeof(T),
          reinterpret_cast<RawComparer>(cmp));
  }

  void Sort() {
    Sort(PointerValueCompare<T>);
  }

  void Truncate(int length) {
    ASSERT(length <= length_);
    length_ = length;
  }

  // Releases the array underlying this vector. Once disposed the
  // vector is empty.
  void Dispose() {
    DeleteArray(start_);
    start_ = NULL;
    length_ = 0;
  }

  inline Vector<T> operator+(int offset) {
    ASSERT(offset < length_);
    return Vector<T>(start_ + offset, length_ - offset);
  }

  // Factory method for creating empty vectors.
  static Vector<T> empty() { return Vector<T>(NULL, 0); }

  template<typename S>
  static Vector<T> cast(Vector<S> input) {
    return Vector<T>(reinterpret_cast<T*>(input.start()),
                     input.length() * sizeof(S) / sizeof(T));
  }

 protected:
  void set_start(T* start) { start_ = start; }

 private:
  T* start_;
  int length_;
};
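
// Illustrative usage (editorial sketch, not part of the original header).
// Vector is a lightweight (pointer, length) pair; New() allocates a backing
// store that the caller must release with Dispose():
//
//   Vector<int> ints = Vector<int>::New(10);
//   for (int i = 0; i < ints.length(); i++) ints[i] = i;
//   Vector<int> middle = ints.SubVector(2, 5);  // shares the same storage
//   ints.Dispose();                             // also invalidates 'middle'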


// A pointer that can only be set once and doesn't allow NULL values.
template<typename T>
class SetOncePointer {
 public:
  SetOncePointer() : pointer_(NULL) { }

  bool is_set() const { return pointer_ != NULL; }

  T* get() const {
    ASSERT(pointer_ != NULL);
    return pointer_;
  }

  void set(T* value) {
    ASSERT(pointer_ == NULL && value != NULL);
    pointer_ = value;
  }

 private:
  T* pointer_;
};


template <typename T, int kSize>
class EmbeddedVector : public Vector<T> {
 public:
  EmbeddedVector() : Vector<T>(buffer_, kSize) { }

  explicit EmbeddedVector(T initial_value) : Vector<T>(buffer_, kSize) {
    for (int i = 0; i < kSize; ++i) {
      buffer_[i] = initial_value;
    }
  }

  // When copying, make the underlying Vector reference our buffer.
  EmbeddedVector(const EmbeddedVector& rhs)
      : Vector<T>(rhs) {
    memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
    this->set_start(buffer_);
  }

  EmbeddedVector& operator=(const EmbeddedVector& rhs) {
    if (this == &rhs) return *this;
    Vector<T>::operator=(rhs);
    memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
    this->set_start(buffer_);
    return *this;
  }

 private:
  T buffer_[kSize];
};


template <typename T>
class ScopedVector : public Vector<T> {
 public:
  explicit ScopedVector(int length) : Vector<T>(NewArray<T>(length), length) { }
  ~ScopedVector() {
    DeleteArray(this->start());
  }

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedVector);
};


inline Vector<const char> CStrVector(const char* data) {
  return Vector<const char>(data, StrLength(data));
}

inline Vector<char> MutableCStrVector(char* data) {
  return Vector<char>(data, StrLength(data));
}

inline Vector<char> MutableCStrVector(char* data, int max) {
  int length = StrLength(data);
  return Vector<char>(data, (length < max) ? length : max);
}
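
// Illustrative usage (editorial sketch, not part of the original header).
// EmbeddedVector keeps its storage inline (typically on the stack),
// ScopedVector owns a heap array for the lifetime of a scope, and CStrVector
// wraps an existing NUL-terminated string without copying:
//
//   EmbeddedVector<char, 128> buffer;            // 128 chars of inline storage
//   ScopedVector<int> temps(16);                 // freed when 'temps' dies
//   Vector<const char> name = CStrVector("foo"); // length 3, no allocation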


/*
 * A class that collects values into a backing store.
 * Specialized versions of the class can allow access to the backing store
 * in different ways.
 * There is no guarantee that the backing store is contiguous (and, as a
 * consequence, no guarantees that consecutively added elements are adjacent
 * in memory). The collector may move elements unless it has guaranteed not
 * to.
 */
template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
class Collector {
 public:
  explicit Collector(int initial_capacity = kMinCapacity)
      : index_(0), size_(0) {
    if (initial_capacity < kMinCapacity) {
      initial_capacity = kMinCapacity;
    }
    current_chunk_ = Vector<T>::New(initial_capacity);
  }

  virtual ~Collector() {
    // Free backing store (in reverse allocation order).
    current_chunk_.Dispose();
    for (int i = chunks_.length() - 1; i >= 0; i--) {
      chunks_.at(i).Dispose();
    }
  }

  // Add a single element.
  inline void Add(T value) {
    if (index_ >= current_chunk_.length()) {
      Grow(1);
    }
    current_chunk_[index_] = value;
    index_++;
    size_++;
  }

  // Add a block of contiguous elements and return a Vector backed by the
  // memory area.
  // A basic Collector will keep this vector valid as long as the Collector
  // is alive.
  inline Vector<T> AddBlock(int size, T initial_value) {
    ASSERT(size > 0);
    if (size > current_chunk_.length() - index_) {
      Grow(size);
    }
    T* position = current_chunk_.start() + index_;
    index_ += size;
    size_ += size;
    for (int i = 0; i < size; i++) {
      position[i] = initial_value;
    }
    return Vector<T>(position, size);
  }


  // Add a contiguous block of elements and return a vector backed
  // by the added block.
  // A basic Collector will keep this vector valid as long as the Collector
  // is alive.
  inline Vector<T> AddBlock(Vector<const T> source) {
    if (source.length() > current_chunk_.length() - index_) {
      Grow(source.length());
    }
    T* position = current_chunk_.start() + index_;
    index_ += source.length();
    size_ += source.length();
    for (int i = 0; i < source.length(); i++) {
      position[i] = source[i];
    }
    return Vector<T>(position, source.length());
  }


  // Write the contents of the collector into the provided vector.
  void WriteTo(Vector<T> destination) {
    ASSERT(size_ <= destination.length());
    int position = 0;
    for (int i = 0; i < chunks_.length(); i++) {
      Vector<T> chunk = chunks_.at(i);
      for (int j = 0; j < chunk.length(); j++) {
        destination[position] = chunk[j];
        position++;
      }
    }
    for (int i = 0; i < index_; i++) {
      destination[position] = current_chunk_[i];
      position++;
    }
  }

  // Allocate a single contiguous vector, copy all the collected
  // elements to the vector, and return it.
  // The caller is responsible for freeing the memory of the returned
  // vector (e.g., using Vector::Dispose).
  Vector<T> ToVector() {
    Vector<T> new_store = Vector<T>::New(size_);
    WriteTo(new_store);
    return new_store;
  }

  // Resets the collector to be empty.
  virtual void Reset();

  // Total number of elements added to collector so far.
  inline int size() { return size_; }

 protected:
  static const int kMinCapacity = 16;
  List<Vector<T> > chunks_;
  Vector<T> current_chunk_;  // Block of memory currently being written into.
  int index_;  // Current index in current chunk.
  int size_;  // Total number of elements in collector.

  // Creates a new current chunk, and stores the old chunk in the chunks_ list.
  void Grow(int min_capacity) {
    ASSERT(growth_factor > 1);
    int growth = current_chunk_.length() * (growth_factor - 1);
    if (growth > max_growth) {
      growth = max_growth;
    }
    int new_capacity = current_chunk_.length() + growth;
    if (new_capacity < min_capacity) {
      new_capacity = min_capacity + growth;
    }
    Vector<T> new_chunk = Vector<T>::New(new_capacity);
    int new_index = PrepareGrow(new_chunk);
    if (index_ > 0) {
      chunks_.Add(current_chunk_.SubVector(0, index_));
    } else {
      // Can happen if the call to PrepareGrow moves everything into
      // the new chunk.
      current_chunk_.Dispose();
    }
    current_chunk_ = new_chunk;
    index_ = new_index;
    ASSERT(index_ + min_capacity <= current_chunk_.length());
  }

  // Before replacing the current chunk, give a subclass the option to move
  // some of the current data into the new chunk. The function may update
  // the current index_ value to represent data no longer in the current chunk.
  // Returns the initial index of the new chunk (after copied data).
  virtual int PrepareGrow(Vector<T> new_chunk)  {
    return 0;
  }
};
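
// Illustrative usage (editorial sketch, not part of the original header).
// Elements are appended into growing chunks; ToVector() copies everything
// into a single contiguous, caller-owned vector:
//
//   Collector<int> collector;
//   for (int i = 0; i < 1000; i++) collector.Add(i);
//   Vector<int> squashed = collector.ToVector();  // contiguous copy
//   // ... use squashed ...
//   squashed.Dispose();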


/*
 * A collector that allows sequences of values to be guaranteed to
 * stay consecutive.
 * If the backing store grows while a sequence is active, the current
 * sequence might be moved, but after the sequence is ended, it will
 * not move again.
 * NOTICE: Blocks allocated using Collector::AddBlock(int) can also move
 * if they were added inside an active sequence and another element is
 * added afterwards.
 */
template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
class SequenceCollector : public Collector<T, growth_factor, max_growth> {
 public:
  explicit SequenceCollector(int initial_capacity)
      : Collector<T, growth_factor, max_growth>(initial_capacity),
        sequence_start_(kNoSequence) { }

  virtual ~SequenceCollector() {}

  void StartSequence() {
    ASSERT(sequence_start_ == kNoSequence);
    sequence_start_ = this->index_;
  }

  Vector<T> EndSequence() {
    ASSERT(sequence_start_ != kNoSequence);
    int sequence_start = sequence_start_;
    sequence_start_ = kNoSequence;
    if (sequence_start == this->index_) return Vector<T>();
    return this->current_chunk_.SubVector(sequence_start, this->index_);
  }

  // Drops the currently added sequence, and all collected elements in it.
  void DropSequence() {
    ASSERT(sequence_start_ != kNoSequence);
    int sequence_length = this->index_ - sequence_start_;
    this->index_ = sequence_start_;
    this->size_ -= sequence_length;
    sequence_start_ = kNoSequence;
  }

  virtual void Reset() {
    sequence_start_ = kNoSequence;
    this->Collector<T, growth_factor, max_growth>::Reset();
  }

 private:
  static const int kNoSequence = -1;
  int sequence_start_;

  // Move the currently active sequence to the new chunk.
  virtual int PrepareGrow(Vector<T> new_chunk) {
    if (sequence_start_ != kNoSequence) {
      int sequence_length = this->index_ - sequence_start_;
      // The new chunk is always larger than the current chunk, so there
      // is room for the copy.
      ASSERT(sequence_length < new_chunk.length());
      for (int i = 0; i < sequence_length; i++) {
        new_chunk[i] = this->current_chunk_[sequence_start_ + i];
      }
      this->index_ = sequence_start_;
      sequence_start_ = 0;
      return sequence_length;
    }
    return 0;
  }
};
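
// Illustrative usage (editorial sketch, not part of the original header).
// Elements added between StartSequence() and EndSequence() are guaranteed to
// end up adjacent in memory, even if the collector grows in between:
//
//   SequenceCollector<char> collector(64);
//   collector.StartSequence();
//   collector.Add('h');
//   collector.Add('i');
//   Vector<char> word = collector.EndSequence();  // contiguous, length 2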


// Compare ASCII/16bit chars to ASCII/16bit chars.
template <typename lchar, typename rchar>
static inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
  const lchar* limit = lhs + chars;
#ifdef V8_HOST_CAN_READ_UNALIGNED
  if (sizeof(*lhs) == sizeof(*rhs)) {
    // Number of characters in a uintptr_t.
    static const int kStepSize = sizeof(uintptr_t) / sizeof(*lhs);  // NOLINT
    while (lhs <= limit - kStepSize) {
      if (*reinterpret_cast<const uintptr_t*>(lhs) !=
          *reinterpret_cast<const uintptr_t*>(rhs)) {
        break;
      }
      lhs += kStepSize;
      rhs += kStepSize;
    }
  }
#endif
  while (lhs < limit) {
    int r = static_cast<int>(*lhs) - static_cast<int>(*rhs);
    if (r != 0) return r;
    ++lhs;
    ++rhs;
  }
  return 0;
}


// Calculate 10^exponent.
static inline int TenToThe(int exponent) {
  ASSERT(exponent <= 9);
  ASSERT(exponent >= 1);
  int answer = 10;
  for (int i = 1; i < exponent; i++) answer *= 10;
  return answer;
}


// The type-based aliasing rule allows the compiler to assume that pointers of
// different types (for some definition of different) never alias each other.
// Thus the following code does not work:
//
// float f = foo();
// int fbits = *(int*)(&f);
//
// The compiler 'knows' that the int pointer can't refer to f since the types
// don't match, so the compiler may cache f in a register, leaving random data
// in fbits.  Using C++ style casts makes no difference; however, a pointer to
// char data is assumed to alias any other pointer.  This is the 'memcpy
// exception'.
//
// BitCast uses the memcpy exception to move the bits from a variable of one
// type to a variable of another type.  Of course the end result is likely to
// be implementation dependent.  Most compilers (gcc-4.2 and MSVC 2005)
// will completely optimize BitCast away.
//
// There is an additional use for BitCast.
// Recent gccs will warn when they see casts that may result in breakage due to
// the type-based aliasing rule.  If you have checked that there is no breakage
// you can use BitCast to cast one pointer type to another.  This confuses gcc
// enough that it can no longer see that you have cast one pointer type to
// another, thus avoiding the warning.

// We need different implementations of BitCast for pointer and non-pointer
// values. We use partial specialization of an auxiliary struct to work around
// issues with overloading of template functions.
template <class Dest, class Source>
struct BitCastHelper {
  STATIC_ASSERT(sizeof(Dest) == sizeof(Source));

  INLINE(static Dest cast(const Source& source)) {
    Dest dest;
    memcpy(&dest, &source, sizeof(dest));
    return dest;
  }
};

template <class Dest, class Source>
struct BitCastHelper<Dest, Source*> {
  INLINE(static Dest cast(Source* source)) {
    return BitCastHelper<Dest, uintptr_t>::
        cast(reinterpret_cast<uintptr_t>(source));
  }
};

template <class Dest, class Source>
INLINE(Dest BitCast(const Source& source));

template <class Dest, class Source>
inline Dest BitCast(const Source& source) {
  return BitCastHelper<Dest, Source>::cast(source);
}
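
// Illustrative usage (editorial sketch, not part of the original header).
// BitCast copies the object representation instead of reinterpreting through
// an incompatible pointer, so it stays within the aliasing rules:
//
//   double d = 1.0;
//   uint64_t bits = BitCast<uint64_t>(d);  // 0x3FF0000000000000 on IEEE-754
//                                          // targets; no aliasing violation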

} }  // namespace v8::internal

#endif  // V8_UTILS_H_