// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Tests of the circular queue.
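//
// The checks below exercise the queue's chunked buffering protocol as
// observed through its public interface: producers obtain record slots
// with Enqueue(), the consumer walks records with StartDequeue() and
// FinishDequeue(), and FlushResidualRecords() disables the consumer's
// look-ahead so already buffered records can be read out immediately.
// (This summary is inferred from the behaviour asserted in the tests
// below, not taken from the queue implementation itself.)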

#include "v8.h"
#include "circular-queue-inl.h"
#include "cctest.h"

using i::SamplingCircularQueue;


TEST(SamplingCircularQueue) {
  typedef SamplingCircularQueue::Cell Record;
  const int kRecordsPerChunk = 4;
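  // The constructor arguments below are, in order, the record size in
  // bytes, the chunk size in bytes (kRecordsPerChunk records per chunk),
  // and what appears to be the number of chunks in the buffer (3 here);
  // the meaning of the last argument is assumed from how the test fills
  // and drains the queue one chunk at a time.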
  SamplingCircularQueue scq(sizeof(Record),
                            kRecordsPerChunk * sizeof(Record),
                            3);

  // Check that we are using non-reserved values.
  // Fill up the first chunk.
  CHECK_EQ(NULL, scq.StartDequeue());
  for (Record i = 1; i < 1 + kRecordsPerChunk; ++i) {
    Record* rec = reinterpret_cast<Record*>(scq.Enqueue());
    CHECK_NE(NULL, rec);
    *rec = i;
    CHECK_EQ(NULL, scq.StartDequeue());
  }

  // Fill up the second chunk. Consumption must still be unavailable.
  CHECK_EQ(NULL, scq.StartDequeue());
  for (Record i = 10; i < 10 + kRecordsPerChunk; ++i) {
    Record* rec = reinterpret_cast<Record*>(scq.Enqueue());
    CHECK_NE(NULL, rec);
    *rec = i;
    CHECK_EQ(NULL, scq.StartDequeue());
  }

  Record* rec = reinterpret_cast<Record*>(scq.Enqueue());
  CHECK_NE(NULL, rec);
  *rec = 20;
  // Now that we have started filling up the third chunk, consumption
  // becomes possible.
  CHECK_NE(NULL, scq.StartDequeue());

  // Consume the first chunk.
  for (Record i = 1; i < 1 + kRecordsPerChunk; ++i) {
    Record* rec = reinterpret_cast<Record*>(scq.StartDequeue());
    CHECK_NE(NULL, rec);
    CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
    CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
    scq.FinishDequeue();
    CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
  }
  // Now consumption must not be possible, as the consumer now polls
  // the first chunk for emptiness.
  CHECK_EQ(NULL, scq.StartDequeue());

  scq.FlushResidualRecords();
  // From now on, the consumer no longer polls ahead of the current chunk,
  // so it is possible to consume the second chunk.
  CHECK_NE(NULL, scq.StartDequeue());
  // Consume the second chunk.
  for (Record i = 10; i < 10 + kRecordsPerChunk; ++i) {
    Record* rec = reinterpret_cast<Record*>(scq.StartDequeue());
    CHECK_NE(NULL, rec);
    CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
    CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
    scq.FinishDequeue();
    CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
  }
  // Consumption must still be possible as the first cell of the
  // last chunk is not clean.
  CHECK_NE(NULL, scq.StartDequeue());
}


namespace {

class ProducerThread: public i::Thread {
 public:
  typedef SamplingCircularQueue::Cell Record;

  ProducerThread(SamplingCircularQueue* scq,
                 int records_per_chunk,
                 Record value,
                 i::Semaphore* finished)
      : Thread("producer"),
        scq_(scq),
        records_per_chunk_(records_per_chunk),
        value_(value),
        finished_(finished) { }

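  // Enqueues records_per_chunk_ consecutive records starting at value_
  // and then signals finished_ so the test can begin consuming them.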
  virtual void Run() {
    for (Record i = value_; i < value_ + records_per_chunk_; ++i) {
      Record* rec = reinterpret_cast<Record*>(scq_->Enqueue());
      CHECK_NE(NULL, rec);
      *rec = i;
    }

    finished_->Signal();
  }

 private:
  SamplingCircularQueue* scq_;
  const int records_per_chunk_;
  Record value_;
  i::Semaphore* finished_;
};

}  // namespace

TEST(SamplingCircularQueueMultithreading) {
  // Emulate multiple VM threads working 'one thread at a time.'
  // This test enqueues data from different threads. This corresponds
  // to the case of profiling under Linux, where the signal handler
  // that does the sampling is called in the context of different VM
  // threads.

  typedef ProducerThread::Record Record;
  const int kRecordsPerChunk = 4;
  SamplingCircularQueue scq(sizeof(Record),
                            kRecordsPerChunk * sizeof(Record),
                            3);
  i::Semaphore* semaphore = i::OS::CreateSemaphore(0);
  // Don't poll ahead, making it possible to check data in the buffer
  // immediately after enqueuing.
  scq.FlushResidualRecords();
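  // Each producer below fills exactly one chunk; because the look-ahead
  // has just been disabled, the main thread can consume that chunk as
  // soon as the producer signals the semaphore.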

  // Check that we are using non-reserved values.
  ProducerThread producer1(&scq, kRecordsPerChunk, 1, semaphore);
  ProducerThread producer2(&scq, kRecordsPerChunk, 10, semaphore);
  ProducerThread producer3(&scq, kRecordsPerChunk, 20, semaphore);

  CHECK_EQ(NULL, scq.StartDequeue());
  producer1.Start();
  semaphore->Wait();
  for (Record i = 1; i < 1 + kRecordsPerChunk; ++i) {
    Record* rec = reinterpret_cast<Record*>(scq.StartDequeue());
    CHECK_NE(NULL, rec);
    CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
    CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
    scq.FinishDequeue();
    CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
  }

  CHECK_EQ(NULL, scq.StartDequeue());
  producer2.Start();
  semaphore->Wait();
  for (Record i = 10; i < 10 + kRecordsPerChunk; ++i) {
    Record* rec = reinterpret_cast<Record*>(scq.StartDequeue());
    CHECK_NE(NULL, rec);
    CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
    CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
    scq.FinishDequeue();
    CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
  }

  CHECK_EQ(NULL, scq.StartDequeue());
  producer3.Start();
  semaphore->Wait();
  for (Record i = 20; i < 20 + kRecordsPerChunk; ++i) {
    Record* rec = reinterpret_cast<Record*>(scq.StartDequeue());
    CHECK_NE(NULL, rec);
    CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
    CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
    scq.FinishDequeue();
    CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
  }

  CHECK_EQ(NULL, scq.StartDequeue());

  delete semaphore;
}