// Copyright 2008 Google Inc. All Rights Reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

//      http://www.apache.org/licenses/LICENSE-2.0

// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Thread-safe container of disk blocks.
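//
// DiskBlockTable keeps two views of the same set of blocks: a map from
// sector address to block (addr_to_block_) and a dense position vector
// (pos_to_addr_) that makes uniform random selection O(1). data_mutex_
// guards both structures; parameter_mutex_ guards the device geometry.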

#include <new>      // Needed for std::nothrow, used in GetUnusedBlock().
#include <utility>

// This file must work with autoconf on its public version,
// so these includes are correct.
#include "disk_blocks.h"
DiskBlockTable::DiskBlockTable() {
  nelems_ = 0;
  pthread_mutex_init(&data_mutex_, NULL);
  pthread_mutex_init(&parameter_mutex_, NULL);
  pthread_cond_init(&data_condition_, NULL);
}

DiskBlockTable::~DiskBlockTable() {
  CleanTable();
  pthread_mutex_destroy(&data_mutex_);
  pthread_mutex_destroy(&parameter_mutex_);
  pthread_cond_destroy(&data_condition_);
}

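// Deletes every block entry from both indexes and wakes any thread
// blocked in GetRandomBlock().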
void DiskBlockTable::CleanTable() {
  pthread_mutex_lock(&data_mutex_);
  for (AddrToBlockMap::iterator it =
           addr_to_block_.begin(); it != addr_to_block_.end(); ++it) {
    delete it->second;
  }
  addr_to_block_.clear();
  nelems_ = 0;
  pthread_cond_broadcast(&data_condition_);
  pthread_mutex_unlock(&data_mutex_);
}

// 64-bit non-negative random number generator.  Stolen from
// depot/google3/base/tracecontext_unittest.cc.
int64 DiskBlockTable::Random64() {
  int64 x = random();
  x = (x << 30) ^ random();
  x = (x << 30) ^ random();
  if (x >= 0)
    return x;
  else
    return -x;
}

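// Returns the current number of blocks in the table.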
int64 DiskBlockTable::NumElems() {
  pthread_mutex_lock(&data_mutex_);
  int64 nelems = nelems_;
  pthread_mutex_unlock(&data_mutex_);
  return nelems;
}

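// Adds a freshly allocated block to both indexes and signals waiters
// that a block is available.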
void DiskBlockTable::InsertOnStructure(BlockData *block) {
  int64 address = block->GetAddress();
  StorageData *sd = new StorageData();
  sd->block = block;
  // Insert the new block into both indexes. nelems_ and pos_to_addr_
  // must only be touched while holding data_mutex_.
  pthread_mutex_lock(&data_mutex_);
  sd->pos = nelems_;
  if (pos_to_addr_.size() <= nelems_) {
    pos_to_addr_.insert(pos_to_addr_.end(), address);
  } else {
    pos_to_addr_[nelems_] = address;
  }
  addr_to_block_.insert(std::make_pair(address, sd));
  nelems_++;
  pthread_cond_broadcast(&data_condition_);
  pthread_mutex_unlock(&data_mutex_);
}

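// Swap-with-last removal: the departing entry's slot in pos_to_addr_
// is refilled with the last entry, keeping the vector dense so random
// selection stays O(1). Returns 1 on success, 0 if the block was not
// in the table.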
int DiskBlockTable::RemoveBlock(BlockData *block) {
  // For write threads: remove the block from the structure and drop
  // the caller's reference. All lookups happen under data_mutex_.
  int64 address = block->GetAddress();
  int ret = 1;
  pthread_mutex_lock(&data_mutex_);
  AddrToBlockMap::iterator it = addr_to_block_.find(address);
  if (it != addr_to_block_.end()) {
    sat_assert(nelems_ > 0);
    int curr_pos = it->second->pos;
    int last_pos = nelems_ - 1;
    AddrToBlockMap::iterator last_it = addr_to_block_.find(
        pos_to_addr_[last_pos]);
    sat_assert(last_it != addr_to_block_.end());
    // Move the last entry into the vacated slot so the position
    // vector stays dense.
    pos_to_addr_[curr_pos] = pos_to_addr_[last_pos];
    last_it->second->pos = curr_pos;
    delete it->second;
    addr_to_block_.erase(it);
    nelems_--;
    block->DecreaseReferenceCounter();
    if (block->GetReferenceCounter() == 0)
      delete block;
    pthread_cond_broadcast(&data_condition_);
  } else {
    ret = 0;
  }
  pthread_mutex_unlock(&data_mutex_);
  return ret;
}

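// Drops one reference from a block handed out by GetRandomBlock(),
// freeing it if this was the last reference. Returns 1 on success,
// 0 if the counter was already zero.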
int DiskBlockTable::ReleaseBlock(BlockData *block) {
  // For random (read) threads: just drop one reference.
  int ret = 1;
  pthread_mutex_lock(&data_mutex_);
  int references = block->GetReferenceCounter();
  if (references > 0) {
    if (references == 1)
      delete block;
    else
      block->DecreaseReferenceCounter();
  } else {
    ret = 0;
  }
  pthread_mutex_unlock(&data_mutex_);
  return ret;
}

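// Picks a uniformly random initialized block and bumps its reference
// counter. Waits up to two seconds for the table to become non-empty;
// returns NULL on timeout or if the chosen block is not yet written.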
BlockData *DiskBlockTable::GetRandomBlock() {
  struct timespec ts;
  struct timeval tp;
  int result = 0;
  gettimeofday(&tp, NULL);
  ts.tv_sec  = tp.tv_sec;
  ts.tv_nsec = tp.tv_usec * 1000;
  ts.tv_sec += 2;  // Wait at most two seconds.
  pthread_mutex_lock(&data_mutex_);
  while (!nelems_ && result != ETIMEDOUT) {
    result = pthread_cond_timedwait(&data_condition_, &data_mutex_, &ts);
  }
  if (result == ETIMEDOUT) {
    pthread_mutex_unlock(&data_mutex_);
    return NULL;
  } else {
    int64 random_number = Random64();
    int64 random_pos = random_number % nelems_;
    int64 address = pos_to_addr_[random_pos];
    AddrToBlockMap::const_iterator it = addr_to_block_.find(address);
    sat_assert(it != addr_to_block_.end());
    BlockData *b = it->second->block;
    // A block is returned only if its content has been written to disk.
    if (b->BlockIsInitialized()) {
      b->IncreaseReferenceCounter();
    } else {
      b = NULL;
    }
    pthread_mutex_unlock(&data_mutex_);
    return b;
  }
}

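// Records the device geometry used for subsequent allocations and
// clears any existing entries.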
void DiskBlockTable::SetParameters(
    int sector_size, int write_block_size, int64 device_sectors,
    int64 segment_size, string device_name) {
  pthread_mutex_lock(&parameter_mutex_);
  sector_size_ = sector_size;
  write_block_size_ = write_block_size;
  device_sectors_ = device_sectors;
  segment_size_ = segment_size;
  device_name_ = device_name;
  CleanTable();
  pthread_mutex_unlock(&parameter_mutex_);
}

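// Allocates a new block at a random, write-block-aligned, currently
// unused position, drawn either from the whole device or from the
// given segment. Retries up to kBlockRetry times; returns NULL on
// failure.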
BlockData *DiskBlockTable::GetUnusedBlock(int64 segment) {
  int64 sector = 0;
  // Use nothrow new so the allocation-failure check below is meaningful;
  // plain new would throw instead of returning NULL.
  BlockData *block = new (std::nothrow) BlockData();

  bool good_sequence = false;
  int num_sectors;

  if (block == NULL) {
    logprintf(0, "Process Error: Unable to allocate memory "
              "for sector data for disk %s.\n", device_name_.c_str());
    return NULL;
  }

  pthread_mutex_lock(&parameter_mutex_);

  sat_assert(device_sectors_ != 0);

  // Align the first sector with the beginning of a write block.
  num_sectors = write_block_size_ / sector_size_;

  for (int i = 0; i < kBlockRetry && !good_sequence; i++) {
    good_sequence = true;

    // Pick the block's first sector from either the entire disk or
    // from the requested segment of the disk.
    if (segment_size_ == -1) {
      sector = (Random64() & 0x7FFFFFFFFFFFFFFFLL) % (
          device_sectors_ / num_sectors);
      sector *= num_sectors;
    } else {
      sector = (Random64() & 0x7FFFFFFFFFFFFFFFLL) % (
          segment_size_ / num_sectors);
      sector *= num_sectors;
      sector += segment * segment_size_;

      // Make sure the block is within the segment.
      if (sector + num_sectors > (segment + 1) * segment_size_) {
        good_sequence = false;
        continue;
      }
    }
    // Make sure the entire block is in range.
    if (sector + num_sectors > device_sectors_) {
      good_sequence = false;
      continue;
    }
    // Check whether the block is free. Since blocks are aligned to
    // write_block_size, checking the first sector is enough: a partial
    // overlap between two blocks can never occur.
    pthread_mutex_lock(&data_mutex_);
    if (addr_to_block_.find(sector) != addr_to_block_.end()) {
      good_sequence = false;
    }
    pthread_mutex_unlock(&data_mutex_);
  }

  if (good_sequence) {
    block->SetParameters(sector, write_block_size_);
    block->IncreaseReferenceCounter();
    InsertOnStructure(block);
  } else {
    // No contiguous run of num_sectors free sectors was found within
    // kBlockRetry attempts, so return an error value.
    delete block;
    block = NULL;
  }
  pthread_mutex_unlock(&parameter_mutex_);

  return block;
}


// BlockData

BlockData::BlockData() {
  addr_ = 0;
  size_ = 0;
  references_ = 0;
  initialized_ = false;
  pattern_ = NULL;  // No pattern until SetPattern() is called.
  pthread_mutex_init(&data_mutex_, NULL);
}

BlockData::~BlockData() {
  pthread_mutex_destroy(&data_mutex_);
}

void BlockData::SetParameters(int64 address, int64 size) {
  addr_ = address;
  size_ = size;
}

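// The reference counter is not guarded here: DiskBlockTable updates it
// while holding its data_mutex_, and a brand-new block is only modified
// before it is published to other threads.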
void BlockData::IncreaseReferenceCounter() {
  references_++;
}

void BlockData::DecreaseReferenceCounter() {
  references_--;
}

int BlockData::GetReferenceCounter() {
  return references_;
}

void BlockData::SetBlockAsInitialized() {
  pthread_mutex_lock(&data_mutex_);
  initialized_ = true;
  pthread_mutex_unlock(&data_mutex_);
}

bool BlockData::BlockIsInitialized() {
  pthread_mutex_lock(&data_mutex_);
  bool initialized = initialized_;
  pthread_mutex_unlock(&data_mutex_);
  return initialized;
}

int64 BlockData::GetAddress() {
  return addr_;
}

int64 BlockData::GetSize() {
  return size_;
}

Pattern *BlockData::GetPattern() {
  return pattern_;
}

void BlockData::SetPattern(Pattern *p) {
  pattern_ = p;
}

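// Illustrative usage sketch (comment only, not compiled): how writer
// and reader threads would typically drive this table. The geometry
// values and device name below are assumptions for the example, not
// values taken from this file.
//
//   DiskBlockTable table;
//   table.SetParameters(512, 4096, device_sectors, -1, "/dev/sdX");
//
//   // Writer thread: claim a fresh block, write it, publish it.
//   BlockData *block = table.GetUnusedBlock(0);
//   if (block) {
//     // ... write block->GetSize() bytes at sector block->GetAddress() ...
//     block->SetBlockAsInitialized();
//   }
//
//   // Reader thread: sample a random initialized block, then release it.
//   BlockData *b = table.GetRandomBlock();
//   if (b)
//     table.ReleaseBlock(b);
//
//   // Writer thread, later: retire the block.
//   if (block)
//     table.RemoveBlock(block);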