/**
 * @file db_manage.c
 * Management of a DB file
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author Philippe Elie
 */

#define _GNU_SOURCE

#include <stdlib.h>
#ifdef ANDROID
#include <fcntl.h>
#else
#include <sys/fcntl.h>
#endif
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stdio.h>

#include "odb.h"
#include "op_string.h"
#include "op_libiberty.h"


static __inline odb_descr_t * odb_to_descr(odb_data_t * data)
{
	return (odb_descr_t *)(((char*)data->base_memory) + data->sizeof_header);
}


static __inline odb_node_t * odb_to_node_base(odb_data_t * data)
{
	return (odb_node_t *)(((char *)data->base_memory) + data->offset_node);
}


static __inline odb_index_t * odb_to_hash_base(odb_data_t * data)
{
	return (odb_index_t *)(((char *)data->base_memory) +
				data->offset_node +
				(data->descr->size * sizeof(odb_node_t)));
}


/**
 * return the number of bytes used by hash table, node table and header.
 */
static unsigned int tables_size(odb_data_t const * data, odb_node_nr_t node_nr)
{
	size_t size;

	size = node_nr * (sizeof(odb_index_t) * BUCKET_FACTOR);
	size += node_nr * sizeof(odb_node_t);
	size += data->offset_node;

	return size;
}
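/*
 * For reference, the on-file layout implied by the helpers above
 * (odb_to_descr, odb_to_node_base, odb_to_hash_base and tables_size):
 *
 *	offset 0           : user header, sizeof_header bytes
 *	sizeof_header      : odb_descr_t
 *	offset_node        : node table, descr->size * sizeof(odb_node_t) bytes
 *	after node table   : hash table, descr->size * BUCKET_FACTOR buckets
 *	                     of sizeof(odb_index_t) each
 */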


int odb_grow_hashtable(odb_data_t * data)
{
	unsigned int old_file_size;
	unsigned int new_file_size;
	unsigned int pos;
	void * new_map;

	old_file_size = tables_size(data, data->descr->size);
	new_file_size = tables_size(data, data->descr->size * 2);

	if (ftruncate(data->fd, new_file_size))
		return 1;

#ifdef MISSING_MREMAP
	new_map = mmap(0, new_file_size, PROT_READ | PROT_WRITE,
		MAP_SHARED, data->fd, 0);
#else
	new_map = mremap(data->base_memory,
			 old_file_size, new_file_size, MREMAP_MAYMOVE);
#endif

	if (new_map == MAP_FAILED)
		return 1;

#ifdef MISSING_MREMAP
	munmap(data->base_memory, old_file_size);
#endif

	data->base_memory = new_map;
	data->descr = odb_to_descr(data);
	data->descr->size *= 2;
	data->node_base = odb_to_node_base(data);
	data->hash_base = odb_to_hash_base(data);
	data->hash_mask = (data->descr->size * BUCKET_FACTOR) - 1;

	/* Rebuild the hash table; node zero is never used.  The new hash
	 * table does not need to be zero initialized: the file layout is
	 * node table then hash table, and growing doubles the node count,
	 * so the new hash table starts at
	 * offset_node + 2 * old_size * sizeof(odb_node_t).  As long as
	 * BUCKET_FACTOR * sizeof(odb_index_t) <= sizeof(odb_node_t) this
	 * is at or beyond the old end of file, i.e. entirely inside the
	 * freshly grown area, which ftruncate() guarantees is zero filled.
	 * Avoiding touching that memory again is worthwhile. */
#if 0
	for (pos = 0 ; pos < data->descr->size*BUCKET_FACTOR ; ++pos)
		data->hash_base[pos] = 0;
#endif

	for (pos = 1; pos < data->descr->current_size; ++pos) {
		odb_node_t * node = &data->node_base[pos];
		size_t index = odb_do_hash(data, node->key);
		node->next = data->hash_base[index];
		data->hash_base[index] = pos;
	}

	return 0;
}
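/*
 * A worked instance of the non-overlap argument above, using purely
 * illustrative sizes (sizeof(odb_node_t) == 16, sizeof(odb_index_t) == 4
 * and BUCKET_FACTOR == 2 are assumptions, not values guaranteed by odb.h):
 * growing from 128 to 256 nodes, the old file ends at
 * offset_node + 128 * 16 + 128 * 2 * 4 = offset_node + 3072, while the new
 * hash table begins at offset_node + 256 * 16 = offset_node + 4096, i.e.
 * entirely inside the zero-filled area added by ftruncate().
 */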


void odb_init(odb_t * odb)
{
	odb->data = NULL;
}


/* the default number of nodes, calculated so the initial tables fit in
 * one 4096 byte page */
#define DEFAULT_NODE_NR(offset_node)	128
#define FILES_HASH_SIZE                 512
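/*
 * Sketch of the arithmetic behind DEFAULT_NODE_NR, with the same assumed
 * sizes as in the grow example above (16-byte nodes, 4-byte indexes,
 * BUCKET_FACTOR == 2): 128 nodes take 128 * 16 = 2048 bytes of node table
 * plus 128 * 2 * 4 = 1024 bytes of hash table, leaving room for the user
 * header and the odb_descr_t within a single 4096 byte page.
 */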

static struct list_head files_hash[FILES_HASH_SIZE];


static void init_hash()
{
	size_t i;
	for (i = 0; i < FILES_HASH_SIZE; ++i)
		list_init(&files_hash[i]);
}


static odb_data_t *
find_samples_data(size_t hash, char const * filename)
{
	struct list_head * pos;

	/* FIXME: maybe an initial init routine ? */
	if (files_hash[0].next == NULL) {
		init_hash();
		return NULL;
	}

	list_for_each(pos, &files_hash[hash]) {
		odb_data_t * entry = list_entry(pos, odb_data_t, list);
		if (strcmp(entry->filename, filename) == 0)
			return entry;
	}

	return NULL;
}
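/*
 * Note: odb_open() below reuses an existing mapping when the same file is
 * opened twice: find_samples_data() looks the name up in files_hash and, on
 * a hit, odb_open() only increments ref_count, so several odb_open() /
 * odb_close() pairs on one file share a single mmap()ed view.
 */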


int odb_open(odb_t * odb, char const * filename, enum odb_rw rw,
	     size_t sizeof_header)
{
	struct stat stat_buf;
	odb_node_nr_t nr_node;
	odb_data_t * data;
	size_t hash;
	int err = 0;

	int flags = (rw == ODB_RDWR) ? (O_CREAT | O_RDWR) : O_RDONLY;
	int mmflags = (rw == ODB_RDWR) ? (PROT_READ | PROT_WRITE) : PROT_READ;

	hash = op_hash_string(filename) % FILES_HASH_SIZE;
	data = find_samples_data(hash, filename);
	if (data) {
		odb->data = data;
		data->ref_count++;
		return 0;
	}

	data = xmalloc(sizeof(odb_data_t));
	memset(data, '\0', sizeof(odb_data_t));
	list_init(&data->list);
	data->offset_node = sizeof_header + sizeof(odb_descr_t);
	data->sizeof_header = sizeof_header;
	data->ref_count = 1;
	data->filename = xstrdup(filename);

	data->fd = open(filename, flags, 0644);
	if (data->fd < 0) {
		err = errno;
		goto out;
	}

	if (fstat(data->fd, &stat_buf)) {
		err = errno;
		goto fail;
	}

	if (stat_buf.st_size == 0) {
		size_t file_size;

		if (rw == ODB_RDONLY) {
			err = EIO;
			goto fail;
		}

		nr_node = DEFAULT_NODE_NR(data->offset_node);

		file_size = tables_size(data, nr_node);
		if (ftruncate(data->fd, file_size)) {
			err = errno;
			goto fail;
		}
	} else {
		/* Calculate the number of nodes so it can be sanity
		 * checked against the on-disk descriptor later. */
		nr_node = (stat_buf.st_size - data->offset_node) /
			((sizeof(odb_index_t) * BUCKET_FACTOR) + sizeof(odb_node_t));
	}

	data->base_memory = mmap(0, tables_size(data, nr_node), mmflags,
				MAP_SHARED, data->fd, 0);

	if (data->base_memory == MAP_FAILED) {
		err = errno;
		goto fail;
	}

	data->descr = odb_to_descr(data);

	if (stat_buf.st_size == 0) {
		data->descr->size = nr_node;
		/* node zero is never used */
		data->descr->current_size = 1;
	} else {
		/* file already exists, sanity check the node count */
		if (nr_node != data->descr->size) {
			err = EINVAL;
			goto fail_unmap;
		}
	}

	data->hash_base = odb_to_hash_base(data);
	data->node_base = odb_to_node_base(data);
	data->hash_mask = (data->descr->size * BUCKET_FACTOR) - 1;

	list_add(&data->list, &files_hash[hash]);
	odb->data = data;
out:
	return err;
fail_unmap:
	munmap(data->base_memory, tables_size(data, nr_node));
fail:
	close(data->fd);
	free(data->filename);
	free(data);
	odb->data = NULL;
	goto out;
}
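/*
 * Illustrative usage sketch, not part of the library; the file name and
 * header type are made-up examples:
 *
 *	odb_t odb;
 *	odb_init(&odb);
 *	if (odb_open(&odb, "samples.db", ODB_RDWR, sizeof(struct my_header)))
 *		... handle the errno-style return value ...
 *	... access the mapped file through odb_get_data() or the odb.h API ...
 *	odb_sync(&odb);
 *	odb_close(&odb);
 */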


void odb_close(odb_t * odb)
{
	odb_data_t * data = odb->data;

	if (data) {
		data->ref_count--;
		if (data->ref_count == 0) {
			size_t size = tables_size(data, data->descr->size);
			list_del(&data->list);
			munmap(data->base_memory, size);
			if (data->fd >= 0)
				close(data->fd);
			free(data->filename);
			free(data);
			odb->data = NULL;
		}
	}
}


int odb_open_count(odb_t const * odb)
{
	if (!odb->data)
		return 0;
	return odb->data->ref_count;
}


void * odb_get_data(odb_t * odb)
{
	return odb->data->base_memory;
}


void odb_sync(odb_t const * odb)
{
	odb_data_t * data = odb->data;
	size_t size;

	if (!data)
		return;

	size = tables_size(data, data->descr->size);
	msync(data->base_memory, size, MS_ASYNC);
}