db_manage.c revision 10e23eebca4175a8dfe3a788b2bebacb1fcfce54
/**
 * @file db_manage.c
 * Management of a DB file
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author Philippe Elie
 */

#define _GNU_SOURCE

#include <stdlib.h>
#ifndef ANDROID
#include <sys/fcntl.h>
#else
#include <fcntl.h>
#endif
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stdio.h>

#include "odb.h"
#include "op_string.h"
#include "op_libiberty.h"


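/**
 * return a pointer to the odb_descr_t stored in the mapping, just after
 * the caller supplied header
 */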
static __inline odb_descr_t * odb_to_descr(odb_data_t * data)
{
	return (odb_descr_t *)(((char*)data->base_memory) + data->sizeof_header);
}


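/**
 * return a pointer to the start of the node table inside the mapping
 */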
static __inline odb_node_t * odb_to_node_base(odb_data_t * data)
{
	return (odb_node_t *)(((char *)data->base_memory) + data->offset_node);
}


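/**
 * return a pointer to the start of the hash bucket table, laid out
 * immediately after the node table
 */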
static __inline odb_index_t * odb_to_hash_base(odb_data_t * data)
{
	return (odb_index_t *)(((char *)data->base_memory) +
				data->offset_node +
				(data->descr->size * sizeof(odb_node_t)));
}


/**
 * return the number of bytes used by hash table, node table and header.
 */
static unsigned int tables_size(odb_data_t const * data, odb_node_nr_t node_nr)
{
	size_t size;

	size = node_nr * (sizeof(odb_index_t) * BUCKET_FACTOR);
	size += node_nr * sizeof(odb_node_t);
	size += data->offset_node;

	return size;
}


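/**
 * double the size of the node and hash tables, growing the backing file
 * and remapping it, then rehash every node; return 0 on success, 1 on
 * failure
 */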
int odb_grow_hashtable(odb_data_t * data)
{
	unsigned int old_file_size;
	unsigned int new_file_size;
	unsigned int pos;
	void * new_map;

	old_file_size = tables_size(data, data->descr->size);
	new_file_size = tables_size(data, data->descr->size * 2);

	if (ftruncate(data->fd, new_file_size))
		return 1;

	new_map = mremap(data->base_memory,
			 old_file_size, new_file_size, MREMAP_MAYMOVE);

	if (new_map == MAP_FAILED)
		return 1;

	data->base_memory = new_map;
	data->descr = odb_to_descr(data);
	data->descr->size *= 2;
	data->node_base = odb_to_node_base(data);
	data->hash_base = odb_to_hash_base(data);
	data->hash_mask = (data->descr->size * BUCKET_FACTOR) - 1;

	/* Rebuild the hash table; node zero is never used.  We do not need
	 * to zero-initialise the new hash table first: the file layout is
	 * node table followed by hash table, and because the table size is
	 * doubled, the old and new hash tables cannot overlap, so the new
	 * hash table lies entirely in the newly grown part of the mapping,
	 * which is guaranteed to read as zeroes.  Strictly speaking this
	 * only holds while BUCKET_FACTOR * sizeof(odb_index_t) <=
	 * sizeof(odb_node_t); avoiding a pass over that memory is a useful
	 * saving, which is why the explicit zeroing below stays disabled.
	 */
#if 0
	for (pos = 0 ; pos < data->descr->size*BUCKET_FACTOR ; ++pos)
		data->hash_base[pos] = 0;
#endif

	for (pos = 1; pos < data->descr->current_size; ++pos) {
		odb_node_t * node = &data->node_base[pos];
		size_t index = odb_do_hash(data, node->key);
		node->next = data->hash_base[index];
		data->hash_base[index] = pos;
	}

	return 0;
}


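/**
 * initialise an odb_t to the closed state (no data attached), so it is
 * safe to pass to odb_close() even before a successful odb_open()
 */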
void odb_init(odb_t * odb)
{
	odb->data = NULL;
}


/* the default number of nodes, chosen so that the initial tables fit
 * in 4096 bytes */
#define DEFAULT_NODE_NR(offset_node)	128
#define FILES_HASH_SIZE                 512

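/* hash table of already opened files, keyed by filename, so that several
 * odb_open() calls on the same file share a single mapping */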
static struct list_head files_hash[FILES_HASH_SIZE];


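/* initialise every bucket; called lazily from find_samples_data() */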
static void init_hash()
{
	size_t i;
	for (i = 0; i < FILES_HASH_SIZE; ++i)
		list_init(&files_hash[i]);
}


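/**
 * look up an already opened file by name in the files_hash bucket 'hash';
 * return its odb_data_t, or NULL if the file is not currently open
 */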
static odb_data_t *
find_samples_data(size_t hash, char const * filename)
{
	struct list_head * pos;

	/* FIXME: maybe an initial init routine ? */
	if (files_hash[0].next == NULL) {
		init_hash();
		return NULL;
	}

	list_for_each(pos, &files_hash[hash]) {
		odb_data_t * entry = list_entry(pos, odb_data_t, list);
		if (strcmp(entry->filename, filename) == 0)
			return entry;
	}

	return NULL;
}


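/**
 * open or create the DB file 'filename' and map its tables, reusing an
 * existing mapping when the file is already open; return 0 on success or
 * an errno-style error code on failure
 */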
int odb_open(odb_t * odb, char const * filename, enum odb_rw rw,
	     size_t sizeof_header)
{
	struct stat stat_buf;
	odb_node_nr_t nr_node;
	odb_data_t * data;
	size_t hash;
	int err = 0;

	int flags = (rw == ODB_RDWR) ? (O_CREAT | O_RDWR) : O_RDONLY;
	int mmflags = (rw == ODB_RDWR) ? (PROT_READ | PROT_WRITE) : PROT_READ;

	hash = op_hash_string(filename) % FILES_HASH_SIZE;
	data = find_samples_data(hash, filename);
	if (data) {
		odb->data = data;
		data->ref_count++;
		return 0;
	}

	data = xmalloc(sizeof(odb_data_t));
	memset(data, '\0', sizeof(odb_data_t));
	list_init(&data->list);
	data->offset_node = sizeof_header + sizeof(odb_descr_t);
	data->sizeof_header = sizeof_header;
	data->ref_count = 1;
	data->filename = xstrdup(filename);

	data->fd = open(filename, flags, 0644);
	if (data->fd < 0) {
		err = errno;
		goto out;
	}

	if (fstat(data->fd, &stat_buf)) {
		err = errno;
		goto fail;
	}

	if (stat_buf.st_size == 0) {
		size_t file_size;

		if (rw == ODB_RDONLY) {
			err = EIO;
			goto fail;
		}

		nr_node = DEFAULT_NODE_NR(data->offset_node);

		file_size = tables_size(data, nr_node);
		if (ftruncate(data->fd, file_size)) {
			err = errno;
			goto fail;
		}
	} else {
		/* Calculate nr_node so we can sanity check it later. */
		nr_node = (stat_buf.st_size - data->offset_node) /
			((sizeof(odb_index_t) * BUCKET_FACTOR) + sizeof(odb_node_t));
	}

	data->base_memory = mmap(0, tables_size(data, nr_node), mmflags,
				MAP_SHARED, data->fd, 0);

	if (data->base_memory == MAP_FAILED) {
		err = errno;
		goto fail;
	}

	data->descr = odb_to_descr(data);

	if (stat_buf.st_size == 0) {
		data->descr->size = nr_node;
		/* node zero is never used */
		data->descr->current_size = 1;
	} else {
		/* file already exists, sanity check nr_node */
		if (nr_node != data->descr->size) {
			err = EINVAL;
			goto fail_unmap;
		}
	}

	data->hash_base = odb_to_hash_base(data);
	data->node_base = odb_to_node_base(data);
	data->hash_mask = (data->descr->size * BUCKET_FACTOR) - 1;

	list_add(&data->list, &files_hash[hash]);
	odb->data = data;
out:
	return err;
fail_unmap:
	munmap(data->base_memory, tables_size(data, nr_node));
fail:
	close(data->fd);
	free(data->filename);
	free(data);
	odb->data = NULL;
	goto out;
}


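/**
 * drop one reference to the DB; when the last reference goes away the
 * mapping is unmapped, the file descriptor closed and the data freed
 */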
void odb_close(odb_t * odb)
{
	odb_data_t * data = odb->data;

	if (data) {
		data->ref_count--;
		if (data->ref_count == 0) {
			size_t size = tables_size(data, data->descr->size);
			list_del(&data->list);
			munmap(data->base_memory, size);
			if (data->fd >= 0)
				close(data->fd);
			free(data->filename);
			free(data);
			odb->data = NULL;
		}
	}
}


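/**
 * return the current reference count of the DB, 0 if it is not open
 */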
int odb_open_count(odb_t const * odb)
{
	if (!odb->data)
		return 0;
	return odb->data->ref_count;
}


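/**
 * return the base address of the mapping; the caller supplied header
 * lives at the start of it
 */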
void * odb_get_data(odb_t * odb)
{
	return odb->data->base_memory;
}


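/**
 * schedule an asynchronous write-back of the mapped tables to disk
 */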
void odb_sync(odb_t const * odb)
{
	odb_data_t * data = odb->data;
	size_t size;

	if (!data)
		return;

	size = tables_size(data, data->descr->size);
	msync(data->base_memory, size, MS_ASYNC);
}
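
/* Illustrative usage sketch, not part of the library: it only uses the
 * functions defined in this file; "samples.db" and "struct my_header" are
 * hypothetical placeholders for a caller's file name and header type, and
 * error handling is elided.
 *
 *	odb_t odb;
 *	odb_init(&odb);
 *	if (!odb_open(&odb, "samples.db", ODB_RDWR, sizeof(struct my_header))) {
 *		... fill or read the mapped tables via odb_get_data() ...
 *		odb_sync(&odb);
 *		odb_close(&odb);
 *	}
 */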