allocate.c revision 8aef66d2125af8de7672a12895276802fcc1948f
/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include <stdlib.h>

#include "ext4_utils.h"
#include "allocate.h"
#include "backed_block.h"
#include "ext_utils.h"
#include "ext4.h"

struct region_list {
	struct region *first;
	struct region *last;
	struct region *iter;
	u32 partial_iter;
};

struct block_allocation {
	struct region_list list;
	struct region_list oob_list;
};

struct region {
	u32 block;
	u32 len;
	int bg;
	struct region *next;
	struct region *prev;
};

struct block_group_info {
	u32 first_block;
	int header_blocks;
	int data_blocks_used;
	int has_superblock;
	u8 *bitmaps;
	u8 *block_bitmap;
	u8 *inode_bitmap;
	u8 *inode_table;
	u32 free_blocks;
	u32 first_free_block;
	u32 free_inodes;
	u32 first_free_inode;
	u16 used_dirs;
};

struct block_allocation *create_allocation()
{
	struct block_allocation *alloc = malloc(sizeof(struct block_allocation));
	if (alloc == NULL)
		critical_error_errno("malloc");
	alloc->list.first = NULL;
	alloc->list.last = NULL;
	alloc->oob_list.first = NULL;
	alloc->oob_list.last = NULL;
	alloc->list.iter = NULL;
	alloc->list.partial_iter = 0;
	alloc->oob_list.iter = NULL;
	alloc->oob_list.partial_iter = 0;
	return alloc;
}

static void region_list_remove(struct region_list *list, struct region *reg)
{
	if (reg->prev)
		reg->prev->next = reg->next;

	if (reg->next)
		reg->next->prev = reg->prev;

	if (list->first == reg)
		list->first = reg->next;

	if (list->last == reg)
		list->last = reg->prev;

	reg->next = NULL;
	reg->prev = NULL;
}

static void region_list_append(struct region_list *list, struct region *reg)
{
	if (list->first == NULL) {
		list->first = reg;
		list->last = reg;
		list->iter = reg;
		list->partial_iter = 0;
		reg->prev = NULL;
	} else {
		list->last->next = reg;
		reg->prev = list->last;
		list->last = reg;
	}
	reg->next = NULL;
}

#if 0
static void dump_starting_from(struct region *reg)
{
	for (; reg; reg = reg->next) {
		printf("%p: Blocks %d-%d (%d)\n", reg,
				reg->bg * info.blocks_per_group + reg->block,
				reg->bg * info.blocks_per_group + reg->block + reg->len - 1,
				reg->len);
	}
}

static void dump_region_lists(struct block_allocation *alloc) {

	printf("Main list:\n");
	dump_starting_from(alloc->list.first);

	printf("OOB list:\n");
	dump_starting_from(alloc->oob_list.first);
}
#endif

void append_region(struct block_allocation *alloc,
		u32 block, u32 len, int bg_num)
{
	struct region *reg;
	reg = malloc(sizeof(struct region));
	if (reg == NULL)
		critical_error_errno("malloc");
	reg->block = block;
	reg->len = len;
	reg->bg = bg_num;
	reg->next = NULL;

	region_list_append(&alloc->list, reg);
}

static void allocate_bg_inode_table(struct block_group_info *bg)
{
	if (bg->inode_table != NULL)
		return;

	u32 block = bg->first_block + 2;

	if (bg->has_superblock)
		block += aux_info.bg_desc_blocks + aux_info.bg_desc_reserve_blocks + 1;

	bg->inode_table = calloc(aux_info.inode_table_blocks, info.block_size);
	if (bg->inode_table == NULL)
		critical_error_errno("calloc");

	queue_data_block(bg->inode_table, aux_info.inode_table_blocks
			* info.block_size, block);
}

static int bitmap_set_bit(u8 *bitmap, u32 bit)
{
	if (bitmap[bit / 8] & 1 << (bit % 8))
		return 1;

	bitmap[bit / 8] |= 1 << (bit % 8);
	return 0;
}

static int bitmap_set_8_bits(u8 *bitmap, u32 bit)
{
	int ret = bitmap[bit / 8];
	bitmap[bit / 8] = 0xFF;
	return ret;
}
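
/* A minimal sketch (not part of the original file, kept in #if 0 like the
   dump helpers above) of how the two bitmap helpers behave.  ext4 bitmaps
   are little-endian within each byte: bit 0 of byte 0 covers the first
   block of the group.  bitmap_set_8_bits assumes bit is a multiple of 8;
   reserve_blocks below aligns the block number before using it. */
#if 0
static void bitmap_helpers_example(void)
{
	u8 bitmap[2] = { 0, 0 };

	bitmap_set_bit(bitmap, 3);	/* bitmap[0] is now 0x08, returns 0 */
	bitmap_set_bit(bitmap, 3);	/* returns 1: bit was already set */
	bitmap_set_8_bits(bitmap, 8);	/* bitmap[1] is now 0xFF */
}
#endif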

/* Marks the blocks [start, start + num) in a block group as used, and
   accounts for them in the block group free block info. */
static int reserve_blocks(struct block_group_info *bg, u32 start, u32 num)
{
	unsigned int i = 0;

	u32 block = start;
	if (num > bg->free_blocks)
		return -1;

	for (i = 0; i < num && block % 8 != 0; i++, block++) {
		if (bitmap_set_bit(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block");
			return -1;
		}
	}

	for (; i + 8 <= (num & ~7); i += 8, block += 8) {
		if (bitmap_set_8_bits(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block");
			return -1;
		}
	}

	for (; i < num; i++, block++) {
		if (bitmap_set_bit(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block");
			return -1;
		}
	}

	bg->free_blocks -= num;
	if (start == bg->first_free_block)
		bg->first_free_block = start + num;

	return 0;
}

static void free_blocks(struct block_group_info *bg, u32 num_blocks)
{
	unsigned int i;
	u32 block = bg->first_free_block - 1;
	for (i = 0; i < num_blocks; i++, block--)
		bg->block_bitmap[block / 8] &= ~(1 << (block % 8));
	bg->free_blocks += num_blocks;
	bg->first_free_block -= num_blocks;
}

/* Reduces an existing allocation by len blocks by returning the last blocks
   to the free pool in their block group. Assumes that the blocks being
   returned were the last ones allocated out of the block group */
void reduce_allocation(struct block_allocation *alloc, u32 len)
{
	while (len) {
		struct region *last_reg = alloc->list.last;

		if (last_reg->len > len) {
			free_blocks(&aux_info.bgs[last_reg->bg], len);
			last_reg->len -= len;
			len = 0;
		} else {
			struct region *reg = alloc->list.last->prev;
			free_blocks(&aux_info.bgs[last_reg->bg], last_reg->len);
			len -= last_reg->len;
			if (reg) {
				reg->next = NULL;
				alloc->list.last = reg;
			} else {
				alloc->list.first = NULL;
				alloc->list.last = NULL;
				alloc->list.iter = NULL;
				alloc->list.partial_iter = 0;
			}
			free(last_reg);
		}
	}
}
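
/* Usage sketch (not part of the original file, kept in #if 0 like the dump
   helpers above): over-allocate and then return the unused tail.  This is
   only safe because the freed blocks are the most recently reserved ones
   in their block groups. */
#if 0
static void reduce_example(void)
{
	struct block_allocation *alloc = allocate_blocks(100);

	if (alloc == NULL)
		return;

	/* Suppose only 60 of the 100 blocks turned out to be needed */
	reduce_allocation(alloc, 40);

	free_alloc(alloc);
}
#endif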

static void init_bg(struct block_group_info *bg, unsigned int i)
{
	int header_blocks = 2 + aux_info.inode_table_blocks;

	bg->has_superblock = ext4_bg_has_super_block(i);

	if (bg->has_superblock)
		header_blocks += 1 + aux_info.bg_desc_blocks + aux_info.bg_desc_reserve_blocks;

	bg->bitmaps = calloc(info.block_size, 2);
	if (bg->bitmaps == NULL)
		critical_error_errno("calloc");
	bg->block_bitmap = bg->bitmaps;
	bg->inode_bitmap = bg->bitmaps + info.block_size;

	bg->header_blocks = header_blocks;
	bg->first_block = aux_info.first_data_block + i * info.blocks_per_group;

	u32 block = bg->first_block;
	if (bg->has_superblock)
		block += 1 + aux_info.bg_desc_blocks + aux_info.bg_desc_reserve_blocks;
	queue_data_block(bg->bitmaps, 2 * info.block_size, block);

	bg->data_blocks_used = 0;
	bg->free_blocks = info.blocks_per_group;
	bg->first_free_block = 0;
	bg->free_inodes = info.inodes_per_group;
	bg->first_free_inode = 1;

	if (reserve_blocks(bg, bg->first_free_block, bg->header_blocks) < 0)
		error("failed to reserve %d blocks in block group %u", bg->header_blocks, i);

	/* Compare before subtracting: the u32 subtraction would wrap for
	   groups that don't overrun the end of the filesystem */
	if (bg->first_block + info.blocks_per_group > aux_info.len_blocks) {
		u32 overrun = bg->first_block + info.blocks_per_group - aux_info.len_blocks;
		reserve_blocks(bg, info.blocks_per_group - overrun, overrun);
	}
}
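
/* Illustrative layout (not in the original source) of a block group that
   carries a superblock copy, as initialized above:

     first_block + 0                          superblock copy
     next bg_desc_blocks blocks               block group descriptors
     next bg_desc_reserve_blocks blocks       reserved for descriptor growth
     next block                               block bitmap
     next block                               inode bitmap
     next inode_table_blocks blocks           inode table
     remaining blocks                         data

   Groups without a superblock copy begin directly at the block bitmap. */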

void block_allocator_init()
{
	unsigned int i;

	aux_info.bgs = calloc(sizeof(struct block_group_info), aux_info.groups);
	if (aux_info.bgs == NULL)
		critical_error_errno("calloc");

	for (i = 0; i < aux_info.groups; i++)
		init_bg(&aux_info.bgs[i], i);
}

void block_allocator_free()
{
	unsigned int i;

	for (i = 0; i < aux_info.groups; i++) {
		free(aux_info.bgs[i].bitmaps);
		free(aux_info.bgs[i].inode_table);
	}
	free(aux_info.bgs);
}

static u32 ext4_allocate_blocks_from_block_group(u32 len, int bg_num)
{
	if (get_free_blocks(bg_num) < len)
		return EXT4_ALLOCATE_FAILED;

	struct block_group_info *bg = &aux_info.bgs[bg_num];
	u32 block = bg->first_free_block;
	if (reserve_blocks(bg, bg->first_free_block, len) < 0) {
		error("failed to reserve %u blocks in block group %d", len, bg_num);
		return EXT4_ALLOCATE_FAILED;
	}

	bg->data_blocks_used += len;

	return bg->first_block + block;
}

static struct region *ext4_allocate_contiguous_blocks(u32 len)
{
	unsigned int i;
	struct region *reg;

	for (i = 0; i < aux_info.groups; i++) {
		u32 block = ext4_allocate_blocks_from_block_group(len, i);

		if (block != EXT4_ALLOCATE_FAILED) {
			reg = malloc(sizeof(struct region));
			if (reg == NULL)
				critical_error_errno("malloc");
			reg->block = block;
			reg->len = len;
			reg->next = NULL;
			reg->prev = NULL;
			reg->bg = i;
			return reg;
		}
	}

	return NULL;
}

/* Allocate a single block and return its block number */
u32 allocate_block()
{
	unsigned int i;
	for (i = 0; i < aux_info.groups; i++) {
		u32 block = ext4_allocate_blocks_from_block_group(1, i);

		if (block != EXT4_ALLOCATE_FAILED)
			return block;
	}

	return EXT4_ALLOCATE_FAILED;
}

static struct region *ext4_allocate_partial(u32 len)
{
	unsigned int i;
	struct region *reg;

	for (i = 0; i < aux_info.groups; i++) {
		if (aux_info.bgs[i].data_blocks_used == 0) {
			u32 bg_len = aux_info.bgs[i].free_blocks;
			u32 block;

			if (len <= bg_len) {
				/* If the requested length would fit in a block group,
				   use the regular allocator to try to fit it in a partially
				   used block group */
				reg = ext4_allocate_contiguous_blocks(len);
			} else {
				block = ext4_allocate_blocks_from_block_group(bg_len, i);

				if (block == EXT4_ALLOCATE_FAILED) {
					error("failed to allocate %u blocks in block group %u", bg_len, i);
					return NULL;
				}

				reg = malloc(sizeof(struct region));
				if (reg == NULL)
					critical_error_errno("malloc");
				reg->block = block;
				reg->len = bg_len;
				reg->next = NULL;
				reg->prev = NULL;
				reg->bg = i;
			}

			return reg;
		}
	}
	return NULL;
}

static struct region *ext4_allocate_multiple_contiguous_blocks(u32 len)
{
	struct region *first_reg = NULL;
	struct region *prev_reg = NULL;
	struct region *reg;

	while (len > 0) {
		reg = ext4_allocate_partial(len);
		if (reg == NULL)
			return NULL;

		if (first_reg == NULL)
			first_reg = reg;

		if (prev_reg) {
			prev_reg->next = reg;
			reg->prev = prev_reg;
		}

		prev_reg = reg;
		len -= reg->len;
	}

	return first_reg;
}

struct region *do_allocate(u32 len)
{
	struct region *reg = ext4_allocate_contiguous_blocks(len);

	if (reg == NULL)
		reg = ext4_allocate_multiple_contiguous_blocks(len);

	return reg;
}

/* Allocate len blocks.  The blocks may be spread across multiple block groups,
   and are returned in a linked list of the blocks in each block group.  The
   allocation algorithm is:
      1.  Try to allocate the entire length contiguously in a single block group
      2.  If the request doesn't fit in any block group, allocate as many
          blocks as possible into each block group that is completely empty
      3.  Put the last set of blocks in the first block group they fit in
*/
struct block_allocation *allocate_blocks(u32 len)
{
	struct region *reg = do_allocate(len);

	if (reg == NULL)
		return NULL;

	struct block_allocation *alloc = create_allocation();
	alloc->list.first = reg;

	/* do_allocate may return a chain of regions; find the real tail */
	while (reg->next != NULL)
		reg = reg->next;
	alloc->list.last = reg;

	alloc->list.iter = alloc->list.first;
	alloc->list.partial_iter = 0;
	return alloc;
}
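
/* A minimal usage sketch (not part of the original file, kept in #if 0 like
   the dump helpers above): allocate a run of blocks, report how it was laid
   out, and release the region list (the blocks themselves stay reserved in
   their block groups). */
#if 0
static void allocate_blocks_example(void)
{
	struct block_allocation *alloc = allocate_blocks(1000);

	if (alloc == NULL) {
		error("failed to allocate 1000 blocks");
		return;
	}

	printf("%d blocks in %d regions\n",
			block_allocation_len(alloc),
			block_allocation_num_regions(alloc));

	free_alloc(alloc);
}
#endif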

/* Returns the number of discontiguous regions used by an allocation */
int block_allocation_num_regions(struct block_allocation *alloc)
{
	unsigned int i;
	struct region *reg = alloc->list.first;

	for (i = 0; reg != NULL; reg = reg->next)
		i++;

	return i;
}

int block_allocation_len(struct block_allocation *alloc)
{
	unsigned int i;
	struct region *reg = alloc->list.first;

	for (i = 0; reg != NULL; reg = reg->next)
		i += reg->len;

	return i;
}

/* Returns the block number of the block'th block in an allocation */
u32 get_block(struct block_allocation *alloc, u32 block)
{
	struct region *reg = alloc->list.iter;
	block += alloc->list.partial_iter;

	for (; reg; reg = reg->next) {
		if (block < reg->len)
			return reg->block + block;
		block -= reg->len;
	}
	return EXT4_ALLOCATE_FAILED;
}

u32 get_oob_block(struct block_allocation *alloc, u32 block)
{
	struct region *reg = alloc->oob_list.iter;
	block += alloc->oob_list.partial_iter;

	for (; reg; reg = reg->next) {
		if (block < reg->len)
			return reg->block + block;
		block -= reg->len;
	}
	return EXT4_ALLOCATE_FAILED;
}

/* Gets the starting block and length in blocks of the current region
   of an allocation */
void get_region(struct block_allocation *alloc, u32 *block, u32 *len)
{
	*block = alloc->list.iter->block;
	*len = alloc->list.iter->len - alloc->list.partial_iter;
}

/* Move to the next region in an allocation */
void get_next_region(struct block_allocation *alloc)
{
	alloc->list.iter = alloc->list.iter->next;
	alloc->list.partial_iter = 0;
}

/* Returns the number of free blocks in a block group */
u32 get_free_blocks(u32 bg)
{
	return aux_info.bgs[bg].free_blocks;
}

int last_region(struct block_allocation *alloc)
{
	return (alloc->list.iter == NULL);
}

void rewind_alloc(struct block_allocation *alloc)
{
	alloc->list.iter = alloc->list.first;
	alloc->list.partial_iter = 0;
}
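
/* Iteration sketch (not part of the original file, kept in #if 0 like the
   dump helpers above): visit every contiguous region of an allocation with
   the iterator accessors above. */
#if 0
static void iterate_example(struct block_allocation *alloc)
{
	u32 block;
	u32 len;

	rewind_alloc(alloc);
	while (!last_region(alloc)) {
		get_region(alloc, &block, &len);
		printf("%u blocks starting at block %u\n", len, block);
		get_next_region(alloc);
	}
}
#endif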

static struct region *do_split_allocation(struct block_allocation *alloc, u32 len)
{
	struct region *reg = alloc->list.iter;
	struct region *new;
	struct region *tmp;

	while (reg && len >= reg->len) {
		len -= reg->len;
		reg = reg->next;
	}

	if (reg == NULL && len > 0)
		return NULL;

	if (len > 0) {
		new = malloc(sizeof(struct region));
		if (new == NULL)
			critical_error_errno("malloc");

		new->bg = reg->bg;
		new->block = reg->block + len;
		new->len = reg->len - len;
		new->next = reg->next;
		new->prev = reg;

		/* Keep the back-pointers and the list tail consistent */
		if (new->next)
			new->next->prev = new;
		else
			alloc->list.last = new;

		reg->next = new;
		reg->len = len;

		tmp = alloc->list.iter;
		alloc->list.iter = new;
		return tmp;
	} else {
		return reg;
	}
}

/* Splits an allocation in two at len blocks past the current pointer.
   Returns the region at the start of the first half; the allocation's
   iterator is left pointing at the start of the second half. */
static struct region *split_allocation(struct block_allocation *alloc, u32 len)
{
	/* First make sure there is a split at the current ptr */
	do_split_allocation(alloc, alloc->list.partial_iter);

	/* Then split off len blocks */
	struct region *middle = do_split_allocation(alloc, len);
	alloc->list.partial_iter = 0;
	return middle;
}

/* Reserve the next blocks for oob data (indirect or extent blocks) */
int reserve_oob_blocks(struct block_allocation *alloc, int blocks)
{
	struct region *oob = split_allocation(alloc, blocks);
	struct region *next;

	if (oob == NULL)
		return -1;

	while (oob && oob != alloc->list.iter) {
		next = oob->next;
		region_list_remove(&alloc->list, oob);
		region_list_append(&alloc->oob_list, oob);
		oob = next;
	}

	return 0;
}
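
/* Sketch (not part of the original file, kept in #if 0 like the dump
   helpers above) of reserving out-of-band blocks for indirect or extent
   metadata: the next blocks at the allocation's current position move to
   the oob list, and data continues from the block after them. */
#if 0
static void oob_example(struct block_allocation *alloc)
{
	if (reserve_oob_blocks(alloc, 1) < 0) {
		error("allocation too small to hold an oob block");
		return;
	}

	/* The reserved metadata block is addressed through the oob list */
	u32 metadata_block = get_oob_block(alloc, 0);
	printf("metadata at block %u\n", metadata_block);
}
#endif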

static int advance_list_ptr(struct region_list *list, int blocks)
{
	struct region *reg = list->iter;

	while (reg != NULL && blocks > 0) {
		if (reg->len > list->partial_iter + blocks) {
			list->partial_iter += blocks;
			return 0;
		}

		blocks -= (reg->len - list->partial_iter);
		list->partial_iter = 0;
		reg = reg->next;
	}

	if (blocks > 0)
		return -1;

	/* An advance that lands exactly on a region boundary must move the
	   iterator to the next region, not leave it on the consumed one */
	list->iter = reg;
	return 0;
}

/* Move the allocation pointer forward */
int advance_blocks(struct block_allocation *alloc, int blocks)
{
	return advance_list_ptr(&alloc->list, blocks);
}

int advance_oob_blocks(struct block_allocation *alloc, int blocks)
{
	return advance_list_ptr(&alloc->oob_list, blocks);
}

int append_oob_allocation(struct block_allocation *alloc, u32 len)
{
	struct region *reg = do_allocate(len);

	if (reg == NULL) {
		error("failed to allocate %u blocks", len);
		return -1;
	}

	for (; reg; reg = reg->next)
		region_list_append(&alloc->oob_list, reg);

	return 0;
}

/* Returns an ext4_inode structure for an inode number */
struct ext4_inode *get_inode(u32 inode)
{
	inode -= 1;
	int bg = inode / info.inodes_per_group;
	inode %= info.inodes_per_group;

	allocate_bg_inode_table(&aux_info.bgs[bg]);
	return (struct ext4_inode *)(aux_info.bgs[bg].inode_table + inode *
		info.inode_size);
}
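
/* Worked example (not from the original source): with 8192 inodes per
   group and 256-byte inodes, inode number 8195 maps to bg = (8195 - 1) /
   8192 = 1 and index (8195 - 1) % 8192 = 2, i.e. byte offset 2 * 256 = 512
   into block group 1's inode table. */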

/* Marks the next num free inodes in a block group as used, and returns
   the first one (inode numbers are 1-based within the group) */
u32 reserve_inodes(int bg, u32 num)
{
	unsigned int i;
	u32 inode;

	if (get_free_inodes(bg) < num)
		return EXT4_ALLOCATE_FAILED;

	for (i = 0; i < num; i++) {
		inode = aux_info.bgs[bg].first_free_inode + i - 1;
		aux_info.bgs[bg].inode_bitmap[inode / 8] |= 1 << (inode % 8);
	}

	inode = aux_info.bgs[bg].first_free_inode;

	aux_info.bgs[bg].first_free_inode += num;
	aux_info.bgs[bg].free_inodes -= num;

	return inode;
}

/* Returns the first free inode number
   TODO: Inodes should be allocated in the block group of the data? */
u32 allocate_inode()
{
	unsigned int bg;
	u32 inode;

	for (bg = 0; bg < aux_info.groups; bg++) {
		inode = reserve_inodes(bg, 1);
		if (inode != EXT4_ALLOCATE_FAILED)
			return bg * info.inodes_per_group + inode;
	}

	return EXT4_ALLOCATE_FAILED;
}

/* Returns the number of free inodes in a block group */
u32 get_free_inodes(u32 bg)
{
	return aux_info.bgs[bg].free_inodes;
}

/* Increments the directory count of the block group that contains inode */
void add_directory(u32 inode)
{
	int bg = (inode - 1) / info.inodes_per_group;
	aux_info.bgs[bg].used_dirs += 1;
}

/* Returns the number of inodes in a block group that are directories */
u16 get_directories(int bg)
{
	return aux_info.bgs[bg].used_dirs;
}

/* Frees the memory used by an allocation's region lists, and the
   allocation itself */
void free_alloc(struct block_allocation *alloc)
{
	struct region *reg;

	reg = alloc->list.first;
	while (reg) {
		struct region *next = reg->next;
		free(reg);
		reg = next;
	}

	reg = alloc->oob_list.first;
	while (reg) {
		struct region *next = reg->next;
		free(reg);
		reg = next;
	}

	free(alloc);
}