bmap.c revision 1e7451493ed67ae94ba5c1d2bed059527bc9848d
/*
 * bmap.c --- logical to physical block mapping
 *
 * Copyright (C) 1997 Theodore Ts'o.
 *
 * %Begin-Header%
 * This file may be redistributed under the terms of the GNU Library
 * General Public License, version 2.
 * %End-Header%
 */

#include "config.h"
#include <stdio.h>
#include <string.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <errno.h>

#include "ext2_fs.h"
#include "ext2fs.h"

#if defined(__GNUC__) && !defined(NO_INLINE_FUNCS)
#define _BMAP_INLINE_	__inline__
#else
#define _BMAP_INLINE_
#endif

extern errcode_t ext2fs_bmap(ext2_filsys fs, ext2_ino_t ino,
			     struct ext2_inode *inode,
			     char *block_buf, int bmap_flags,
			     blk_t block, blk_t *phys_blk);

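/*
 * Accessor for the block pointers stored directly in the inode's
 * i_block[] array: the EXT2_NDIR_BLOCKS direct entries plus the singly,
 * doubly and triply indirect block pointers.
 */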
#define inode_bmap(inode, nr) ((inode)->i_block[(nr)])

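/*
 * Look up entry 'nr' in the indirect block 'ind', reading the block into
 * block_buf.  With BMAP_SET the entry is overwritten with *ret_blk and
 * the indirect block is written back; otherwise the entry is returned in
 * *ret_blk, allocating a new block (and updating the indirect block) if
 * the entry is zero and BMAP_ALLOC is set.
 */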
static _BMAP_INLINE_ errcode_t block_ind_bmap(ext2_filsys fs, int flags,
					      blk_t ind, char *block_buf,
					      int *blocks_alloc,
					      blk_t nr, blk_t *ret_blk)
{
	errcode_t	retval;
	blk_t		b;

	if (!ind) {
		if (flags & BMAP_SET)
			return EXT2_ET_SET_BMAP_NO_IND;
		*ret_blk = 0;
		return 0;
	}
	retval = io_channel_read_blk(fs->io, ind, 1, block_buf);
	if (retval)
		return retval;

	if (flags & BMAP_SET) {
		b = *ret_blk;
#ifdef WORDS_BIGENDIAN
		b = ext2fs_swab32(b);
#endif
		((blk_t *) block_buf)[nr] = b;
		return io_channel_write_blk(fs->io, ind, 1, block_buf);
	}

	b = ((blk_t *) block_buf)[nr];

#ifdef WORDS_BIGENDIAN
	b = ext2fs_swab32(b);
#endif

	if (!b && (flags & BMAP_ALLOC)) {
		b = nr ? ((blk_t *) block_buf)[nr-1] : 0;
		retval = ext2fs_alloc_block(fs, b,
					    block_buf + fs->blocksize, &b);
		if (retval)
			return retval;

#ifdef WORDS_BIGENDIAN
		((blk_t *) block_buf)[nr] = ext2fs_swab32(b);
#else
		((blk_t *) block_buf)[nr] = b;
#endif

		retval = io_channel_write_blk(fs->io, ind, 1, block_buf);
		if (retval)
			return retval;

		(*blocks_alloc)++;
	}

	*ret_blk = b;
	return 0;
}

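/*
 * Resolve entry 'nr' through a doubly indirect block: the top level is
 * walked without BMAP_SET (only the bottom-level entry may be set), and
 * block_ind_bmap() then handles the remainder within the selected
 * indirect block.
 */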
static _BMAP_INLINE_ errcode_t block_dind_bmap(ext2_filsys fs, int flags,
					       blk_t dind, char *block_buf,
					       int *blocks_alloc,
					       blk_t nr, blk_t *ret_blk)
{
	blk_t		b = 0;
	errcode_t	retval;
	blk_t		addr_per_block;

	addr_per_block = (blk_t) fs->blocksize >> 2;

	retval = block_ind_bmap(fs, flags & ~BMAP_SET, dind, block_buf,
				blocks_alloc, nr / addr_per_block, &b);
	if (retval)
		return retval;
	retval = block_ind_bmap(fs, flags, b, block_buf, blocks_alloc,
				nr % addr_per_block, ret_blk);
	return retval;
}

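/*
 * Resolve entry 'nr' through a triply indirect block by mapping the
 * upper two levels via block_dind_bmap() and the final level via
 * block_ind_bmap().
 */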
static _BMAP_INLINE_ errcode_t block_tind_bmap(ext2_filsys fs, int flags,
					       blk_t tind, char *block_buf,
					       int *blocks_alloc,
					       blk_t nr, blk_t *ret_blk)
{
	blk_t		b = 0;
	errcode_t	retval;
	blk_t		addr_per_block;

	addr_per_block = (blk_t) fs->blocksize >> 2;

	retval = block_dind_bmap(fs, flags & ~BMAP_SET, tind, block_buf,
				 blocks_alloc, nr / addr_per_block, &b);
	if (retval)
		return retval;
	retval = block_ind_bmap(fs, flags, b, block_buf, blocks_alloc,
				nr % addr_per_block, ret_blk);
	return retval;
}

static errcode_t extent_bmap(ext2_filsys fs, ext2_ino_t ino,
			     struct ext2_inode *inode,
			     ext2_extent_handle_t handle,
			     char *block_buf, int bmap_flags, blk64_t block,
			     int *ret_flags, int *blocks_alloc,
			     blk64_t *phys_blk);

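/*
 * On a bigalloc file system a logical block may already be backed by
 * storage because another block in the same logical cluster is mapped.
 * If such a mapping exists, compute the physical block corresponding to
 * lblk and return it in *phys_blk; otherwise *phys_blk is left untouched.
 */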
static errcode_t implied_cluster_alloc(ext2_filsys fs, ext2_ino_t ino,
				       struct ext2_inode *inode,
				       ext2_extent_handle_t handle,
				       blk64_t lblk, blk64_t *phys_blk)
{
	blk64_t	base_block, pblock = 0;
	int i;

	if (!EXT2_HAS_RO_COMPAT_FEATURE(fs->super,
					EXT4_FEATURE_RO_COMPAT_BIGALLOC))
		return 0;

	base_block = lblk & ~EXT2FS_CLUSTER_MASK(fs);
	/*
	 * Except for the logical block (lblk) that was passed in, search all
	 * blocks in this logical cluster for a mapping to a physical cluster.
	 * If any such map exists, calculate the physical block that maps to
	 * the logical block and return that.
	 *
	 * The old code wouldn't even look if (block % cluster_ratio) == 0;
	 * this is incorrect if we're allocating blocks in reverse order.
	 */
	for (i = 0; i < EXT2FS_CLUSTER_RATIO(fs); i++) {
		if (base_block + i == lblk)
			continue;
		extent_bmap(fs, ino, inode, handle, 0, 0,
			    base_block + i, 0, 0, &pblock);
		if (pblock)
			break;
	}
	if (pblock == 0)
		return 0;
	*phys_blk = pblock - i + (lblk - base_block);
	return 0;
}

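/*
 * Map logical block 'block' of an extent-based inode using an already
 * opened extent handle.  BMAP_SET records *phys_blk as the new mapping;
 * otherwise the extent tree is searched, and with BMAP_ALLOC an unmapped
 * block is allocated (preferring an implied cluster mapping or the
 * previous block's neighborhood as the goal), inserted into the tree,
 * and the inode is re-read since the extent code may have updated it.
 */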
static errcode_t extent_bmap(ext2_filsys fs, ext2_ino_t ino,
			     struct ext2_inode *inode,
			     ext2_extent_handle_t handle,
			     char *block_buf, int bmap_flags, blk64_t block,
			     int *ret_flags, int *blocks_alloc,
			     blk64_t *phys_blk)
{
	struct ext2fs_extent	extent;
	unsigned int		offset;
	errcode_t		retval = 0;
	blk64_t			blk64 = 0;
	int			alloc = 0;

	if (bmap_flags & BMAP_SET) {
		retval = ext2fs_extent_set_bmap(handle, block,
						*phys_blk, 0);
		return retval;
	}
	retval = ext2fs_extent_goto(handle, block);
	if (retval) {
		/* If the extent is not found, return phys_blk = 0 */
		if (retval == EXT2_ET_EXTENT_NOT_FOUND)
			goto got_block;
		return retval;
	}
	retval = ext2fs_extent_get(handle, EXT2_EXTENT_CURRENT, &extent);
	if (retval)
		return retval;
	offset = block - extent.e_lblk;
	if (block >= extent.e_lblk && (offset <= extent.e_len)) {
		*phys_blk = extent.e_pblk + offset;
		if (ret_flags && extent.e_flags & EXT2_EXTENT_FLAGS_UNINIT)
			*ret_flags |= BMAP_RET_UNINIT;
	}
got_block:
	if ((*phys_blk == 0) && (bmap_flags & BMAP_ALLOC)) {
		implied_cluster_alloc(fs, ino, inode, handle, block, &blk64);
		if (blk64)
			goto set_extent;
		retval = extent_bmap(fs, ino, inode, handle, block_buf,
				     0, block-1, 0, blocks_alloc, &blk64);
		if (retval)
			blk64 = 0;
		retval = ext2fs_alloc_block2(fs, blk64, block_buf,
					     &blk64);
		if (retval)
			return retval;
		blk64 &= ~EXT2FS_CLUSTER_MASK(fs);
		blk64 += EXT2FS_CLUSTER_MASK(fs) & block;
		alloc++;
	set_extent:
		retval = ext2fs_extent_set_bmap(handle, block,
						blk64, 0);
		if (retval)
			return retval;
		/* Update inode after setting extent */
		retval = ext2fs_read_inode(fs, ino, inode);
		if (retval)
			return retval;
		*blocks_alloc += alloc;
		*phys_blk = blk64;
	}
	return 0;
}

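/*
 * Return non-zero if logical block 'offset' cannot be mapped by this
 * inode: the kernel limits any file to 2^32 - 2 blocks, and an old-style
 * block-mapped inode is further limited to the 12 direct blocks plus the
 * singly, doubly and triply indirect ranges.
 */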
int ext2fs_file_block_offset_too_big(ext2_filsys fs,
				     struct ext2_inode *inode,
				     blk64_t offset)
{
	blk64_t addr_per_block, max_map_block;

	/* Kernel seems to cut us off at 4294967294 blocks */
	if (offset >= (1ULL << 32) - 1)
		return 1;

	if (inode->i_flags & EXT4_EXTENTS_FL)
		return 0;

	addr_per_block = fs->blocksize >> 2;
	max_map_block = addr_per_block;
	max_map_block += addr_per_block * addr_per_block;
	max_map_block += addr_per_block * addr_per_block * addr_per_block;
	max_map_block += 12;

	return offset >= max_map_block;
}

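/*
 * Translate logical block 'block' of inode 'ino' into a physical block
 * number in *phys_blk.  bmap_flags may request setting a mapping
 * (BMAP_SET) or allocating a block for an unmapped location (BMAP_ALLOC);
 * BMAP_RET_UNINIT is reported via *ret_flags for uninitialized extents.
 * Both extent-mapped and block-mapped inodes are handled.  If 'inode' or
 * 'block_buf' is NULL, the inode is read and a scratch buffer allocated
 * here; any newly allocated blocks are accounted in i_blocks and the
 * inode is written back when it has been modified.
 */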
errcode_t ext2fs_bmap2(ext2_filsys fs, ext2_ino_t ino, struct ext2_inode *inode,
		       char *block_buf, int bmap_flags, blk64_t block,
		       int *ret_flags, blk64_t *phys_blk)
{
	struct ext2_inode inode_buf;
	ext2_extent_handle_t handle = 0;
	blk_t addr_per_block;
	blk_t	b, blk32;
	char	*buf = 0;
	errcode_t	retval = 0;
	int		blocks_alloc = 0, inode_dirty = 0;

	if (!(bmap_flags & BMAP_SET))
		*phys_blk = 0;

	if (ret_flags)
		*ret_flags = 0;

	/* Read inode structure if necessary */
	if (!inode) {
		retval = ext2fs_read_inode(fs, ino, &inode_buf);
		if (retval)
			return retval;
		inode = &inode_buf;
	}
	addr_per_block = (blk_t) fs->blocksize >> 2;

	if (ext2fs_file_block_offset_too_big(fs, inode, block))
		return EXT2_ET_FILE_TOO_BIG;

	if (!block_buf) {
		retval = ext2fs_get_array(2, fs->blocksize, &buf);
		if (retval)
			return retval;
		block_buf = buf;
	}

	if (inode->i_flags & EXT4_EXTENTS_FL) {
		retval = ext2fs_extent_open2(fs, ino, inode, &handle);
		if (retval)
			goto done;
		retval = extent_bmap(fs, ino, inode, handle, block_buf,
				     bmap_flags, block, ret_flags,
				     &blocks_alloc, phys_blk);
		goto done;
	}

	if (block < EXT2_NDIR_BLOCKS) {
		if (bmap_flags & BMAP_SET) {
			b = *phys_blk;
			inode_bmap(inode, block) = b;
			inode_dirty++;
			goto done;
		}

		*phys_blk = inode_bmap(inode, block);
		b = block ? inode_bmap(inode, block-1) : 0;

		if ((*phys_blk == 0) && (bmap_flags & BMAP_ALLOC)) {
			retval = ext2fs_alloc_block(fs, b, block_buf, &b);
			if (retval)
				goto done;
			inode_bmap(inode, block) = b;
			blocks_alloc++;
			*phys_blk = b;
		}
		goto done;
	}

	/* Indirect block */
	block -= EXT2_NDIR_BLOCKS;
	blk32 = *phys_blk;
	if (block < addr_per_block) {
		b = inode_bmap(inode, EXT2_IND_BLOCK);
		if (!b) {
			if (!(bmap_flags & BMAP_ALLOC)) {
				if (bmap_flags & BMAP_SET)
					retval = EXT2_ET_SET_BMAP_NO_IND;
				goto done;
			}

			b = inode_bmap(inode, EXT2_IND_BLOCK-1);
			retval = ext2fs_alloc_block(fs, b, block_buf, &b);
			if (retval)
				goto done;
			inode_bmap(inode, EXT2_IND_BLOCK) = b;
			blocks_alloc++;
		}
		retval = block_ind_bmap(fs, bmap_flags, b, block_buf,
					&blocks_alloc, block, &blk32);
		if (retval == 0)
			*phys_blk = blk32;
		goto done;
	}

	/* Doubly indirect block */
	block -= addr_per_block;
	if (block < addr_per_block * addr_per_block) {
		b = inode_bmap(inode, EXT2_DIND_BLOCK);
		if (!b) {
			if (!(bmap_flags & BMAP_ALLOC)) {
				if (bmap_flags & BMAP_SET)
					retval = EXT2_ET_SET_BMAP_NO_IND;
				goto done;
			}

			b = inode_bmap(inode, EXT2_IND_BLOCK);
			retval = ext2fs_alloc_block(fs, b, block_buf, &b);
			if (retval)
				goto done;
			inode_bmap(inode, EXT2_DIND_BLOCK) = b;
			blocks_alloc++;
		}
		retval = block_dind_bmap(fs, bmap_flags, b, block_buf,
					 &blocks_alloc, block, &blk32);
		if (retval == 0)
			*phys_blk = blk32;
		goto done;
	}

	/* Triply indirect block */
	block -= addr_per_block * addr_per_block;
	b = inode_bmap(inode, EXT2_TIND_BLOCK);
	if (!b) {
		if (!(bmap_flags & BMAP_ALLOC)) {
			if (bmap_flags & BMAP_SET)
				retval = EXT2_ET_SET_BMAP_NO_IND;
			goto done;
		}

		b = inode_bmap(inode, EXT2_DIND_BLOCK);
		retval = ext2fs_alloc_block(fs, b, block_buf, &b);
		if (retval)
			goto done;
		inode_bmap(inode, EXT2_TIND_BLOCK) = b;
		blocks_alloc++;
	}
	retval = block_tind_bmap(fs, bmap_flags, b, block_buf,
				 &blocks_alloc, block, &blk32);
	if (retval == 0)
		*phys_blk = blk32;
done:
	if (buf)
		ext2fs_free_mem(&buf);
	if (handle)
		ext2fs_extent_free(handle);
	if ((retval == 0) && (blocks_alloc || inode_dirty)) {
		ext2fs_iblk_add_blocks(fs, inode, blocks_alloc);
		retval = ext2fs_write_inode(fs, ino, inode);
	}
	return retval;
}

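/*
 * Legacy 32-bit interface: a thin wrapper around ext2fs_bmap2() that
 * fails with EOVERFLOW if the resulting physical block number does not
 * fit in 32 bits.
 */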
errcode_t ext2fs_bmap(ext2_filsys fs, ext2_ino_t ino, struct ext2_inode *inode,
		      char *block_buf, int bmap_flags, blk_t block,
		      blk_t *phys_blk)
{
	errcode_t ret;
	blk64_t	ret_blk = *phys_blk;

	ret = ext2fs_bmap2(fs, ino, inode, block_buf, bmap_flags, block,
			    0, &ret_blk);
	if (ret)
		return ret;
	if (ret_blk >= ((long long) 1 << 32))
		return EOVERFLOW;
	*phys_blk = ret_blk;
	return 0;
}

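/*
 * Usage sketch (illustrative only, not part of the library): look up one
 * logical block of a file on an already-opened file system.  Here "fs"
 * and "ino" are assumed to come from ext2fs_open2() and ext2fs_namei()
 * respectively; error handling is abbreviated.
 *
 *	blk64_t phys = 0;
 *	int ret_flags = 0;
 *	errcode_t err;
 *
 *	err = ext2fs_bmap2(fs, ino, NULL, NULL, 0, 10, &ret_flags, &phys);
 *	if (err)
 *		com_err("bmap", err, "while mapping logical block 10");
 *	else if (phys == 0)
 *		printf("logical block 10 is a hole\n");
 *	else
 *		printf("logical block 10 -> physical block %llu%s\n",
 *		       (unsigned long long) phys,
 *		       (ret_flags & BMAP_RET_UNINIT) ? " (uninit)" : "");
 */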