inode.c revision 31170b6ad4ebe6c43c1cc3b8112274cf59474de0
1/*
2 * inode.c
3 *
4 * PURPOSE
5 *  Inode handling routines for the OSTA-UDF(tm) filesystem.
6 *
7 * COPYRIGHT
8 *  This file is distributed under the terms of the GNU General Public
9 *  License (GPL). Copies of the GPL can be obtained from:
10 *    ftp://prep.ai.mit.edu/pub/gnu/GPL
11 *  Each contributing author retains all rights to their own work.
12 *
13 *  (C) 1998 Dave Boynton
14 *  (C) 1998-2004 Ben Fennema
15 *  (C) 1999-2000 Stelias Computing Inc
16 *
17 * HISTORY
18 *
19 *  10/04/98 dgb  Added rudimentary directory functions
20 *  10/07/98      Fully working udf_block_map! It works!
21 *  11/25/98      bmap altered to better support extents
22 *  12/06/98 blf  partition support in udf_iget, udf_block_map and udf_read_inode
23 *  12/12/98      rewrote udf_block_map to handle next extents and descs across
24 *                block boundaries (which is not actually allowed)
25 *  12/20/98      added support for strategy 4096
26 *  03/07/99      rewrote udf_block_map (again)
27 *                New funcs, inode_bmap, udf_next_aext
28 *  04/19/99      Support for writing device EA's for major/minor #
29 */
30
31#include "udfdecl.h"
32#include <linux/mm.h>
33#include <linux/smp_lock.h>
34#include <linux/module.h>
35#include <linux/pagemap.h>
36#include <linux/buffer_head.h>
37#include <linux/writeback.h>
38#include <linux/slab.h>
39
40#include "udf_i.h"
41#include "udf_sb.h"
42
43MODULE_AUTHOR("Ben Fennema");
44MODULE_DESCRIPTION("Universal Disk Format Filesystem");
45MODULE_LICENSE("GPL");
46
47#define EXTENT_MERGE_SIZE 5
48
49static mode_t udf_convert_permissions(struct fileEntry *);
50static int udf_update_inode(struct inode *, int);
51static void udf_fill_inode(struct inode *, struct buffer_head *);
52static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
53	long *, int *);
54static int8_t udf_insert_aext(struct inode *, struct extent_position,
55	kernel_lb_addr, uint32_t);
56static void udf_split_extents(struct inode *, int *, int, int,
57	kernel_long_ad [EXTENT_MERGE_SIZE], int *);
58static void udf_prealloc_extents(struct inode *, int, int,
59	 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
60static void udf_merge_extents(struct inode *,
61	 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
62static void udf_update_extents(struct inode *,
63	kernel_long_ad [EXTENT_MERGE_SIZE], int, int,
64	struct extent_position *);
65static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
66
67/*
68 * udf_delete_inode
69 *
70 * PURPOSE
71 *	Clean-up before the specified inode is destroyed.
72 *
73 * DESCRIPTION
74 *	This routine is called when the kernel destroys an inode structure
75 *	i.e. when iput() finds i_count == 0.
76 *
77 * HISTORY
78 *	July 1, 1997 - Andrew E. Mileski
79 *	Written, tested, and released.
80 *
81 *  Called at the last iput() if i_nlink is zero.
82 */
83void udf_delete_inode(struct inode * inode)
84{
85	truncate_inode_pages(&inode->i_data, 0);
86
87	if (is_bad_inode(inode))
88		goto no_delete;
89
90	inode->i_size = 0;
91	udf_truncate(inode);
92	lock_kernel();
93
94	udf_update_inode(inode, IS_SYNC(inode));
95	udf_free_inode(inode);
96
97	unlock_kernel();
98	return;
99no_delete:
100	clear_inode(inode);
101}
102
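/*
 * udf_clear_inode
 *
 * Called when the inode is dropped from the inode cache: discard any
 * preallocated blocks (unless the filesystem is mounted read-only) and
 * free the in-core copy of the file entry data.
 */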
103void udf_clear_inode(struct inode *inode)
104{
105	if (!(inode->i_sb->s_flags & MS_RDONLY)) {
106		lock_kernel();
107		udf_discard_prealloc(inode);
108		unlock_kernel();
109	}
110
111	kfree(UDF_I_DATA(inode));
112	UDF_I_DATA(inode) = NULL;
113}
114
115static int udf_writepage(struct page *page, struct writeback_control *wbc)
116{
117	return block_write_full_page(page, udf_get_block, wbc);
118}
119
120static int udf_readpage(struct file *file, struct page *page)
121{
122	return block_read_full_page(page, udf_get_block);
123}
124
125static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
126{
127	return block_prepare_write(page, from, to, udf_get_block);
128}
129
130static sector_t udf_bmap(struct address_space *mapping, sector_t block)
131{
132	return generic_block_bmap(mapping,block,udf_get_block);
133}
134
135const struct address_space_operations udf_aops = {
136	.readpage		= udf_readpage,
137	.writepage		= udf_writepage,
138	.sync_page		= block_sync_page,
139	.prepare_write		= udf_prepare_write,
140	.commit_write		= generic_commit_write,
141	.bmap			= udf_bmap,
142};
143
144void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
145{
146	struct page *page;
147	char *kaddr;
148	struct writeback_control udf_wbc = {
149		.sync_mode = WB_SYNC_NONE,
150		.nr_to_write = 1,
151	};
152
153	/* from now on we have normal address_space methods */
154	inode->i_data.a_ops = &udf_aops;
155
156	if (!UDF_I_LENALLOC(inode))
157	{
158		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
159			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
160		else
161			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
162		mark_inode_dirty(inode);
163		return;
164	}
165
166	page = grab_cache_page(inode->i_mapping, 0);
167	BUG_ON(!PageLocked(page));
168
169	if (!PageUptodate(page))
170	{
171		kaddr = kmap(page);
172		memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
173			PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
174		memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
175			UDF_I_LENALLOC(inode));
176		flush_dcache_page(page);
177		SetPageUptodate(page);
178		kunmap(page);
179	}
180	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
181		UDF_I_LENALLOC(inode));
182	UDF_I_LENALLOC(inode) = 0;
183	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
184		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
185	else
186		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
187
188	inode->i_data.a_ops->writepage(page, &udf_wbc);
189	page_cache_release(page);
190
191	mark_inode_dirty(inode);
192}
193
194struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
195{
196	int newblock;
197	struct buffer_head *dbh = NULL;
198	kernel_lb_addr eloc;
199	uint32_t elen;
200	uint8_t alloctype;
201	struct extent_position epos;
202
203	struct udf_fileident_bh sfibh, dfibh;
204	loff_t f_pos = udf_ext0_offset(inode) >> 2;
205	int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
206	struct fileIdentDesc cfi, *sfi, *dfi;
207
208	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
209		alloctype = ICBTAG_FLAG_AD_SHORT;
210	else
211		alloctype = ICBTAG_FLAG_AD_LONG;
212
213	if (!inode->i_size)
214	{
215		UDF_I_ALLOCTYPE(inode) = alloctype;
216		mark_inode_dirty(inode);
217		return NULL;
218	}
219
220	/* alloc block, and copy data to it */
221	*block = udf_new_block(inode->i_sb, inode,
222		UDF_I_LOCATION(inode).partitionReferenceNum,
223		UDF_I_LOCATION(inode).logicalBlockNum, err);
224
225	if (!(*block))
226		return NULL;
227	newblock = udf_get_pblock(inode->i_sb, *block,
228		UDF_I_LOCATION(inode).partitionReferenceNum, 0);
229	if (!newblock)
230		return NULL;
231	dbh = udf_tgetblk(inode->i_sb, newblock);
232	if (!dbh)
233		return NULL;
234	lock_buffer(dbh);
235	memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
236	set_buffer_uptodate(dbh);
237	unlock_buffer(dbh);
238	mark_buffer_dirty_inode(dbh, inode);
239
240	sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
241	sfibh.sbh = sfibh.ebh = NULL;
242	dfibh.soffset = dfibh.eoffset = 0;
243	dfibh.sbh = dfibh.ebh = dbh;
244	while ( (f_pos < size) )
245	{
246		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
247		sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL);
248		if (!sfi)
249		{
250			brelse(dbh);
251			return NULL;
252		}
253		UDF_I_ALLOCTYPE(inode) = alloctype;
254		sfi->descTag.tagLocation = cpu_to_le32(*block);
255		dfibh.soffset = dfibh.eoffset;
256		dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
257		dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
258		if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
259			sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse)))
260		{
261			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
262			brelse(dbh);
263			return NULL;
264		}
265	}
266	mark_buffer_dirty_inode(dbh, inode);
267
268	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
269	UDF_I_LENALLOC(inode) = 0;
270	eloc.logicalBlockNum = *block;
271	eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
272	elen = inode->i_size;
273	UDF_I_LENEXTENTS(inode) = elen;
274	epos.bh = NULL;
275	epos.block = UDF_I_LOCATION(inode);
276	epos.offset = udf_file_entry_alloc_offset(inode);
277	udf_add_aext(inode, &epos, eloc, elen, 0);
278	/* UniqueID stuff */
279
280	brelse(epos.bh);
281	mark_inode_dirty(inode);
282	return dbh;
283}
284
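/*
 * udf_get_block
 *
 * get_block callback used by the address_space operations above: map the
 * file block 'block' to a physical block in bh_result.  When 'create' is
 * set, a missing block is allocated via inode_getblk() and the buffer is
 * marked new.
 */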
285static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
286{
287	int err, new;
288	struct buffer_head *bh;
289	unsigned long phys;
290
291	if (!create)
292	{
293		phys = udf_block_map(inode, block);
294		if (phys)
295			map_bh(bh_result, inode->i_sb, phys);
296		return 0;
297	}
298
299	err = -EIO;
300	new = 0;
301	bh = NULL;
302
303	lock_kernel();
304
305	if (block < 0)
306		goto abort_negative;
307
308	if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
309	{
310		UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
311		UDF_I_NEXT_ALLOC_GOAL(inode) ++;
312	}
313
314	err = 0;
315
316	bh = inode_getblk(inode, block, &err, &phys, &new);
317	BUG_ON(bh);
318	if (err)
319		goto abort;
320	BUG_ON(!phys);
321
322	if (new)
323		set_buffer_new(bh_result);
324	map_bh(bh_result, inode->i_sb, phys);
325abort:
326	unlock_kernel();
327	return err;
328
329abort_negative:
330	udf_warning(inode->i_sb, "udf_get_block", "block < 0");
331	goto abort;
332}
333
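/*
 * Get the buffer_head for a file block, allocating the block when 'create'
 * is set.  Newly allocated blocks are zeroed before being returned.
 */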
334static struct buffer_head *
335udf_getblk(struct inode *inode, long block, int create, int *err)
336{
337	struct buffer_head dummy;
338
339	dummy.b_state = 0;
340	dummy.b_blocknr = -1000;
341	*err = udf_get_block(inode, block, &dummy, create);
342	if (!*err && buffer_mapped(&dummy))
343	{
344		struct buffer_head *bh;
345		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
346		if (buffer_new(&dummy))
347		{
348			lock_buffer(bh);
349			memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
350			set_buffer_uptodate(bh);
351			unlock_buffer(bh);
352			mark_buffer_dirty_inode(bh, inode);
353		}
354		return bh;
355	}
356	return NULL;
357}
358
359/* Extend the file by 'blocks' blocks, return the number of extents added */
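/*
 * Note on extent lengths: per ECMA-167 the two most significant bits of an
 * extent length encode its type (0 = recorded and allocated, 1 = not
 * recorded but allocated, 2 = not recorded and not allocated, 3 = pointer
 * to the next extent of allocation descriptors) and the low 30 bits hold
 * the length in bytes:
 *
 *	type = extLength >> 30;
 *	len  = extLength & UDF_EXTENT_LENGTH_MASK;
 *
 * A single extent can therefore never describe a full 2^30 bytes, which is
 * why the code below caps extents at (1 << 30) - blocksize bytes.
 */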
360int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
361	kernel_long_ad *last_ext, sector_t blocks)
362{
363	sector_t add;
364	int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
365	struct super_block *sb = inode->i_sb;
366	kernel_lb_addr prealloc_loc = {0, 0};
367	int prealloc_len = 0;
368
369	/* The previous extent is fake and we should not extend by anything
370	 * - there's nothing to do... */
371	if (!blocks && fake)
372		return 0;
373	/* Round the last extent up to a multiple of block size */
374	if (last_ext->extLength & (sb->s_blocksize - 1)) {
375		last_ext->extLength =
376			(last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
377			(((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
378				sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
379		UDF_I_LENEXTENTS(inode) =
380			(UDF_I_LENEXTENTS(inode) + sb->s_blocksize - 1) &
381				~(sb->s_blocksize - 1);
382	}
383	/* Is the last extent just preallocated blocks? */
384	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_ALLOCATED) {
385		/* Save the extent so that we can reattach it to the end */
386		prealloc_loc = last_ext->extLocation;
387		prealloc_len = last_ext->extLength;
388		/* Mark the extent as a hole */
389		last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
390			(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
391		last_ext->extLocation.logicalBlockNum = 0;
392		last_ext->extLocation.partitionReferenceNum = 0;
393	}
394	/* Can we merge with the previous extent? */
395	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_NOT_ALLOCATED) {
396		add = ((1<<30) - sb->s_blocksize - (last_ext->extLength &
397			UDF_EXTENT_LENGTH_MASK)) >> sb->s_blocksize_bits;
398		if (add > blocks)
399			add = blocks;
400		blocks -= add;
401		last_ext->extLength += add << sb->s_blocksize_bits;
402	}
403
404	if (fake) {
405		udf_add_aext(inode, last_pos, last_ext->extLocation,
406			last_ext->extLength, 1);
407		count++;
408	}
409	else
410		udf_write_aext(inode, last_pos, last_ext->extLocation, last_ext->extLength, 1);
411	/* Managed to do everything necessary? */
412	if (!blocks)
413		goto out;
414
415	/* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
416	last_ext->extLocation.logicalBlockNum = 0;
417	last_ext->extLocation.partitionReferenceNum = 0;
418	add = (1 << (30-sb->s_blocksize_bits)) - 1;
419	last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (add << sb->s_blocksize_bits);
420	/* Create enough extents to cover the whole hole */
421	while (blocks > add) {
422		blocks -= add;
423		if (udf_add_aext(inode, last_pos, last_ext->extLocation,
424			last_ext->extLength, 1) == -1)
425			return -1;
426		count++;
427	}
428	if (blocks) {
429		last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
430			(blocks << sb->s_blocksize_bits);
431		if (udf_add_aext(inode, last_pos, last_ext->extLocation,
432			last_ext->extLength, 1) == -1)
433			return -1;
434		count++;
435	}
436out:
437	/* Do we have some preallocated blocks saved? */
438	if (prealloc_len) {
439		if (udf_add_aext(inode, last_pos, prealloc_loc, prealloc_len, 1) == -1)
440			return -1;
441		last_ext->extLocation = prealloc_loc;
442		last_ext->extLength = prealloc_len;
443		count++;
444	}
445	/* last_pos should point to the last written extent... */
446	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
447		last_pos->offset -= sizeof(short_ad);
448	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
449		last_pos->offset -= sizeof(long_ad);
450	else
451		return -1;
452	return count;
453}
454
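/*
 * Find (or allocate) the block on disk that backs file block 'block'.
 * The extent list is walked to locate the extent covering the block; if it
 * lies beyond EOF the file is extended first, and if the block is not yet
 * allocated a new one is obtained with udf_new_block().  The physical block
 * number is returned through *phys, *new is set when a block was freshly
 * allocated, and the returned buffer_head is always NULL in the current
 * implementation.
 */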
455static struct buffer_head * inode_getblk(struct inode * inode, sector_t block,
456	int *err, long *phys, int *new)
457{
458	static sector_t last_block;
459	struct buffer_head *result = NULL;
460	kernel_long_ad laarr[EXTENT_MERGE_SIZE];
461	struct extent_position prev_epos, cur_epos, next_epos;
462	int count = 0, startnum = 0, endnum = 0;
463	uint32_t elen = 0;
464	kernel_lb_addr eloc;
465	int c = 1;
466	loff_t lbcount = 0, b_off = 0;
467	uint32_t newblocknum, newblock;
468	sector_t offset = 0;
469	int8_t etype;
470	int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
471	int lastblock = 0;
472
473	prev_epos.offset = udf_file_entry_alloc_offset(inode);
474	prev_epos.block = UDF_I_LOCATION(inode);
475	prev_epos.bh = NULL;
476	cur_epos = next_epos = prev_epos;
477	b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;
478
479	/* Find the extent which contains the block we are looking for,
480	 * alternating between laarr[0] and laarr[1] for the locations of
481	 * the current and the previous extent. */
482	do
483	{
484		if (prev_epos.bh != cur_epos.bh)
485		{
486			brelse(prev_epos.bh);
487			get_bh(cur_epos.bh);
488			prev_epos.bh = cur_epos.bh;
489		}
490		if (cur_epos.bh != next_epos.bh)
491		{
492			brelse(cur_epos.bh);
493			get_bh(next_epos.bh);
494			cur_epos.bh = next_epos.bh;
495		}
496
497		lbcount += elen;
498
499		prev_epos.block = cur_epos.block;
500		cur_epos.block = next_epos.block;
501
502		prev_epos.offset = cur_epos.offset;
503		cur_epos.offset = next_epos.offset;
504
505		if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1)) == -1)
506			break;
507
508		c = !c;
509
510		laarr[c].extLength = (etype << 30) | elen;
511		laarr[c].extLocation = eloc;
512
513		if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
514			pgoal = eloc.logicalBlockNum +
515				((elen + inode->i_sb->s_blocksize - 1) >>
516				inode->i_sb->s_blocksize_bits);
517
518		count ++;
519	} while (lbcount + elen <= b_off);
520
521	b_off -= lbcount;
522	offset = b_off >> inode->i_sb->s_blocksize_bits;
523	/* Move into indirect extent if we are at a pointer to it */
524	udf_next_aext(inode, &prev_epos, &eloc, &elen, 0);
525
526	/* If the extent is allocated and recorded, return the block.
527	 * If the extent is not a multiple of the blocksize, round up. */
528
529	if (etype == (EXT_RECORDED_ALLOCATED >> 30))
530	{
531		if (elen & (inode->i_sb->s_blocksize - 1))
532		{
533			elen = EXT_RECORDED_ALLOCATED |
534				((elen + inode->i_sb->s_blocksize - 1) &
535				~(inode->i_sb->s_blocksize - 1));
536			etype = udf_write_aext(inode, &cur_epos, eloc, elen, 1);
537		}
538		brelse(prev_epos.bh);
539		brelse(cur_epos.bh);
540		brelse(next_epos.bh);
541		newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
542		*phys = newblock;
543		return NULL;
544	}
545
546	last_block = block;
547	/* Are we beyond EOF? */
548	if (etype == -1)
549	{
550		int ret;
551
552		if (count) {
553			if (c)
554				laarr[0] = laarr[1];
555			startnum = 1;
556		}
557		else {
558			/* Create a fake extent when there's not one */
559			memset(&laarr[0].extLocation, 0x00, sizeof(kernel_lb_addr));
560			laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
561			/* Will udf_extend_file() create a real extent from a fake one? */
562			startnum = (offset > 0);
563		}
564		/* Create extents for the hole between EOF and offset */
565		ret = udf_extend_file(inode, &prev_epos, laarr, offset);
566		if (ret == -1) {
567			brelse(prev_epos.bh);
568			brelse(cur_epos.bh);
569			brelse(next_epos.bh);
570			/* We don't really know the error here so we just make
571			 * something up */
572			*err = -ENOSPC;
573			return NULL;
574		}
575		c = 0;
576		offset = 0;
577		count += ret;
578		/* We are not covered by a preallocated extent? */
579		if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) != EXT_NOT_RECORDED_ALLOCATED) {
580			/* Is there any real extent? - otherwise we overwrite
581			 * the fake one... */
582			if (count)
583				c = !c;
584			laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
585				inode->i_sb->s_blocksize;
586			memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr));
587			count ++;
588			endnum ++;
589		}
590		endnum = c+1;
591		lastblock = 1;
592	}
593	else {
594		endnum = startnum = ((count > 2) ? 2 : count);
595
596		/* if the current extent is in position 0, swap it with the previous */
597		if (!c && count != 1)
598		{
599			laarr[2] = laarr[0];
600			laarr[0] = laarr[1];
601			laarr[1] = laarr[2];
602			c = 1;
603		}
604
605		/* if the current block is located in an extent, read the next extent */
606		if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0)) != -1)
607		{
608			laarr[c+1].extLength = (etype << 30) | elen;
609			laarr[c+1].extLocation = eloc;
610			count ++;
611			startnum ++;
612			endnum ++;
613		}
614		else {
615			lastblock = 1;
616		}
617	}
618
619	/* if the current extent is not recorded but allocated, get the
620		block in the extent corresponding to the requested block */
621	if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
622		newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
623	else /* otherwise, allocate a new block */
624	{
625		if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
626			goal = UDF_I_NEXT_ALLOC_GOAL(inode);
627
628		if (!goal)
629		{
630			if (!(goal = pgoal))
631				goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
632		}
633
634		if (!(newblocknum = udf_new_block(inode->i_sb, inode,
635			UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
636		{
637			brelse(prev_epos.bh);
638			*err = -ENOSPC;
639			return NULL;
640		}
641		UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
642	}
643
644	/* If the extent the requested block is located in contains multiple
645	 * blocks, split the extent into at most three extents: blocks prior to
646	 * the requested block, the requested block, and blocks after it. */
647	udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
648
649#ifdef UDF_PREALLOCATE
650	/* preallocate blocks */
651	udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
652#endif
653
654	/* merge any contiguous extents in laarr */
655	udf_merge_extents(inode, laarr, &endnum);
656
657	/* write back the new extents, inserting new extents if the new number
658	of extents is greater than the old number, and deleting extents if
659	the new number of extents is less than the old number */
660	udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
661
662	brelse(prev_epos.bh);
663
664	if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
665		UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
666	{
667		return NULL;
668	}
669	*phys = newblock;
670	*err = 0;
671	*new = 1;
672	UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
673	UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
674	inode->i_ctime = current_fs_time(inode->i_sb);
675
676	if (IS_SYNC(inode))
677		udf_sync_inode(inode);
678	else
679		mark_inode_dirty(inode);
680	return result;
681}
682
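/*
 * When the extent laarr[*c] is unrecorded, split it so that the requested
 * block ('offset' blocks into the extent) becomes its own single-block
 * recorded extent located at 'newblocknum', leaving at most one extent
 * before it and one after it.  *c and *endnum are updated to match.
 */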
683static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
684	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
685{
686	if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
687		(laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
688	{
689		int curr = *c;
690		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
691			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
692		int8_t etype = (laarr[curr].extLength >> 30);
693
694		if (blen == 1)
695			;
696		else if (!offset || blen == offset + 1)
697		{
698			laarr[curr+2] = laarr[curr+1];
699			laarr[curr+1] = laarr[curr];
700		}
701		else
702		{
703			laarr[curr+3] = laarr[curr+1];
704			laarr[curr+2] = laarr[curr+1] = laarr[curr];
705		}
706
707		if (offset)
708		{
709			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
710			{
711				udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
712				laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
713					(offset << inode->i_sb->s_blocksize_bits);
714				laarr[curr].extLocation.logicalBlockNum = 0;
715				laarr[curr].extLocation.partitionReferenceNum = 0;
716			}
717			else
718				laarr[curr].extLength = (etype << 30) |
719					(offset << inode->i_sb->s_blocksize_bits);
720			curr ++;
721			(*c) ++;
722			(*endnum) ++;
723		}
724
725		laarr[curr].extLocation.logicalBlockNum = newblocknum;
726		if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
727			laarr[curr].extLocation.partitionReferenceNum =
728				UDF_I_LOCATION(inode).partitionReferenceNum;
729		laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
730			inode->i_sb->s_blocksize;
731		curr ++;
732
733		if (blen != offset + 1)
734		{
735			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
736				laarr[curr].extLocation.logicalBlockNum += (offset + 1);
737			laarr[curr].extLength = (etype << 30) |
738				((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
739			curr ++;
740			(*endnum) ++;
741		}
742	}
743}
744
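/*
 * Preallocate blocks following the newly allocated block - up to
 * UDF_DEFAULT_PREALLOC_BLOCKS, and no more than the following hole (or the
 * end of file) can absorb - and record them as a "not recorded but
 * allocated" extent, extending an existing preallocation extent when one is
 * already present.
 */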
745static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
746	 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
747{
748	int start, length = 0, currlength = 0, i;
749
750	if (*endnum >= (c+1))
751	{
752		if (!lastblock)
753			return;
754		else
755			start = c;
756	}
757	else
758	{
759		if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
760		{
761			start = c+1;
762			length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
763				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
764		}
765		else
766			start = c;
767	}
768
769	for (i=start+1; i<=*endnum; i++)
770	{
771		if (i == *endnum)
772		{
773			if (lastblock)
774				length += UDF_DEFAULT_PREALLOC_BLOCKS;
775		}
776		else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
777			length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
778				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
779		else
780			break;
781	}
782
783	if (length)
784	{
785		int next = laarr[start].extLocation.logicalBlockNum +
786			(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
787			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
788		int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
789			laarr[start].extLocation.partitionReferenceNum,
790			next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
791				UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
792
793		if (numalloc)
794		{
795			if (start == (c+1))
796				laarr[start].extLength +=
797					(numalloc << inode->i_sb->s_blocksize_bits);
798			else
799			{
800				memmove(&laarr[c+2], &laarr[c+1],
801					sizeof(long_ad) * (*endnum - (c+1)));
802				(*endnum) ++;
803				laarr[c+1].extLocation.logicalBlockNum = next;
804				laarr[c+1].extLocation.partitionReferenceNum =
805					laarr[c].extLocation.partitionReferenceNum;
806				laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
807					(numalloc << inode->i_sb->s_blocksize_bits);
808				start = c+1;
809			}
810
811			for (i=start+1; numalloc && i<*endnum; i++)
812			{
813				int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
814					inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
815
816				if (elen > numalloc)
817				{
818					laarr[i].extLength -=
819						(numalloc << inode->i_sb->s_blocksize_bits);
820					numalloc = 0;
821				}
822				else
823				{
824					numalloc -= elen;
825					if (*endnum > (i+1))
826						memmove(&laarr[i], &laarr[i+1],
827							sizeof(long_ad) * (*endnum - (i+1)));
828					i --;
829					(*endnum) --;
830				}
831			}
832			UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
833		}
834	}
835}
836
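/*
 * Merge neighbouring extents in laarr[] that can be described by a single
 * descriptor (same type and physically contiguous, or an allocated but
 * unrecorded extent followed by an unrecorded hole, in which case the
 * allocated blocks are freed), reducing *endnum accordingly.
 */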
837static void udf_merge_extents(struct inode *inode,
838	 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
839{
840	int i;
841
842	for (i=0; i<(*endnum-1); i++)
843	{
844		if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
845		{
846			if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
847				((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
848				(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
849				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
850			{
851				if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
852					(laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
853					inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
854				{
855					laarr[i+1].extLength = (laarr[i+1].extLength -
856						(laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
857						UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
858					laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
859						(UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
860					laarr[i+1].extLocation.logicalBlockNum =
861						laarr[i].extLocation.logicalBlockNum +
862						((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
863							inode->i_sb->s_blocksize_bits);
864				}
865				else
866				{
867					laarr[i].extLength = laarr[i+1].extLength +
868						(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
869						inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
870					if (*endnum > (i+2))
871						memmove(&laarr[i+1], &laarr[i+2],
872							sizeof(long_ad) * (*endnum - (i+2)));
873					i --;
874					(*endnum) --;
875				}
876			}
877		}
878		else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
879			((laarr[i+1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)))
880		{
881			udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
882				((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
883				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
884			laarr[i].extLocation.logicalBlockNum = 0;
885			laarr[i].extLocation.partitionReferenceNum = 0;
886
887			if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
888				(laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
889				inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
890			{
891				laarr[i+1].extLength = (laarr[i+1].extLength -
892					(laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
893					UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
894				laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
895					(UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
896			}
897			else
898			{
899				laarr[i].extLength = laarr[i+1].extLength +
900					(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
901					inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
902				if (*endnum > (i+2))
903					memmove(&laarr[i+1], &laarr[i+2],
904						sizeof(long_ad) * (*endnum - (i+2)));
905				i --;
906				(*endnum) --;
907			}
908		}
909		else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
910		{
911			udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
912				((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
913			       inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
914			laarr[i].extLocation.logicalBlockNum = 0;
915			laarr[i].extLocation.partitionReferenceNum = 0;
916			laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
917				EXT_NOT_RECORDED_NOT_ALLOCATED;
918		}
919	}
920}
921
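/*
 * Write the merged extent array back into the on-disk allocation
 * descriptors at *epos: delete descriptors if the extent count shrank,
 * insert new ones if it grew, and overwrite the rest in place.
 */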
922static void udf_update_extents(struct inode *inode,
923	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
924	struct extent_position *epos)
925{
926	int start = 0, i;
927	kernel_lb_addr tmploc;
928	uint32_t tmplen;
929
930	if (startnum > endnum)
931	{
932		for (i=0; i<(startnum-endnum); i++)
933			udf_delete_aext(inode, *epos, laarr[i].extLocation,
934				laarr[i].extLength);
935	}
936	else if (startnum < endnum)
937	{
938		for (i=0; i<(endnum-startnum); i++)
939		{
940			udf_insert_aext(inode, *epos, laarr[i].extLocation,
941				laarr[i].extLength);
942			udf_next_aext(inode, epos, &laarr[i].extLocation,
943				&laarr[i].extLength, 1);
944			start ++;
945		}
946	}
947
948	for (i=start; i<endnum; i++)
949	{
950		udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
951		udf_write_aext(inode, epos, laarr[i].extLocation,
952			laarr[i].extLength, 1);
953	}
954}
955
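/*
 * Like sb_bread() but for a file-relative block number: get the block
 * (allocating it when 'create' is set) and read it in if it is not already
 * up to date.
 */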
956struct buffer_head * udf_bread(struct inode * inode, int block,
957	int create, int * err)
958{
959	struct buffer_head * bh = NULL;
960
961	bh = udf_getblk(inode, block, create, err);
962	if (!bh)
963		return NULL;
964
965	if (buffer_uptodate(bh))
966		return bh;
967	ll_rw_block(READ, 1, &bh);
968	wait_on_buffer(bh);
969	if (buffer_uptodate(bh))
970		return bh;
971	brelse(bh);
972	*err = -EIO;
973	return NULL;
974}
975
976void udf_truncate(struct inode * inode)
977{
978	int offset;
979	int err;
980
981	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
982			S_ISLNK(inode->i_mode)))
983		return;
984	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
985		return;
986
987	lock_kernel();
988	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
989	{
990		if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
991			inode->i_size))
992		{
993			udf_expand_file_adinicb(inode, inode->i_size, &err);
994			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
995			{
996				inode->i_size = UDF_I_LENALLOC(inode);
997				unlock_kernel();
998				return;
999			}
1000			else
1001				udf_truncate_extents(inode);
1002		}
1003		else
1004		{
1005			offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
1006			memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
1007			UDF_I_LENALLOC(inode) = inode->i_size;
1008		}
1009	}
1010	else
1011	{
1012		block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
1013		udf_truncate_extents(inode);
1014	}
1015
1016	inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
1017	if (IS_SYNC(inode))
1018		udf_sync_inode (inode);
1019	else
1020		mark_inode_dirty(inode);
1021	unlock_kernel();
1022}
1023
1024static void
1025__udf_read_inode(struct inode *inode)
1026{
1027	struct buffer_head *bh = NULL;
1028	struct fileEntry *fe;
1029	uint16_t ident;
1030
1031	/*
1032	 * Set defaults, but the inode is still incomplete!
1033	 * Note: get_new_inode() sets the following on a new inode:
1034	 *      i_sb = sb
1035	 *      i_no = ino
1036	 *      i_flags = sb->s_flags
1037	 *      i_state = 0
1038	 * clean_inode(): zero fills and sets
1039	 *      i_count = 1
1040	 *      i_nlink = 1
1041	 *      i_op = NULL;
1042	 */
1043	bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
1044
1045	if (!bh)
1046	{
1047		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
1048			inode->i_ino);
1049		make_bad_inode(inode);
1050		return;
1051	}
1052
1053	if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
1054		ident != TAG_IDENT_USE)
1055	{
1056		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
1057			inode->i_ino, ident);
1058		brelse(bh);
1059		make_bad_inode(inode);
1060		return;
1061	}
1062
1063	fe = (struct fileEntry *)bh->b_data;
1064
1065	if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
1066	{
1067		struct buffer_head *ibh = NULL, *nbh = NULL;
1068		struct indirectEntry *ie;
1069
1070		ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
1071		if (ident == TAG_IDENT_IE)
1072		{
1073			if (ibh)
1074			{
1075				kernel_lb_addr loc;
1076				ie = (struct indirectEntry *)ibh->b_data;
1077
1078				loc = lelb_to_cpu(ie->indirectICB.extLocation);
1079
1080				if (ie->indirectICB.extLength &&
1081					(nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
1082				{
1083					if (ident == TAG_IDENT_FE ||
1084						ident == TAG_IDENT_EFE)
1085					{
1086						memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(kernel_lb_addr));
1087						brelse(bh);
1088						brelse(ibh);
1089						brelse(nbh);
1090						__udf_read_inode(inode);
1091						return;
1092					}
1093					else
1094					{
1095						brelse(nbh);
1096						brelse(ibh);
1097					}
1098				}
1099				else
1100					brelse(ibh);
1101			}
1102		}
1103		else
1104			brelse(ibh);
1105	}
1106	else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
1107	{
1108		printk(KERN_ERR "udf: unsupported strategy type: %d\n",
1109			le16_to_cpu(fe->icbTag.strategyType));
1110		brelse(bh);
1111		make_bad_inode(inode);
1112		return;
1113	}
1114	udf_fill_inode(inode, bh);
1115
1116	brelse(bh);
1117}
1118
1119static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1120{
1121	struct fileEntry *fe;
1122	struct extendedFileEntry *efe;
1123	time_t convtime;
1124	long convtime_usec;
1125	int offset;
1126
1127	fe = (struct fileEntry *)bh->b_data;
1128	efe = (struct extendedFileEntry *)bh->b_data;
1129
1130	if (le16_to_cpu(fe->icbTag.strategyType) == 4)
1131		UDF_I_STRAT4096(inode) = 0;
1132	else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
1133		UDF_I_STRAT4096(inode) = 1;
1134
1135	UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
1136	UDF_I_UNIQUE(inode) = 0;
1137	UDF_I_LENEATTR(inode) = 0;
1138	UDF_I_LENEXTENTS(inode) = 0;
1139	UDF_I_LENALLOC(inode) = 0;
1140	UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
1141	UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
1142	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
1143	{
1144		UDF_I_EFE(inode) = 1;
1145		UDF_I_USE(inode) = 0;
1146		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
1147		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1148	}
1149	else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
1150	{
1151		UDF_I_EFE(inode) = 0;
1152		UDF_I_USE(inode) = 0;
1153		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
1154		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1155	}
1156	else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1157	{
1158		UDF_I_EFE(inode) = 0;
1159		UDF_I_USE(inode) = 1;
1160		UDF_I_LENALLOC(inode) =
1161			le32_to_cpu(
1162				((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
1163		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry), GFP_KERNEL);
1164		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1165		return;
1166	}
1167
1168	inode->i_uid = le32_to_cpu(fe->uid);
1169	if (inode->i_uid == -1 || UDF_QUERY_FLAG(inode->i_sb,
1170					UDF_FLAG_UID_IGNORE))
1171		inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
1172
1173	inode->i_gid = le32_to_cpu(fe->gid);
1174	if (inode->i_gid == -1 || UDF_QUERY_FLAG(inode->i_sb,
1175					UDF_FLAG_GID_IGNORE))
1176		inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
1177
1178	inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
1179	if (!inode->i_nlink)
1180		inode->i_nlink = 1;
1181
1182	inode->i_size = le64_to_cpu(fe->informationLength);
1183	UDF_I_LENEXTENTS(inode) = inode->i_size;
1184
1185	inode->i_mode = udf_convert_permissions(fe);
1186	inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
1187
1188	if (UDF_I_EFE(inode) == 0)
1189	{
1190		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1191			(inode->i_sb->s_blocksize_bits - 9);
1192
1193		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1194			lets_to_cpu(fe->accessTime)) )
1195		{
1196			inode->i_atime.tv_sec = convtime;
1197			inode->i_atime.tv_nsec = convtime_usec * 1000;
1198		}
1199		else
1200		{
1201			inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1202		}
1203
1204		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1205			lets_to_cpu(fe->modificationTime)) )
1206		{
1207			inode->i_mtime.tv_sec = convtime;
1208			inode->i_mtime.tv_nsec = convtime_usec * 1000;
1209		}
1210		else
1211		{
1212			inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1213		}
1214
1215		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1216			lets_to_cpu(fe->attrTime)) )
1217		{
1218			inode->i_ctime.tv_sec = convtime;
1219			inode->i_ctime.tv_nsec = convtime_usec * 1000;
1220		}
1221		else
1222		{
1223			inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1224		}
1225
1226		UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
1227		UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
1228		UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
1229		offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
1230	}
1231	else
1232	{
1233		inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1234			(inode->i_sb->s_blocksize_bits - 9);
1235
1236		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1237			lets_to_cpu(efe->accessTime)) )
1238		{
1239			inode->i_atime.tv_sec = convtime;
1240			inode->i_atime.tv_nsec = convtime_usec * 1000;
1241		}
1242		else
1243		{
1244			inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1245		}
1246
1247		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1248			lets_to_cpu(efe->modificationTime)) )
1249		{
1250			inode->i_mtime.tv_sec = convtime;
1251			inode->i_mtime.tv_nsec = convtime_usec * 1000;
1252		}
1253		else
1254		{
1255			inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1256		}
1257
1258		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1259			lets_to_cpu(efe->createTime)) )
1260		{
1261			UDF_I_CRTIME(inode).tv_sec = convtime;
1262			UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
1263		}
1264		else
1265		{
1266			UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
1267		}
1268
1269		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1270			lets_to_cpu(efe->attrTime)) )
1271		{
1272			inode->i_ctime.tv_sec = convtime;
1273			inode->i_ctime.tv_nsec = convtime_usec * 1000;
1274		}
1275		else
1276		{
1277			inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1278		}
1279
1280		UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
1281		UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
1282		UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
1283		offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
1284	}
1285
1286	switch (fe->icbTag.fileType)
1287	{
1288		case ICBTAG_FILE_TYPE_DIRECTORY:
1289		{
1290			inode->i_op = &udf_dir_inode_operations;
1291			inode->i_fop = &udf_dir_operations;
1292			inode->i_mode |= S_IFDIR;
1293			inc_nlink(inode);
1294			break;
1295		}
1296		case ICBTAG_FILE_TYPE_REALTIME:
1297		case ICBTAG_FILE_TYPE_REGULAR:
1298		case ICBTAG_FILE_TYPE_UNDEF:
1299		{
1300			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
1301				inode->i_data.a_ops = &udf_adinicb_aops;
1302			else
1303				inode->i_data.a_ops = &udf_aops;
1304			inode->i_op = &udf_file_inode_operations;
1305			inode->i_fop = &udf_file_operations;
1306			inode->i_mode |= S_IFREG;
1307			break;
1308		}
1309		case ICBTAG_FILE_TYPE_BLOCK:
1310		{
1311			inode->i_mode |= S_IFBLK;
1312			break;
1313		}
1314		case ICBTAG_FILE_TYPE_CHAR:
1315		{
1316			inode->i_mode |= S_IFCHR;
1317			break;
1318		}
1319		case ICBTAG_FILE_TYPE_FIFO:
1320		{
1321			init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1322			break;
1323		}
1324		case ICBTAG_FILE_TYPE_SOCKET:
1325		{
1326			init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1327			break;
1328		}
1329		case ICBTAG_FILE_TYPE_SYMLINK:
1330		{
1331			inode->i_data.a_ops = &udf_symlink_aops;
1332			inode->i_op = &page_symlink_inode_operations;
1333			inode->i_mode = S_IFLNK|S_IRWXUGO;
1334			break;
1335		}
1336		default:
1337		{
1338			printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1339				inode->i_ino, fe->icbTag.fileType);
1340			make_bad_inode(inode);
1341			return;
1342		}
1343	}
1344	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1345	{
1346		struct deviceSpec *dsea =
1347			(struct deviceSpec *)
1348				udf_get_extendedattr(inode, 12, 1);
1349
1350		if (dsea)
1351		{
1352			init_special_inode(inode, inode->i_mode, MKDEV(
1353				le32_to_cpu(dsea->majorDeviceIdent),
1354				le32_to_cpu(dsea->minorDeviceIdent)));
1355			/* Developer ID ??? */
1356		}
1357		else
1358		{
1359			make_bad_inode(inode);
1360		}
1361	}
1362}
1363
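/*
 * Convert UDF file entry permissions to a POSIX mode.  The UDF permissions
 * field holds five bits per class (execute, write, read, change attributes,
 * delete) for other, group and owner in that order, so shifting the group
 * and owner fields down by 2 and 4 bits lines the rwx bits up with S_IRWXG
 * and S_IRWXU; for example UDF permissions 0x1CA5 become mode 0755.  The
 * setuid/setgid/sticky bits live in the ICB tag flags instead.
 */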
1364static mode_t
1365udf_convert_permissions(struct fileEntry *fe)
1366{
1367	mode_t mode;
1368	uint32_t permissions;
1369	uint32_t flags;
1370
1371	permissions = le32_to_cpu(fe->permissions);
1372	flags = le16_to_cpu(fe->icbTag.flags);
1373
1374	mode =	(( permissions      ) & S_IRWXO) |
1375		(( permissions >> 2 ) & S_IRWXG) |
1376		(( permissions >> 4 ) & S_IRWXU) |
1377		(( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1378		(( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1379		(( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1380
1381	return mode;
1382}
1383
1384/*
1385 * udf_write_inode
1386 *
1387 * PURPOSE
1388 *	Write out the specified inode.
1389 *
1390 * DESCRIPTION
1391 *	This routine is called whenever an inode is synced.
1392 *	It updates the on-disk file entry via udf_update_inode().
1393 *
1394 * HISTORY
1395 *	July 1, 1997 - Andrew E. Mileski
1396 *	Written, tested, and released.
1397 */
1398
1399int udf_write_inode(struct inode * inode, int sync)
1400{
1401	int ret;
1402	lock_kernel();
1403	ret = udf_update_inode(inode, sync);
1404	unlock_kernel();
1405	return ret;
1406}
1407
1408int udf_sync_inode(struct inode * inode)
1409{
1410	return udf_update_inode(inode, 1);
1411}
1412
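/*
 * Write the in-core inode back into its on-disk (extended) file entry or
 * unallocated space entry, recomputing the descriptor CRC and checksum.
 * When do_sync is set the buffer is written out synchronously and I/O
 * errors are reported.
 */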
1413static int
1414udf_update_inode(struct inode *inode, int do_sync)
1415{
1416	struct buffer_head *bh = NULL;
1417	struct fileEntry *fe;
1418	struct extendedFileEntry *efe;
1419	uint32_t udfperms;
1420	uint16_t icbflags;
1421	uint16_t crclen;
1422	int i;
1423	kernel_timestamp cpu_time;
1424	int err = 0;
1425
1426	bh = udf_tread(inode->i_sb,
1427		udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
1428
1429	if (!bh)
1430	{
1431		udf_debug("bread failure\n");
1432		return -EIO;
1433	}
1434
1435	memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
1436
1437	fe = (struct fileEntry *)bh->b_data;
1438	efe = (struct extendedFileEntry *)bh->b_data;
1439
1440	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1441	{
1442		struct unallocSpaceEntry *use =
1443			(struct unallocSpaceEntry *)bh->b_data;
1444
1445		use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1446		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1447		crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
1448			sizeof(tag);
1449		use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1450		use->descTag.descCRCLength = cpu_to_le16(crclen);
1451		use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));
1452
1453		use->descTag.tagChecksum = 0;
1454		for (i=0; i<16; i++)
1455			if (i != 4)
1456				use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];
1457
1458		mark_buffer_dirty(bh);
1459		brelse(bh);
1460		return err;
1461	}
1462
1463	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
1464		fe->uid = cpu_to_le32(-1);
1465	else fe->uid = cpu_to_le32(inode->i_uid);
1466
1467	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
1468		fe->gid = cpu_to_le32(-1);
1469	else fe->gid = cpu_to_le32(inode->i_gid);
1470
1471	udfperms =	((inode->i_mode & S_IRWXO)     ) |
1472			((inode->i_mode & S_IRWXG) << 2) |
1473			((inode->i_mode & S_IRWXU) << 4);
1474
1475	udfperms |=	(le32_to_cpu(fe->permissions) &
1476			(FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1477			 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1478			 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1479	fe->permissions = cpu_to_le32(udfperms);
1480
1481	if (S_ISDIR(inode->i_mode))
1482		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1483	else
1484		fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1485
1486	fe->informationLength = cpu_to_le64(inode->i_size);
1487
1488	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1489	{
1490		regid *eid;
1491		struct deviceSpec *dsea =
1492			(struct deviceSpec *)
1493				udf_get_extendedattr(inode, 12, 1);
1494
1495		if (!dsea)
1496		{
1497			dsea = (struct deviceSpec *)
1498				udf_add_extendedattr(inode,
1499					sizeof(struct deviceSpec) +
1500					sizeof(regid), 12, 0x3);
1501			dsea->attrType = cpu_to_le32(12);
1502			dsea->attrSubtype = 1;
1503			dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) +
1504				sizeof(regid));
1505			dsea->impUseLength = cpu_to_le32(sizeof(regid));
1506		}
1507		eid = (regid *)dsea->impUse;
1508		memset(eid, 0, sizeof(regid));
1509		strcpy(eid->ident, UDF_ID_DEVELOPER);
1510		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1511		eid->identSuffix[1] = UDF_OS_ID_LINUX;
1512		dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
1513		dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
1514	}
1515
1516	if (UDF_I_EFE(inode) == 0)
1517	{
1518		memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1519		fe->logicalBlocksRecorded = cpu_to_le64(
1520			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1521			(inode->i_sb->s_blocksize_bits - 9));
1522
1523		if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1524			fe->accessTime = cpu_to_lets(cpu_time);
1525		if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1526			fe->modificationTime = cpu_to_lets(cpu_time);
1527		if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1528			fe->attrTime = cpu_to_lets(cpu_time);
1529		memset(&(fe->impIdent), 0, sizeof(regid));
1530		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1531		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1532		fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1533		fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1534		fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1535		fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1536		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1537		crclen = sizeof(struct fileEntry);
1538	}
1539	else
1540	{
1541		memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1542		efe->objectSize = cpu_to_le64(inode->i_size);
1543		efe->logicalBlocksRecorded = cpu_to_le64(
1544			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1545			(inode->i_sb->s_blocksize_bits - 9));
1546
1547		if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
1548			(UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
1549			 UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec))
1550		{
1551			UDF_I_CRTIME(inode) = inode->i_atime;
1552		}
1553		if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec ||
1554			(UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
1555			 UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec))
1556		{
1557			UDF_I_CRTIME(inode) = inode->i_mtime;
1558		}
1559		if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec ||
1560			(UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
1561			 UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec))
1562		{
1563			UDF_I_CRTIME(inode) = inode->i_ctime;
1564		}
1565
1566		if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1567			efe->accessTime = cpu_to_lets(cpu_time);
1568		if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1569			efe->modificationTime = cpu_to_lets(cpu_time);
1570		if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode)))
1571			efe->createTime = cpu_to_lets(cpu_time);
1572		if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1573			efe->attrTime = cpu_to_lets(cpu_time);
1574
1575		memset(&(efe->impIdent), 0, sizeof(regid));
1576		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1577		efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1578		efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1579		efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1580		efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1581		efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1582		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1583		crclen = sizeof(struct extendedFileEntry);
1584	}
1585	if (UDF_I_STRAT4096(inode))
1586	{
1587		fe->icbTag.strategyType = cpu_to_le16(4096);
1588		fe->icbTag.strategyParameter = cpu_to_le16(1);
1589		fe->icbTag.numEntries = cpu_to_le16(2);
1590	}
1591	else
1592	{
1593		fe->icbTag.strategyType = cpu_to_le16(4);
1594		fe->icbTag.numEntries = cpu_to_le16(1);
1595	}
1596
1597	if (S_ISDIR(inode->i_mode))
1598		fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1599	else if (S_ISREG(inode->i_mode))
1600		fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1601	else if (S_ISLNK(inode->i_mode))
1602		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1603	else if (S_ISBLK(inode->i_mode))
1604		fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1605	else if (S_ISCHR(inode->i_mode))
1606		fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1607	else if (S_ISFIFO(inode->i_mode))
1608		fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1609	else if (S_ISSOCK(inode->i_mode))
1610		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
1611
1612	icbflags =	UDF_I_ALLOCTYPE(inode) |
1613			((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1614			((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1615			((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1616			(le16_to_cpu(fe->icbTag.flags) &
1617				~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1618				ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1619
1620	fe->icbTag.flags = cpu_to_le16(icbflags);
1621	if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1622		fe->descTag.descVersion = cpu_to_le16(3);
1623	else
1624		fe->descTag.descVersion = cpu_to_le16(2);
1625	fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
1626	fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1627	crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
1628	fe->descTag.descCRCLength = cpu_to_le16(crclen);
1629	fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
1630
1631	fe->descTag.tagChecksum = 0;
1632	for (i=0; i<16; i++)
1633		if (i != 4)
1634			fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];
1635
1636	/* write the file entry buffer back to disk */
1637	mark_buffer_dirty(bh);
1638	if (do_sync)
1639	{
1640		sync_dirty_buffer(bh);
1641		if (buffer_req(bh) && !buffer_uptodate(bh))
1642		{
1643			printk(KERN_ERR "IO error syncing udf inode [%s:%08lx]\n",
1644				inode->i_sb->s_id, inode->i_ino);
1645			err = -EIO;
1646		}
1647	}
1648	brelse(bh);
1649	return err;
1650}
1651
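/*
 * Look up or read in the inode at the given logical block address; the
 * physical block number is used as the inode number for iget_locked().
 * Returns NULL if the inode is bad or the address is out of range.
 */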
1652struct inode *
1653udf_iget(struct super_block *sb, kernel_lb_addr ino)
1654{
1655	unsigned long block = udf_get_lb_pblock(sb, ino, 0);
1656	struct inode *inode = iget_locked(sb, block);
1657
1658	if (!inode)
1659		return NULL;
1660
1661	if (inode->i_state & I_NEW) {
1662		memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(kernel_lb_addr));
1663		__udf_read_inode(inode);
1664		unlock_new_inode(inode);
1665	}
1666
1667	if (is_bad_inode(inode))
1668		goto out_iput;
1669
1670	if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
1671		udf_debug("block=%d, partition=%d out of range\n",
1672			ino.logicalBlockNum, ino.partitionReferenceNum);
1673		make_bad_inode(inode);
1674		goto out_iput;
1675	}
1676
1677	return inode;
1678
1679 out_iput:
1680	iput(inode);
1681	return NULL;
1682}
1683
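/*
 * Append a new allocation descriptor (eloc, elen) at *epos.  If there is
 * not enough room left for the descriptor plus a continuation pointer, a
 * new allocation extent descriptor block is allocated, chained in with an
 * EXT_NEXT_EXTENT_ALLOCDECS descriptor, and *epos is moved into it.
 */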
1684int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
1685	kernel_lb_addr eloc, uint32_t elen, int inc)
1686{
1687	int adsize;
1688	short_ad *sad = NULL;
1689	long_ad *lad = NULL;
1690	struct allocExtDesc *aed;
1691	int8_t etype;
1692	uint8_t *ptr;
1693
1694	if (!epos->bh)
1695		ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1696	else
1697		ptr = epos->bh->b_data + epos->offset;
1698
1699	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1700		adsize = sizeof(short_ad);
1701	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1702		adsize = sizeof(long_ad);
1703	else
1704		return -1;
1705
1706	if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize)
1707	{
1708		char *sptr, *dptr;
1709		struct buffer_head *nbh;
1710		int err, loffset;
1711		kernel_lb_addr obloc = epos->block;
1712
1713		if (!(epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL,
1714			obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
1715		{
1716			return -1;
1717		}
1718		if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
1719			epos->block, 0))))
1720		{
1721			return -1;
1722		}
1723		lock_buffer(nbh);
1724		memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
1725		set_buffer_uptodate(nbh);
1726		unlock_buffer(nbh);
1727		mark_buffer_dirty_inode(nbh, inode);
1728
1729		aed = (struct allocExtDesc *)(nbh->b_data);
1730		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
1731			aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
1732		if (epos->offset + adsize > inode->i_sb->s_blocksize)
1733		{
1734			loffset = epos->offset;
1735			aed->lengthAllocDescs = cpu_to_le32(adsize);
1736			sptr = ptr - adsize;
1737			dptr = nbh->b_data + sizeof(struct allocExtDesc);
1738			memcpy(dptr, sptr, adsize);
1739			epos->offset = sizeof(struct allocExtDesc) + adsize;
1740		}
1741		else
1742		{
1743			loffset = epos->offset + adsize;
1744			aed->lengthAllocDescs = cpu_to_le32(0);
1745			sptr = ptr;
1746			epos->offset = sizeof(struct allocExtDesc);
1747
1748			if (epos->bh)
1749			{
1750				aed = (struct allocExtDesc *)epos->bh->b_data;
1751				aed->lengthAllocDescs =
1752					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1753			}
1754			else
1755			{
1756				UDF_I_LENALLOC(inode) += adsize;
1757				mark_inode_dirty(inode);
1758			}
1759		}
1760		if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1761			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
1762				epos->block.logicalBlockNum, sizeof(tag));
1763		else
1764			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
1765				epos->block.logicalBlockNum, sizeof(tag));
1766		switch (UDF_I_ALLOCTYPE(inode))
1767		{
1768			case ICBTAG_FLAG_AD_SHORT:
1769			{
1770				sad = (short_ad *)sptr;
1771				sad->extLength = cpu_to_le32(
1772					EXT_NEXT_EXTENT_ALLOCDECS |
1773					inode->i_sb->s_blocksize);
1774				sad->extPosition = cpu_to_le32(epos->block.logicalBlockNum);
1775				break;
1776			}
1777			case ICBTAG_FLAG_AD_LONG:
1778			{
1779				lad = (long_ad *)sptr;
1780				lad->extLength = cpu_to_le32(
1781					EXT_NEXT_EXTENT_ALLOCDECS |
1782					inode->i_sb->s_blocksize);
1783				lad->extLocation = cpu_to_lelb(epos->block);
1784				memset(lad->impUse, 0x00, sizeof(lad->impUse));
1785				break;
1786			}
1787		}
1788		if (epos->bh)
1789		{
1790			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1791				udf_update_tag(epos->bh->b_data, loffset);
1792			else
1793				udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc));
1794			mark_buffer_dirty_inode(epos->bh, inode);
1795			brelse(epos->bh);
1796		}
1797		else
1798			mark_inode_dirty(inode);
1799		epos->bh = nbh;
1800	}
1801
1802	etype = udf_write_aext(inode, epos, eloc, elen, inc);
1803
1804	if (!epos->bh)
1805	{
1806		UDF_I_LENALLOC(inode) += adsize;
1807		mark_inode_dirty(inode);
1808	}
1809	else
1810	{
1811		aed = (struct allocExtDesc *)epos->bh->b_data;
1812		aed->lengthAllocDescs =
1813			cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1814		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1815			udf_update_tag(epos->bh->b_data, epos->offset + (inc ? 0 : adsize));
1816		else
1817			udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc));
1818		mark_buffer_dirty_inode(epos->bh, inode);
1819	}
1820
1821	return etype;
1822}
1823
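/*
 * udf_write_aext
 *
 * PURPOSE
 *	Write a single allocation descriptor (short_ad or long_ad) at the
 *	current extent position.
 *
 * DESCRIPTION
 *	Stores the extent location and length at epos, marks the containing
 *	buffer or inode dirty and, if 'inc' is set, advances epos->offset
 *	past the descriptor just written.  Returns the extent type encoded
 *	in the top two bits of elen, or -1 for an unsupported allocation
 *	type.
 */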
1824int8_t udf_write_aext(struct inode *inode, struct extent_position *epos,
1825    kernel_lb_addr eloc, uint32_t elen, int inc)
1826{
1827	int adsize;
1828	uint8_t *ptr;
1829
1830	if (!epos->bh)
1831		ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1832	else
1833		ptr = epos->bh->b_data + epos->offset;
1834
1835	switch (UDF_I_ALLOCTYPE(inode))
1836	{
1837		case ICBTAG_FLAG_AD_SHORT:
1838		{
1839			short_ad *sad = (short_ad *)ptr;
1840			sad->extLength = cpu_to_le32(elen);
1841			sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
1842			adsize = sizeof(short_ad);
1843			break;
1844		}
1845		case ICBTAG_FLAG_AD_LONG:
1846		{
1847			long_ad *lad = (long_ad *)ptr;
1848			lad->extLength = cpu_to_le32(elen);
1849			lad->extLocation = cpu_to_lelb(eloc);
1850			memset(lad->impUse, 0x00, sizeof(lad->impUse));
1851			adsize = sizeof(long_ad);
1852			break;
1853		}
1854		default:
1855			return -1;
1856	}
1857
1858	if (epos->bh)
1859	{
1860		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1861		{
1862			struct allocExtDesc *aed = (struct allocExtDesc *)epos->bh->b_data;
1863			udf_update_tag(epos->bh->b_data,
1864				le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
1865		}
1866		mark_buffer_dirty_inode(epos->bh, inode);
1867	}
1868	else
1869		mark_inode_dirty(inode);
1870
1871	if (inc)
1872		epos->offset += adsize;
1873	return (elen >> 30);
1874}
1875
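/*
 * udf_next_aext
 *
 * PURPOSE
 *	Return the next allocation descriptor, transparently following
 *	chained Allocation Extent Descriptor blocks.
 *
 * DESCRIPTION
 *	Wraps udf_current_aext(); whenever the descriptor read is an
 *	EXT_NEXT_EXTENT_ALLOCDECS pointer, the referenced block is read and
 *	the walk continues there.  Returns the extent type, or -1 on error
 *	or at the end of the descriptor list.
 */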
1876int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
1877	kernel_lb_addr *eloc, uint32_t *elen, int inc)
1878{
1879	int8_t etype;
1880
1881	while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
1882		(EXT_NEXT_EXTENT_ALLOCDECS >> 30))
1883	{
1884		epos->block = *eloc;
1885		epos->offset = sizeof(struct allocExtDesc);
1886		brelse(epos->bh);
1887		if (!(epos->bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, epos->block, 0))))
1888		{
1889			udf_debug("reading block %d failed!\n",
1890				udf_get_lb_pblock(inode->i_sb, epos->block, 0));
1891			return -1;
1892		}
1893	}
1894
1895	return etype;
1896}
1897
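/*
 * udf_current_aext
 *
 * PURPOSE
 *	Read the allocation descriptor at the current extent position.
 *
 * DESCRIPTION
 *	Handles descriptors embedded in the file entry (epos->bh == NULL) as
 *	well as those in external Allocation Extent Descriptor blocks.  If
 *	epos->offset is zero it is first initialised to the start of the
 *	descriptor area.  On success *eloc and *elen are filled in and the
 *	extent type is returned; -1 is returned when the descriptor area is
 *	exhausted or the allocation type is unsupported.
 */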
1898int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
1899	kernel_lb_addr *eloc, uint32_t *elen, int inc)
1900{
1901	int alen;
1902	int8_t etype;
1903	uint8_t *ptr;
1904
1905	if (!epos->bh)
1906	{
1907		if (!epos->offset)
1908			epos->offset = udf_file_entry_alloc_offset(inode);
1909		ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1910		alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
1911	}
1912	else
1913	{
1914		if (!epos->offset)
1915			epos->offset = sizeof(struct allocExtDesc);
1916		ptr = epos->bh->b_data + epos->offset;
1917		alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->lengthAllocDescs);
1918	}
1919
1920	switch (UDF_I_ALLOCTYPE(inode))
1921	{
1922		case ICBTAG_FLAG_AD_SHORT:
1923		{
1924			short_ad *sad;
1925
1926			if (!(sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc)))
1927				return -1;
1928
1929			etype = le32_to_cpu(sad->extLength) >> 30;
1930			eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1931			eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
1932			*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1933			break;
1934		}
1935		case ICBTAG_FLAG_AD_LONG:
1936		{
1937			long_ad *lad;
1938
1939			if (!(lad = udf_get_filelongad(ptr, alen, &epos->offset, inc)))
1940				return -1;
1941
1942			etype = le32_to_cpu(lad->extLength) >> 30;
1943			*eloc = lelb_to_cpu(lad->extLocation);
1944			*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1945			break;
1946		}
1947		default:
1948		{
1949			udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
1950			return -1;
1951		}
1952	}
1953
1954	return etype;
1955}
1956
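/*
 * udf_insert_aext
 *
 * PURPOSE
 *	Insert a new extent at the current position, shifting each following
 *	descriptor down by one slot and appending the last one that is
 *	carried out of the list.
 */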
1957static int8_t
1958udf_insert_aext(struct inode *inode, struct extent_position epos,
1959		kernel_lb_addr neloc, uint32_t nelen)
1960{
1961	kernel_lb_addr oeloc;
1962	uint32_t oelen;
1963	int8_t etype;
1964
1965	if (epos.bh)
1966		get_bh(epos.bh);
1967
1968	while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1)
1969	{
1970		udf_write_aext(inode, &epos, neloc, nelen, 1);
1971
1972		neloc = oeloc;
1973		nelen = (etype << 30) | oelen;
1974	}
1975	udf_add_aext(inode, &epos, neloc, nelen, 1);
1976	brelse(epos.bh);
1977	return (nelen >> 30);
1978}
1979
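/*
 * udf_delete_aext
 *
 * PURPOSE
 *	Remove the extent at the current position.
 *
 * DESCRIPTION
 *	Copies each following descriptor back by one slot, clears the
 *	vacated slot(s) and shrinks lengthAllocDescs or UDF_I_LENALLOC(inode)
 *	accordingly; a trailing Allocation Extent Descriptor block that is no
 *	longer needed is freed.
 */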
1980int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
1981	kernel_lb_addr eloc, uint32_t elen)
1982{
1983	struct extent_position oepos;
1984	int adsize;
1985	int8_t etype;
1986	struct allocExtDesc *aed;
1987
1988	if (epos.bh)
1989	{
1990		get_bh(epos.bh);
1991		get_bh(epos.bh);
1992	}
1993
1994	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1995		adsize = sizeof(short_ad);
1996	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1997		adsize = sizeof(long_ad);
1998	else
1999		adsize = 0;
2000
2001	oepos = epos;
2002	if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
2003	{
		/* release the extra references taken above before bailing out */
		brelse(epos.bh);
		brelse(oepos.bh);
		return -1;
	}
2004
2005	while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1)
2006	{
2007		udf_write_aext(inode, &oepos, eloc, (etype << 30) | elen, 1);
2008		if (oepos.bh != epos.bh)
2009		{
2010			oepos.block = epos.block;
2011			brelse(oepos.bh);
2012			get_bh(epos.bh);
2013			oepos.bh = epos.bh;
2014			oepos.offset = epos.offset - adsize;
2015		}
2016	}
2017	memset(&eloc, 0x00, sizeof(kernel_lb_addr));
2018	elen = 0;
2019
2020	if (epos.bh != oepos.bh)
2021	{
2022		udf_free_blocks(inode->i_sb, inode, epos.block, 0, 1);
2023		udf_write_aext(inode, &oepos, eloc, elen, 1);
2024		udf_write_aext(inode, &oepos, eloc, elen, 1);
2025		if (!oepos.bh)
2026		{
2027			UDF_I_LENALLOC(inode) -= (adsize * 2);
2028			mark_inode_dirty(inode);
2029		}
2030		else
2031		{
2032			aed = (struct allocExtDesc *)oepos.bh->b_data;
2033			aed->lengthAllocDescs =
2034				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
2035			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
2036				udf_update_tag(oepos.bh->b_data, oepos.offset - (2*adsize));
2037			else
2038				udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc));
2039			mark_buffer_dirty_inode(oepos.bh, inode);
2040		}
2041	}
2042	else
2043	{
2044		udf_write_aext(inode, &oepos, eloc, elen, 1);
2045		if (!oepos.bh)
2046		{
2047			UDF_I_LENALLOC(inode) -= adsize;
2048			mark_inode_dirty(inode);
2049		}
2050		else
2051		{
2052			aed = (struct allocExtDesc *)oepos.bh->b_data;
2053			aed->lengthAllocDescs =
2054				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
2055			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
2056				udf_update_tag(oepos.bh->b_data, epos.offset - adsize);
2057			else
2058				udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc));
2059			mark_buffer_dirty_inode(oepos.bh, inode);
2060		}
2061	}
2062
2063	brelse(epos.bh);
2064	brelse(oepos.bh);
2065	return (elen >> 30);
2066}
2067
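/*
 * inode_bmap
 *
 * PURPOSE
 *	Walk the extent list to find the extent covering a given file block.
 *
 * DESCRIPTION
 *	Starting from the file entry, extent lengths are summed until the
 *	byte offset of 'block' is reached.  On success the extent location,
 *	its length and the block offset within that extent are returned via
 *	eloc/elen/offset together with the extent type; -1 is returned if
 *	the block lies beyond the recorded extents.
 */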
2068int8_t inode_bmap(struct inode *inode, sector_t block, struct extent_position *pos,
2069	kernel_lb_addr *eloc, uint32_t *elen, sector_t *offset)
2070{
2071	loff_t lbcount = 0, bcount = (loff_t)block << inode->i_sb->s_blocksize_bits;
2072	int8_t etype;
2073
2074	if (block < 0)
2075	{
2076		printk(KERN_ERR "udf: inode_bmap: block < 0\n");
2077		return -1;
2078	}
2079
2080	pos->offset = 0;
2081	pos->block = UDF_I_LOCATION(inode);
2082	pos->bh = NULL;
2083	*elen = 0;
2084
2085	do
2086	{
2087		if ((etype = udf_next_aext(inode, pos, eloc, elen, 1)) == -1)
2088		{
2089			*offset = (bcount - lbcount) >> inode->i_sb->s_blocksize_bits;
2090			UDF_I_LENEXTENTS(inode) = lbcount;
2091			return -1;
2092		}
2093		lbcount += *elen;
2094	} while (lbcount <= bcount);
2095
2096	*offset = (bcount + *elen - lbcount) >> inode->i_sb->s_blocksize_bits;
2097
2098	return etype;
2099}
2100
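/*
 * udf_block_map
 *
 * PURPOSE
 *	Map a file block to its block number on the device.
 *
 * DESCRIPTION
 *	Uses inode_bmap() to locate the extent covering 'block' and returns
 *	the corresponding device block via udf_get_lb_pblock(), or 0 if the
 *	block is not recorded and allocated.  When UDF_FLAG_VARCONV is set
 *	the result is converted with udf_fixed_to_variable().
 */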
2101long udf_block_map(struct inode *inode, sector_t block)
2102{
2103	kernel_lb_addr eloc;
2104	uint32_t elen;
2105	sector_t offset;
2106	struct extent_position epos = { NULL, 0, { 0, 0}};
2107	int ret;
2108
2109	lock_kernel();
2110
2111	if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30))
2112		ret = udf_get_lb_pblock(inode->i_sb, eloc, offset);
2113	else
2114		ret = 0;
2115
2116	unlock_kernel();
2117	brelse(epos.bh);
2118
2119	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2120		return udf_fixed_to_variable(ret);
2121	else
2122		return ret;
2123}
2124