do_balan.c revision c3a9c2109f84882b9b3178f6b1838d550d3df0ec
/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

/* Now we have all buffers that must be used in balancing of the tree */
/* Further calculations can not cause schedule(), and thus the buffer */
/* tree will be stable until the balancing will be finished */
/* balance the tree according to the analysis made before, */
/* and using buffers obtained after all above. */

/**
 ** balance_leaf_when_delete
 ** balance_leaf
 ** do_balance
 **
 **/

#include <asm/uaccess.h>
#include <linux/time.h>
#include <linux/reiserfs_fs.h>
#include <linux/buffer_head.h>
#include <linux/kernel.h>

#ifdef CONFIG_REISERFS_CHECK

/* Debug-only global: the tree_balance currently being applied.  If a
   second copy of a tb ever shows up while this is set, schedule() has
   interrupted do_balance() -- which must never happen, because every
   buffer needed for the balance was pinned before balancing began. */
struct tree_balance *cur_tb = NULL;	/* detects whether more than one
					   copy of tb exists as a means
					   of checking whether schedule
					   is interrupting do_balance */
#endif

/* Mark a leaf buffer dirty through the journal, logging the change in
 * the transaction carried by @tb.  @flag is not used by the body; it
 * keeps the signature uniform for the mark-dirty aliases below. */
inline void do_balance_mark_leaf_dirty(struct tree_balance *tb,
				       struct buffer_head *bh, int flag)
{
	journal_mark_dirty(tb->transaction_handle,
			   tb->transaction_handle->t_super, bh);
}

/* Internal nodes and the superblock are journaled exactly the same way. */
#define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
#define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty

/* summary:
 if deleting something ( tb->insert_size[0] < 0 )
	return(balance_leaf_when_delete()); (flag d handled here)
 else
	if lnum is larger than 0 we put items into the left node
	if rnum is larger than 0 we put items into the right node
	if snum1 is larger than 0 we put items into the new node s1
	if snum2 is larger than 0 we put items into the new node s2
Note that all *num* count new items being created.

It would be easier to read balance_leaf() if each of these summary
lines was a separate procedure rather than being inlined.  I think
that there are many passages here and in balance_leaf_when_delete() in
which two calls to one procedure can replace two passages, and it
might save cache space and improve software maintenance costs to do so.
Vladimir made the perceptive comment that we should offload most of
the decision making in this function into fix_nodes/check_balance, and
then create some sort of structure in tb that says what actions should
be performed by do_balance.

-Hans */

/* Balance leaf node in case of delete or cut: insert_size[0] < 0
 *
 * lnum, rnum can have values >= -1
 *	-1 means that the neighbor must be joined with S
 *	 0 means that nothing should be done with the neighbor
 *	>0 means to shift entirely or partly the specified number of items to the neighbor
 *
 * Returns 0 on all paths except the one where the contents of S[0],
 * L[0] and R[0] are all merged into R[0], which returns -1.
 */
static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int item_pos = PATH_LAST_POSITION(tb->tb_path);
	int pos_in_item = tb->tb_path->pos_in_item;
	struct buffer_info bi;
	int n;
	struct item_head *ih;

	RFALSE(tb->FR[0] && B_LEVEL(tb->FR[0]) != DISK_LEAF_NODE_LEVEL + 1,
	       "vs- 12000: level: wrong FR %z", tb->FR[0]);
	RFALSE(tb->blknum[0] > 1,
	       "PAP-12005: tb->blknum == %d, can not be > 1", tb->blknum[0]);
	RFALSE(!tb->blknum[0] && !PATH_H_PPARENT(tb->tb_path, 0),
	       "PAP-12010: tree can not be empty");

	ih = B_N_PITEM_HEAD(tbS0, item_pos);

	/* Delete or truncate the item */

	switch (flag) {
	case M_DELETE:		/* delete item in S[0] */

		RFALSE(ih_item_len(ih) + IH_SIZE != -tb->insert_size[0],
		       "vs-12013: mode Delete, insert size %d, ih to be deleted %h",
		       -tb->insert_size[0], ih);

		bi.tb = tb;
		bi.bi_bh = tbS0;
		bi.bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
		bi.bi_position = PATH_H_POSITION(tb->tb_path, 1);
		leaf_delete_items(&bi, 0, item_pos, 1, -1);

		/* Item 0 was removed: refresh the left delimiting key in the
		   common parent, taking it from the new first key of S[0], or
		   from the parent itself when S[0] went empty. */
		if (!item_pos && tb->CFL[0]) {
			if (B_NR_ITEMS(tbS0)) {
				replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0,
					    0);
			} else {
				if (!PATH_H_POSITION(tb->tb_path, 1))
					replace_key(tb, tb->CFL[0], tb->lkey[0],
						    PATH_H_PPARENT(tb->tb_path,
								   0), 0);
			}
		}

		RFALSE(!item_pos && !tb->CFL[0],
		       "PAP-12020: tb->CFL[0]==%p, tb->L[0]==%p", tb->CFL[0],
		       tb->L[0]);

		break;

	case M_CUT:{		/* cut item in S[0] */
			bi.tb = tb;
			bi.bi_bh = tbS0;
			bi.bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
			bi.bi_position = PATH_H_POSITION(tb->tb_path, 1);
			if (is_direntry_le_ih(ih)) {

				/* UFS unlink semantics are such that you can only delete one directory entry at a time. */
				/* when we cut a directory tb->insert_size[0] means number of entries to be cut (always 1) */
				tb->insert_size[0] = -1;
				leaf_cut_from_buffer(&bi, item_pos, pos_in_item,
						     -tb->insert_size[0]);

				RFALSE(!item_pos && !pos_in_item && !tb->CFL[0],
				       "PAP-12030: can not change delimiting key. CFL[0]=%p",
				       tb->CFL[0]);

				/* cutting at the very front of S[0] changes its
				   first key, so the delimiting key must follow */
				if (!item_pos && !pos_in_item && tb->CFL[0]) {
					replace_key(tb, tb->CFL[0], tb->lkey[0],
						    tbS0, 0);
				}
			} else {
				leaf_cut_from_buffer(&bi, item_pos, pos_in_item,
						     -tb->insert_size[0]);

				RFALSE(!ih_item_len(ih),
				       "PAP-12035: cut must leave non-zero dynamic length of item");
			}
			break;
		}

	default:
		print_cur_tb("12040");
		reiserfs_panic(tb->tb_sb, "PAP-12040",
			       "unexpected mode: %s(%d)",
			       (flag ==
				M_PASTE) ? "PASTE" : ((flag ==
						       M_INSERT) ? "INSERT" :
						      "UNKNOWN"), flag);
	}

	/* the rule is that no shifting occurs unless by shifting a node can be freed */
	n = B_NR_ITEMS(tbS0);
	if (tb->lnum[0]) {	/* L[0] takes part in balancing */
		if (tb->lnum[0] == -1) {	/* L[0] must be joined with S[0] */
			if (tb->rnum[0] == -1) {	/* R[0] must be also joined with S[0] */
				if (tb->FR[0] == PATH_H_PPARENT(tb->tb_path, 0)) {
					/* all contents of all the 3 buffers will be in L[0] */
					if (PATH_H_POSITION(tb->tb_path, 1) == 0
					    && 1 < B_NR_ITEMS(tb->FR[0]))
						replace_key(tb, tb->CFL[0],
							    tb->lkey[0],
							    tb->FR[0], 1);

					leaf_move_items(LEAF_FROM_S_TO_L, tb, n,
							-1, NULL);
					leaf_move_items(LEAF_FROM_R_TO_L, tb,
							B_NR_ITEMS(tb->R[0]),
							-1, NULL);

					reiserfs_invalidate_buffer(tb, tbS0);
					reiserfs_invalidate_buffer(tb,
								   tb->R[0]);

					return 0;
				}
				/* all contents of all the 3 buffers will be in R[0] */
				leaf_move_items(LEAF_FROM_S_TO_R, tb, n, -1,
						NULL);
				leaf_move_items(LEAF_FROM_L_TO_R, tb,
						B_NR_ITEMS(tb->L[0]), -1, NULL);

				/* right_delimiting_key is correct in R[0] */
				replace_key(tb, tb->CFR[0], tb->rkey[0],
					    tb->R[0], 0);

				reiserfs_invalidate_buffer(tb, tbS0);
				reiserfs_invalidate_buffer(tb, tb->L[0]);

				/* only path that reports the three-way merge
				   into R[0] */
				return -1;
			}

			RFALSE(tb->rnum[0] != 0,
			       "PAP-12045: rnum must be 0 (%d)", tb->rnum[0]);
			/* all contents of L[0] and S[0] will be in L[0] */
			leaf_shift_left(tb, n, -1);

			reiserfs_invalidate_buffer(tb, tbS0);

			return 0;
		}
		/* a part of contents of S[0] will be in L[0] and the rest part of S[0] will be in R[0] */

		RFALSE((tb->lnum[0] + tb->rnum[0] < n) ||
		       (tb->lnum[0] + tb->rnum[0] > n + 1),
		       "PAP-12050: rnum(%d) and lnum(%d) and item number(%d) in S[0] are not consistent",
		       tb->rnum[0], tb->lnum[0], n);
		RFALSE((tb->lnum[0] + tb->rnum[0] == n) &&
		       (tb->lbytes != -1 || tb->rbytes != -1),
		       "PAP-12055: bad rbytes (%d)/lbytes (%d) parameters when items are not split",
		       tb->rbytes, tb->lbytes);
		RFALSE((tb->lnum[0] + tb->rnum[0] == n + 1) &&
		       (tb->lbytes < 1 || tb->rbytes != -1),
		       "PAP-12060: bad rbytes (%d)/lbytes (%d) parameters when items are split",
		       tb->rbytes, tb->lbytes);

		leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
		leaf_shift_right(tb, tb->rnum[0], tb->rbytes);

		reiserfs_invalidate_buffer(tb, tbS0);

		return 0;
	}

	if (tb->rnum[0] == -1) {
		/* all contents of R[0] and S[0] will be in R[0] */
		leaf_shift_right(tb, n, -1);
		reiserfs_invalidate_buffer(tb, tbS0);
		return 0;
	}

	RFALSE(tb->rnum[0],
	       "PAP-12065: bad rnum parameter must be 0 (%d)", tb->rnum[0]);
	return 0;
}

static int balance_leaf(struct tree_balance *tb, struct item_head *ih,	/* item header of inserted item (this is on little endian) */
			const char *body,	/* body of inserted item or bytes to paste */
			int flag,	/* i - insert, d - delete, c - cut, p - paste
					   (see comment to do_balance) */
			struct item_head *insert_key,	/* in our processing of one level we sometimes determine what
This insertion 255 consists of a key or two keys and their corresponding 256 pointers */ 257 struct buffer_head **insert_ptr /* inserted node-ptrs for the next level */ 258 ) 259{ 260 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 261 int item_pos = PATH_LAST_POSITION(tb->tb_path); /* index into the array of item headers in S[0] 262 of the affected item */ 263 struct buffer_info bi; 264 struct buffer_head *S_new[2]; /* new nodes allocated to hold what could not fit into S */ 265 int snum[2]; /* number of items that will be placed 266 into S_new (includes partially shifted 267 items) */ 268 int sbytes[2]; /* if an item is partially shifted into S_new then 269 if it is a directory item 270 it is the number of entries from the item that are shifted into S_new 271 else 272 it is the number of bytes from the item that are shifted into S_new 273 */ 274 int n, i; 275 int ret_val; 276 int pos_in_item; 277 int zeros_num; 278 279 PROC_INFO_INC(tb->tb_sb, balance_at[0]); 280 281 /* Make balance in case insert_size[0] < 0 */ 282 if (tb->insert_size[0] < 0) 283 return balance_leaf_when_delete(tb, flag); 284 285 zeros_num = 0; 286 if (flag == M_INSERT && !body) 287 zeros_num = ih_item_len(ih); 288 289 pos_in_item = tb->tb_path->pos_in_item; 290 /* for indirect item pos_in_item is measured in unformatted node 291 pointers. 
Recalculate to bytes */ 292 if (flag != M_INSERT 293 && is_indirect_le_ih(B_N_PITEM_HEAD(tbS0, item_pos))) 294 pos_in_item *= UNFM_P_SIZE; 295 296 if (tb->lnum[0] > 0) { 297 /* Shift lnum[0] items from S[0] to the left neighbor L[0] */ 298 if (item_pos < tb->lnum[0]) { 299 /* new item or it part falls to L[0], shift it too */ 300 n = B_NR_ITEMS(tb->L[0]); 301 302 switch (flag) { 303 case M_INSERT: /* insert item into L[0] */ 304 305 if (item_pos == tb->lnum[0] - 1 306 && tb->lbytes != -1) { 307 /* part of new item falls into L[0] */ 308 int new_item_len; 309 int version; 310 311 ret_val = 312 leaf_shift_left(tb, tb->lnum[0] - 1, 313 -1); 314 315 /* Calculate item length to insert to S[0] */ 316 new_item_len = 317 ih_item_len(ih) - tb->lbytes; 318 /* Calculate and check item length to insert to L[0] */ 319 put_ih_item_len(ih, 320 ih_item_len(ih) - 321 new_item_len); 322 323 RFALSE(ih_item_len(ih) <= 0, 324 "PAP-12080: there is nothing to insert into L[0]: ih_item_len=%d", 325 ih_item_len(ih)); 326 327 /* Insert new item into L[0] */ 328 bi.tb = tb; 329 bi.bi_bh = tb->L[0]; 330 bi.bi_parent = tb->FL[0]; 331 bi.bi_position = 332 get_left_neighbor_position(tb, 0); 333 leaf_insert_into_buf(&bi, 334 n + item_pos - 335 ret_val, ih, body, 336 zeros_num > 337 ih_item_len(ih) ? 338 ih_item_len(ih) : 339 zeros_num); 340 341 version = ih_version(ih); 342 343 /* Calculate key component, item length and body to insert into S[0] */ 344 set_le_ih_k_offset(ih, 345 le_ih_k_offset(ih) + 346 (tb-> 347 lbytes << 348 (is_indirect_le_ih 349 (ih) ? 
tb->tb_sb-> 350 s_blocksize_bits - 351 UNFM_P_SHIFT : 352 0))); 353 354 put_ih_item_len(ih, new_item_len); 355 if (tb->lbytes > zeros_num) { 356 body += 357 (tb->lbytes - zeros_num); 358 zeros_num = 0; 359 } else 360 zeros_num -= tb->lbytes; 361 362 RFALSE(ih_item_len(ih) <= 0, 363 "PAP-12085: there is nothing to insert into S[0]: ih_item_len=%d", 364 ih_item_len(ih)); 365 } else { 366 /* new item in whole falls into L[0] */ 367 /* Shift lnum[0]-1 items to L[0] */ 368 ret_val = 369 leaf_shift_left(tb, tb->lnum[0] - 1, 370 tb->lbytes); 371 /* Insert new item into L[0] */ 372 bi.tb = tb; 373 bi.bi_bh = tb->L[0]; 374 bi.bi_parent = tb->FL[0]; 375 bi.bi_position = 376 get_left_neighbor_position(tb, 0); 377 leaf_insert_into_buf(&bi, 378 n + item_pos - 379 ret_val, ih, body, 380 zeros_num); 381 tb->insert_size[0] = 0; 382 zeros_num = 0; 383 } 384 break; 385 386 case M_PASTE: /* append item in L[0] */ 387 388 if (item_pos == tb->lnum[0] - 1 389 && tb->lbytes != -1) { 390 /* we must shift the part of the appended item */ 391 if (is_direntry_le_ih 392 (B_N_PITEM_HEAD(tbS0, item_pos))) { 393 394 RFALSE(zeros_num, 395 "PAP-12090: invalid parameter in case of a directory"); 396 /* directory item */ 397 if (tb->lbytes > pos_in_item) { 398 /* new directory entry falls into L[0] */ 399 struct item_head 400 *pasted; 401 int l_pos_in_item = 402 pos_in_item; 403 404 /* Shift lnum[0] - 1 items in whole. 
Shift lbytes - 1 entries from given directory item */ 405 ret_val = 406 leaf_shift_left(tb, 407 tb-> 408 lnum 409 [0], 410 tb-> 411 lbytes 412 - 413 1); 414 if (ret_val 415 && !item_pos) { 416 pasted = 417 B_N_PITEM_HEAD 418 (tb->L[0], 419 B_NR_ITEMS 420 (tb-> 421 L[0]) - 422 1); 423 l_pos_in_item += 424 I_ENTRY_COUNT 425 (pasted) - 426 (tb-> 427 lbytes - 428 1); 429 } 430 431 /* Append given directory entry to directory item */ 432 bi.tb = tb; 433 bi.bi_bh = tb->L[0]; 434 bi.bi_parent = 435 tb->FL[0]; 436 bi.bi_position = 437 get_left_neighbor_position 438 (tb, 0); 439 leaf_paste_in_buffer 440 (&bi, 441 n + item_pos - 442 ret_val, 443 l_pos_in_item, 444 tb->insert_size[0], 445 body, zeros_num); 446 447 /* previous string prepared space for pasting new entry, following string pastes this entry */ 448 449 /* when we have merge directory item, pos_in_item has been changed too */ 450 451 /* paste new directory entry. 1 is entry number */ 452 leaf_paste_entries(&bi, 453 n + 454 item_pos 455 - 456 ret_val, 457 l_pos_in_item, 458 1, 459 (struct 460 reiserfs_de_head 461 *) 462 body, 463 body 464 + 465 DEH_SIZE, 466 tb-> 467 insert_size 468 [0] 469 ); 470 tb->insert_size[0] = 0; 471 } else { 472 /* new directory item doesn't fall into L[0] */ 473 /* Shift lnum[0]-1 items in whole. Shift lbytes directory entries from directory item number lnum[0] */ 474 leaf_shift_left(tb, 475 tb-> 476 lnum[0], 477 tb-> 478 lbytes); 479 } 480 /* Calculate new position to append in item body */ 481 pos_in_item -= tb->lbytes; 482 } else { 483 /* regular object */ 484 RFALSE(tb->lbytes <= 0, 485 "PAP-12095: there is nothing to shift to L[0]. 
lbytes=%d", 486 tb->lbytes); 487 RFALSE(pos_in_item != 488 ih_item_len 489 (B_N_PITEM_HEAD 490 (tbS0, item_pos)), 491 "PAP-12100: incorrect position to paste: item_len=%d, pos_in_item=%d", 492 ih_item_len 493 (B_N_PITEM_HEAD 494 (tbS0, item_pos)), 495 pos_in_item); 496 497 if (tb->lbytes >= pos_in_item) { 498 /* appended item will be in L[0] in whole */ 499 int l_n; 500 501 /* this bytes number must be appended to the last item of L[h] */ 502 l_n = 503 tb->lbytes - 504 pos_in_item; 505 506 /* Calculate new insert_size[0] */ 507 tb->insert_size[0] -= 508 l_n; 509 510 RFALSE(tb-> 511 insert_size[0] <= 512 0, 513 "PAP-12105: there is nothing to paste into L[0]. insert_size=%d", 514 tb-> 515 insert_size[0]); 516 ret_val = 517 leaf_shift_left(tb, 518 tb-> 519 lnum 520 [0], 521 ih_item_len 522 (B_N_PITEM_HEAD 523 (tbS0, 524 item_pos))); 525 /* Append to body of item in L[0] */ 526 bi.tb = tb; 527 bi.bi_bh = tb->L[0]; 528 bi.bi_parent = 529 tb->FL[0]; 530 bi.bi_position = 531 get_left_neighbor_position 532 (tb, 0); 533 leaf_paste_in_buffer 534 (&bi, 535 n + item_pos - 536 ret_val, 537 ih_item_len 538 (B_N_PITEM_HEAD 539 (tb->L[0], 540 n + item_pos - 541 ret_val)), l_n, 542 body, 543 zeros_num > 544 l_n ? 
l_n : 545 zeros_num); 546 /* 0-th item in S0 can be only of DIRECT type when l_n != 0 */ 547 { 548 int version; 549 int temp_l = 550 l_n; 551 552 RFALSE 553 (ih_item_len 554 (B_N_PITEM_HEAD 555 (tbS0, 556 0)), 557 "PAP-12106: item length must be 0"); 558 RFALSE 559 (comp_short_le_keys 560 (B_N_PKEY 561 (tbS0, 0), 562 B_N_PKEY 563 (tb->L[0], 564 n + 565 item_pos 566 - 567 ret_val)), 568 "PAP-12107: items must be of the same file"); 569 if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val))) { 570 temp_l = 571 l_n 572 << 573 (tb-> 574 tb_sb-> 575 s_blocksize_bits 576 - 577 UNFM_P_SHIFT); 578 } 579 /* update key of first item in S0 */ 580 version = 581 ih_version 582 (B_N_PITEM_HEAD 583 (tbS0, 0)); 584 set_le_key_k_offset 585 (version, 586 B_N_PKEY 587 (tbS0, 0), 588 le_key_k_offset 589 (version, 590 B_N_PKEY 591 (tbS0, 592 0)) + 593 temp_l); 594 /* update left delimiting key */ 595 set_le_key_k_offset 596 (version, 597 B_N_PDELIM_KEY 598 (tb-> 599 CFL[0], 600 tb-> 601 lkey[0]), 602 le_key_k_offset 603 (version, 604 B_N_PDELIM_KEY 605 (tb-> 606 CFL[0], 607 tb-> 608 lkey[0])) 609 + temp_l); 610 } 611 612 /* Calculate new body, position in item and insert_size[0] */ 613 if (l_n > zeros_num) { 614 body += 615 (l_n - 616 zeros_num); 617 zeros_num = 0; 618 } else 619 zeros_num -= 620 l_n; 621 pos_in_item = 0; 622 623 RFALSE 624 (comp_short_le_keys 625 (B_N_PKEY(tbS0, 0), 626 B_N_PKEY(tb->L[0], 627 B_NR_ITEMS 628 (tb-> 629 L[0]) - 630 1)) 631 || 632 !op_is_left_mergeable 633 (B_N_PKEY(tbS0, 0), 634 tbS0->b_size) 635 || 636 !op_is_left_mergeable 637 (B_N_PDELIM_KEY 638 (tb->CFL[0], 639 tb->lkey[0]), 640 tbS0->b_size), 641 "PAP-12120: item must be merge-able with left neighboring item"); 642 } else { /* only part of the appended item will be in L[0] */ 643 644 /* Calculate position in item for append in S[0] */ 645 pos_in_item -= 646 tb->lbytes; 647 648 RFALSE(pos_in_item <= 0, 649 "PAP-12125: no place for paste. 
pos_in_item=%d", 650 pos_in_item); 651 652 /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */ 653 leaf_shift_left(tb, 654 tb-> 655 lnum[0], 656 tb-> 657 lbytes); 658 } 659 } 660 } else { /* appended item will be in L[0] in whole */ 661 662 struct item_head *pasted; 663 664 if (!item_pos && op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)) { /* if we paste into first item of S[0] and it is left mergable */ 665 /* then increment pos_in_item by the size of the last item in L[0] */ 666 pasted = 667 B_N_PITEM_HEAD(tb->L[0], 668 n - 1); 669 if (is_direntry_le_ih(pasted)) 670 pos_in_item += 671 ih_entry_count 672 (pasted); 673 else 674 pos_in_item += 675 ih_item_len(pasted); 676 } 677 678 /* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */ 679 ret_val = 680 leaf_shift_left(tb, tb->lnum[0], 681 tb->lbytes); 682 /* Append to body of item in L[0] */ 683 bi.tb = tb; 684 bi.bi_bh = tb->L[0]; 685 bi.bi_parent = tb->FL[0]; 686 bi.bi_position = 687 get_left_neighbor_position(tb, 0); 688 leaf_paste_in_buffer(&bi, 689 n + item_pos - 690 ret_val, 691 pos_in_item, 692 tb->insert_size[0], 693 body, zeros_num); 694 695 /* if appended item is directory, paste entry */ 696 pasted = 697 B_N_PITEM_HEAD(tb->L[0], 698 n + item_pos - 699 ret_val); 700 if (is_direntry_le_ih(pasted)) 701 leaf_paste_entries(&bi, 702 n + 703 item_pos - 704 ret_val, 705 pos_in_item, 706 1, 707 (struct 708 reiserfs_de_head 709 *)body, 710 body + 711 DEH_SIZE, 712 tb-> 713 insert_size 714 [0] 715 ); 716 /* if appended item is indirect item, put unformatted node into un list */ 717 if (is_indirect_le_ih(pasted)) 718 set_ih_free_space(pasted, 0); 719 tb->insert_size[0] = 0; 720 zeros_num = 0; 721 } 722 break; 723 default: /* cases d and t */ 724 reiserfs_panic(tb->tb_sb, "PAP-12130", 725 "lnum > 0: unexpected mode: " 726 " %s(%d)", 727 (flag == 728 M_DELETE) ? "DELETE" : ((flag == 729 M_CUT) 730 ? 
"CUT" 731 : 732 "UNKNOWN"), 733 flag); 734 } 735 } else { 736 /* new item doesn't fall into L[0] */ 737 leaf_shift_left(tb, tb->lnum[0], tb->lbytes); 738 } 739 } 740 741 /* tb->lnum[0] > 0 */ 742 /* Calculate new item position */ 743 item_pos -= (tb->lnum[0] - ((tb->lbytes != -1) ? 1 : 0)); 744 745 if (tb->rnum[0] > 0) { 746 /* shift rnum[0] items from S[0] to the right neighbor R[0] */ 747 n = B_NR_ITEMS(tbS0); 748 switch (flag) { 749 750 case M_INSERT: /* insert item */ 751 if (n - tb->rnum[0] < item_pos) { /* new item or its part falls to R[0] */ 752 if (item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) { /* part of new item falls into R[0] */ 753 loff_t old_key_comp, old_len, 754 r_zeros_number; 755 const char *r_body; 756 int version; 757 loff_t offset; 758 759 leaf_shift_right(tb, tb->rnum[0] - 1, 760 -1); 761 762 version = ih_version(ih); 763 /* Remember key component and item length */ 764 old_key_comp = le_ih_k_offset(ih); 765 old_len = ih_item_len(ih); 766 767 /* Calculate key component and item length to insert into R[0] */ 768 offset = 769 le_ih_k_offset(ih) + 770 ((old_len - 771 tb-> 772 rbytes) << (is_indirect_le_ih(ih) 773 ? 
tb->tb_sb-> 774 s_blocksize_bits - 775 UNFM_P_SHIFT : 0)); 776 set_le_ih_k_offset(ih, offset); 777 put_ih_item_len(ih, tb->rbytes); 778 /* Insert part of the item into R[0] */ 779 bi.tb = tb; 780 bi.bi_bh = tb->R[0]; 781 bi.bi_parent = tb->FR[0]; 782 bi.bi_position = 783 get_right_neighbor_position(tb, 0); 784 if ((old_len - tb->rbytes) > zeros_num) { 785 r_zeros_number = 0; 786 r_body = 787 body + (old_len - 788 tb->rbytes) - 789 zeros_num; 790 } else { 791 r_body = body; 792 r_zeros_number = 793 zeros_num - (old_len - 794 tb->rbytes); 795 zeros_num -= r_zeros_number; 796 } 797 798 leaf_insert_into_buf(&bi, 0, ih, r_body, 799 r_zeros_number); 800 801 /* Replace right delimiting key by first key in R[0] */ 802 replace_key(tb, tb->CFR[0], tb->rkey[0], 803 tb->R[0], 0); 804 805 /* Calculate key component and item length to insert into S[0] */ 806 set_le_ih_k_offset(ih, old_key_comp); 807 put_ih_item_len(ih, 808 old_len - tb->rbytes); 809 810 tb->insert_size[0] -= tb->rbytes; 811 812 } else { /* whole new item falls into R[0] */ 813 814 /* Shift rnum[0]-1 items to R[0] */ 815 ret_val = 816 leaf_shift_right(tb, 817 tb->rnum[0] - 1, 818 tb->rbytes); 819 /* Insert new item into R[0] */ 820 bi.tb = tb; 821 bi.bi_bh = tb->R[0]; 822 bi.bi_parent = tb->FR[0]; 823 bi.bi_position = 824 get_right_neighbor_position(tb, 0); 825 leaf_insert_into_buf(&bi, 826 item_pos - n + 827 tb->rnum[0] - 1, 828 ih, body, 829 zeros_num); 830 831 if (item_pos - n + tb->rnum[0] - 1 == 0) { 832 replace_key(tb, tb->CFR[0], 833 tb->rkey[0], 834 tb->R[0], 0); 835 836 } 837 zeros_num = tb->insert_size[0] = 0; 838 } 839 } else { /* new item or part of it doesn't fall into R[0] */ 840 841 leaf_shift_right(tb, tb->rnum[0], tb->rbytes); 842 } 843 break; 844 845 case M_PASTE: /* append item */ 846 847 if (n - tb->rnum[0] <= item_pos) { /* pasted item or part of it falls to R[0] */ 848 if (item_pos == n - tb->rnum[0] && tb->rbytes != -1) { /* we must shift the part of the appended item */ 849 if 
(is_direntry_le_ih(B_N_PITEM_HEAD(tbS0, item_pos))) { /* we append to directory item */ 850 int entry_count; 851 852 RFALSE(zeros_num, 853 "PAP-12145: invalid parameter in case of a directory"); 854 entry_count = 855 I_ENTRY_COUNT(B_N_PITEM_HEAD 856 (tbS0, 857 item_pos)); 858 if (entry_count - tb->rbytes < 859 pos_in_item) 860 /* new directory entry falls into R[0] */ 861 { 862 int paste_entry_position; 863 864 RFALSE(tb->rbytes - 1 >= 865 entry_count 866 || !tb-> 867 insert_size[0], 868 "PAP-12150: no enough of entries to shift to R[0]: rbytes=%d, entry_count=%d", 869 tb->rbytes, 870 entry_count); 871 /* Shift rnum[0]-1 items in whole. Shift rbytes-1 directory entries from directory item number rnum[0] */ 872 leaf_shift_right(tb, 873 tb-> 874 rnum 875 [0], 876 tb-> 877 rbytes 878 - 1); 879 /* Paste given directory entry to directory item */ 880 paste_entry_position = 881 pos_in_item - 882 entry_count + 883 tb->rbytes - 1; 884 bi.tb = tb; 885 bi.bi_bh = tb->R[0]; 886 bi.bi_parent = 887 tb->FR[0]; 888 bi.bi_position = 889 get_right_neighbor_position 890 (tb, 0); 891 leaf_paste_in_buffer 892 (&bi, 0, 893 paste_entry_position, 894 tb->insert_size[0], 895 body, zeros_num); 896 /* paste entry */ 897 leaf_paste_entries(&bi, 898 0, 899 paste_entry_position, 900 1, 901 (struct 902 reiserfs_de_head 903 *) 904 body, 905 body 906 + 907 DEH_SIZE, 908 tb-> 909 insert_size 910 [0] 911 ); 912 913 if (paste_entry_position 914 == 0) { 915 /* change delimiting keys */ 916 replace_key(tb, 917 tb-> 918 CFR 919 [0], 920 tb-> 921 rkey 922 [0], 923 tb-> 924 R 925 [0], 926 0); 927 } 928 929 tb->insert_size[0] = 0; 930 pos_in_item++; 931 } else { /* new directory entry doesn't fall into R[0] */ 932 933 leaf_shift_right(tb, 934 tb-> 935 rnum 936 [0], 937 tb-> 938 rbytes); 939 } 940 } else { /* regular object */ 941 942 int n_shift, n_rem, 943 r_zeros_number; 944 const char *r_body; 945 946 /* Calculate number of bytes which must be shifted from appended item */ 947 if ((n_shift = 948 
tb->rbytes - 949 tb->insert_size[0]) < 0) 950 n_shift = 0; 951 952 RFALSE(pos_in_item != 953 ih_item_len 954 (B_N_PITEM_HEAD 955 (tbS0, item_pos)), 956 "PAP-12155: invalid position to paste. ih_item_len=%d, pos_in_item=%d", 957 pos_in_item, 958 ih_item_len 959 (B_N_PITEM_HEAD 960 (tbS0, item_pos))); 961 962 leaf_shift_right(tb, 963 tb->rnum[0], 964 n_shift); 965 /* Calculate number of bytes which must remain in body after appending to R[0] */ 966 if ((n_rem = 967 tb->insert_size[0] - 968 tb->rbytes) < 0) 969 n_rem = 0; 970 971 { 972 int version; 973 unsigned long temp_rem = 974 n_rem; 975 976 version = 977 ih_version 978 (B_N_PITEM_HEAD 979 (tb->R[0], 0)); 980 if (is_indirect_le_key 981 (version, 982 B_N_PKEY(tb->R[0], 983 0))) { 984 temp_rem = 985 n_rem << 986 (tb->tb_sb-> 987 s_blocksize_bits 988 - 989 UNFM_P_SHIFT); 990 } 991 set_le_key_k_offset 992 (version, 993 B_N_PKEY(tb->R[0], 994 0), 995 le_key_k_offset 996 (version, 997 B_N_PKEY(tb->R[0], 998 0)) + 999 temp_rem); 1000 set_le_key_k_offset 1001 (version, 1002 B_N_PDELIM_KEY(tb-> 1003 CFR 1004 [0], 1005 tb-> 1006 rkey 1007 [0]), 1008 le_key_k_offset 1009 (version, 1010 B_N_PDELIM_KEY 1011 (tb->CFR[0], 1012 tb->rkey[0])) + 1013 temp_rem); 1014 } 1015/* k_offset (B_N_PKEY(tb->R[0],0)) += n_rem; 1016 k_offset (B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0])) += n_rem;*/ 1017 do_balance_mark_internal_dirty 1018 (tb, tb->CFR[0], 0); 1019 1020 /* Append part of body into R[0] */ 1021 bi.tb = tb; 1022 bi.bi_bh = tb->R[0]; 1023 bi.bi_parent = tb->FR[0]; 1024 bi.bi_position = 1025 get_right_neighbor_position 1026 (tb, 0); 1027 if (n_rem > zeros_num) { 1028 r_zeros_number = 0; 1029 r_body = 1030 body + n_rem - 1031 zeros_num; 1032 } else { 1033 r_body = body; 1034 r_zeros_number = 1035 zeros_num - n_rem; 1036 zeros_num -= 1037 r_zeros_number; 1038 } 1039 1040 leaf_paste_in_buffer(&bi, 0, 1041 n_shift, 1042 tb-> 1043 insert_size 1044 [0] - 1045 n_rem, 1046 r_body, 1047 r_zeros_number); 1048 1049 if (is_indirect_le_ih 1050 
(B_N_PITEM_HEAD 1051 (tb->R[0], 0))) { 1052#if 0 1053 RFALSE(n_rem, 1054 "PAP-12160: paste more than one unformatted node pointer"); 1055#endif 1056 set_ih_free_space 1057 (B_N_PITEM_HEAD 1058 (tb->R[0], 0), 0); 1059 } 1060 tb->insert_size[0] = n_rem; 1061 if (!n_rem) 1062 pos_in_item++; 1063 } 1064 } else { /* pasted item in whole falls into R[0] */ 1065 1066 struct item_head *pasted; 1067 1068 ret_val = 1069 leaf_shift_right(tb, tb->rnum[0], 1070 tb->rbytes); 1071 /* append item in R[0] */ 1072 if (pos_in_item >= 0) { 1073 bi.tb = tb; 1074 bi.bi_bh = tb->R[0]; 1075 bi.bi_parent = tb->FR[0]; 1076 bi.bi_position = 1077 get_right_neighbor_position 1078 (tb, 0); 1079 leaf_paste_in_buffer(&bi, 1080 item_pos - 1081 n + 1082 tb-> 1083 rnum[0], 1084 pos_in_item, 1085 tb-> 1086 insert_size 1087 [0], body, 1088 zeros_num); 1089 } 1090 1091 /* paste new entry, if item is directory item */ 1092 pasted = 1093 B_N_PITEM_HEAD(tb->R[0], 1094 item_pos - n + 1095 tb->rnum[0]); 1096 if (is_direntry_le_ih(pasted) 1097 && pos_in_item >= 0) { 1098 leaf_paste_entries(&bi, 1099 item_pos - 1100 n + 1101 tb->rnum[0], 1102 pos_in_item, 1103 1, 1104 (struct 1105 reiserfs_de_head 1106 *)body, 1107 body + 1108 DEH_SIZE, 1109 tb-> 1110 insert_size 1111 [0] 1112 ); 1113 if (!pos_in_item) { 1114 1115 RFALSE(item_pos - n + 1116 tb->rnum[0], 1117 "PAP-12165: directory item must be first item of node when pasting is in 0th position"); 1118 1119 /* update delimiting keys */ 1120 replace_key(tb, 1121 tb->CFR[0], 1122 tb->rkey[0], 1123 tb->R[0], 1124 0); 1125 } 1126 } 1127 1128 if (is_indirect_le_ih(pasted)) 1129 set_ih_free_space(pasted, 0); 1130 zeros_num = tb->insert_size[0] = 0; 1131 } 1132 } else { /* new item doesn't fall into R[0] */ 1133 1134 leaf_shift_right(tb, tb->rnum[0], tb->rbytes); 1135 } 1136 break; 1137 default: /* cases d and t */ 1138 reiserfs_panic(tb->tb_sb, "PAP-12175", 1139 "rnum > 0: unexpected mode: %s(%d)", 1140 (flag == 1141 M_DELETE) ? "DELETE" : ((flag == 1142 M_CUT) ? 
"CUT" 1143 : "UNKNOWN"), 1144 flag); 1145 } 1146 1147 } 1148 1149 /* tb->rnum[0] > 0 */ 1150 RFALSE(tb->blknum[0] > 3, 1151 "PAP-12180: blknum can not be %d. It must be <= 3", 1152 tb->blknum[0]); 1153 RFALSE(tb->blknum[0] < 0, 1154 "PAP-12185: blknum can not be %d. It must be >= 0", 1155 tb->blknum[0]); 1156 1157 /* if while adding to a node we discover that it is possible to split 1158 it in two, and merge the left part into the left neighbor and the 1159 right part into the right neighbor, eliminating the node */ 1160 if (tb->blknum[0] == 0) { /* node S[0] is empty now */ 1161 1162 RFALSE(!tb->lnum[0] || !tb->rnum[0], 1163 "PAP-12190: lnum and rnum must not be zero"); 1164 /* if insertion was done before 0-th position in R[0], right 1165 delimiting key of the tb->L[0]'s and left delimiting key are 1166 not set correctly */ 1167 if (tb->CFL[0]) { 1168 if (!tb->CFR[0]) 1169 reiserfs_panic(tb->tb_sb, "vs-12195", 1170 "CFR not initialized"); 1171 copy_key(B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]), 1172 B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0])); 1173 do_balance_mark_internal_dirty(tb, tb->CFL[0], 0); 1174 } 1175 1176 reiserfs_invalidate_buffer(tb, tbS0); 1177 return 0; 1178 } 1179 1180 /* Fill new nodes that appear in place of S[0] */ 1181 1182 /* I am told that this copying is because we need an array to enable 1183 the looping code. -Hans */ 1184 snum[0] = tb->s1num, snum[1] = tb->s2num; 1185 sbytes[0] = tb->s1bytes; 1186 sbytes[1] = tb->s2bytes; 1187 for (i = tb->blknum[0] - 2; i >= 0; i--) { 1188 1189 RFALSE(!snum[i], "PAP-12200: snum[%d] == %d. 
Must be > 0", i, 1190 snum[i]); 1191 1192 /* here we shift from S to S_new nodes */ 1193 1194 S_new[i] = get_FEB(tb); 1195 1196 /* initialized block type and tree level */ 1197 set_blkh_level(B_BLK_HEAD(S_new[i]), DISK_LEAF_NODE_LEVEL); 1198 1199 n = B_NR_ITEMS(tbS0); 1200 1201 switch (flag) { 1202 case M_INSERT: /* insert item */ 1203 1204 if (n - snum[i] < item_pos) { /* new item or it's part falls to first new node S_new[i] */ 1205 if (item_pos == n - snum[i] + 1 && sbytes[i] != -1) { /* part of new item falls into S_new[i] */ 1206 int old_key_comp, old_len, 1207 r_zeros_number; 1208 const char *r_body; 1209 int version; 1210 1211 /* Move snum[i]-1 items from S[0] to S_new[i] */ 1212 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, 1213 snum[i] - 1, -1, 1214 S_new[i]); 1215 /* Remember key component and item length */ 1216 version = ih_version(ih); 1217 old_key_comp = le_ih_k_offset(ih); 1218 old_len = ih_item_len(ih); 1219 1220 /* Calculate key component and item length to insert into S_new[i] */ 1221 set_le_ih_k_offset(ih, 1222 le_ih_k_offset(ih) + 1223 ((old_len - 1224 sbytes[i]) << 1225 (is_indirect_le_ih 1226 (ih) ? 
tb->tb_sb-> 1227 s_blocksize_bits - 1228 UNFM_P_SHIFT : 1229 0))); 1230 1231 put_ih_item_len(ih, sbytes[i]); 1232 1233 /* Insert part of the item into S_new[i] before 0-th item */ 1234 bi.tb = tb; 1235 bi.bi_bh = S_new[i]; 1236 bi.bi_parent = NULL; 1237 bi.bi_position = 0; 1238 1239 if ((old_len - sbytes[i]) > zeros_num) { 1240 r_zeros_number = 0; 1241 r_body = 1242 body + (old_len - 1243 sbytes[i]) - 1244 zeros_num; 1245 } else { 1246 r_body = body; 1247 r_zeros_number = 1248 zeros_num - (old_len - 1249 sbytes[i]); 1250 zeros_num -= r_zeros_number; 1251 } 1252 1253 leaf_insert_into_buf(&bi, 0, ih, r_body, 1254 r_zeros_number); 1255 1256 /* Calculate key component and item length to insert into S[i] */ 1257 set_le_ih_k_offset(ih, old_key_comp); 1258 put_ih_item_len(ih, 1259 old_len - sbytes[i]); 1260 tb->insert_size[0] -= sbytes[i]; 1261 } else { /* whole new item falls into S_new[i] */ 1262 1263 /* Shift snum[0] - 1 items to S_new[i] (sbytes[i] of split item) */ 1264 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, 1265 snum[i] - 1, sbytes[i], 1266 S_new[i]); 1267 1268 /* Insert new item into S_new[i] */ 1269 bi.tb = tb; 1270 bi.bi_bh = S_new[i]; 1271 bi.bi_parent = NULL; 1272 bi.bi_position = 0; 1273 leaf_insert_into_buf(&bi, 1274 item_pos - n + 1275 snum[i] - 1, ih, 1276 body, zeros_num); 1277 1278 zeros_num = tb->insert_size[0] = 0; 1279 } 1280 } 1281 1282 else { /* new item or it part don't falls into S_new[i] */ 1283 1284 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, 1285 snum[i], sbytes[i], S_new[i]); 1286 } 1287 break; 1288 1289 case M_PASTE: /* append item */ 1290 1291 if (n - snum[i] <= item_pos) { /* pasted item or part if it falls to S_new[i] */ 1292 if (item_pos == n - snum[i] && sbytes[i] != -1) { /* we must shift part of the appended item */ 1293 struct item_head *aux_ih; 1294 1295 RFALSE(ih, "PAP-12210: ih must be 0"); 1296 1297 if (is_direntry_le_ih 1298 (aux_ih = 1299 B_N_PITEM_HEAD(tbS0, item_pos))) { 1300 /* we append to directory item */ 1301 1302 int 
entry_count; 1303 1304 entry_count = 1305 ih_entry_count(aux_ih); 1306 1307 if (entry_count - sbytes[i] < 1308 pos_in_item 1309 && pos_in_item <= 1310 entry_count) { 1311 /* new directory entry falls into S_new[i] */ 1312 1313 RFALSE(!tb-> 1314 insert_size[0], 1315 "PAP-12215: insert_size is already 0"); 1316 RFALSE(sbytes[i] - 1 >= 1317 entry_count, 1318 "PAP-12220: there are no so much entries (%d), only %d", 1319 sbytes[i] - 1, 1320 entry_count); 1321 1322 /* Shift snum[i]-1 items in whole. Shift sbytes[i] directory entries from directory item number snum[i] */ 1323 leaf_move_items 1324 (LEAF_FROM_S_TO_SNEW, 1325 tb, snum[i], 1326 sbytes[i] - 1, 1327 S_new[i]); 1328 /* Paste given directory entry to directory item */ 1329 bi.tb = tb; 1330 bi.bi_bh = S_new[i]; 1331 bi.bi_parent = NULL; 1332 bi.bi_position = 0; 1333 leaf_paste_in_buffer 1334 (&bi, 0, 1335 pos_in_item - 1336 entry_count + 1337 sbytes[i] - 1, 1338 tb->insert_size[0], 1339 body, zeros_num); 1340 /* paste new directory entry */ 1341 leaf_paste_entries(&bi, 1342 0, 1343 pos_in_item 1344 - 1345 entry_count 1346 + 1347 sbytes 1348 [i] - 1349 1, 1, 1350 (struct 1351 reiserfs_de_head 1352 *) 1353 body, 1354 body 1355 + 1356 DEH_SIZE, 1357 tb-> 1358 insert_size 1359 [0] 1360 ); 1361 tb->insert_size[0] = 0; 1362 pos_in_item++; 1363 } else { /* new directory entry doesn't fall into S_new[i] */ 1364 leaf_move_items 1365 (LEAF_FROM_S_TO_SNEW, 1366 tb, snum[i], 1367 sbytes[i], 1368 S_new[i]); 1369 } 1370 } else { /* regular object */ 1371 1372 int n_shift, n_rem, 1373 r_zeros_number; 1374 const char *r_body; 1375 1376 RFALSE(pos_in_item != 1377 ih_item_len 1378 (B_N_PITEM_HEAD 1379 (tbS0, item_pos)) 1380 || tb->insert_size[0] <= 1381 0, 1382 "PAP-12225: item too short or insert_size <= 0"); 1383 1384 /* Calculate number of bytes which must be shifted from appended item */ 1385 n_shift = 1386 sbytes[i] - 1387 tb->insert_size[0]; 1388 if (n_shift < 0) 1389 n_shift = 0; 1390 leaf_move_items 1391 
(LEAF_FROM_S_TO_SNEW, tb, 1392 snum[i], n_shift, 1393 S_new[i]); 1394 1395 /* Calculate number of bytes which must remain in body after append to S_new[i] */ 1396 n_rem = 1397 tb->insert_size[0] - 1398 sbytes[i]; 1399 if (n_rem < 0) 1400 n_rem = 0; 1401 /* Append part of body into S_new[0] */ 1402 bi.tb = tb; 1403 bi.bi_bh = S_new[i]; 1404 bi.bi_parent = NULL; 1405 bi.bi_position = 0; 1406 1407 if (n_rem > zeros_num) { 1408 r_zeros_number = 0; 1409 r_body = 1410 body + n_rem - 1411 zeros_num; 1412 } else { 1413 r_body = body; 1414 r_zeros_number = 1415 zeros_num - n_rem; 1416 zeros_num -= 1417 r_zeros_number; 1418 } 1419 1420 leaf_paste_in_buffer(&bi, 0, 1421 n_shift, 1422 tb-> 1423 insert_size 1424 [0] - 1425 n_rem, 1426 r_body, 1427 r_zeros_number); 1428 { 1429 struct item_head *tmp; 1430 1431 tmp = 1432 B_N_PITEM_HEAD(S_new 1433 [i], 1434 0); 1435 if (is_indirect_le_ih 1436 (tmp)) { 1437 set_ih_free_space 1438 (tmp, 0); 1439 set_le_ih_k_offset 1440 (tmp, 1441 le_ih_k_offset 1442 (tmp) + 1443 (n_rem << 1444 (tb-> 1445 tb_sb-> 1446 s_blocksize_bits 1447 - 1448 UNFM_P_SHIFT))); 1449 } else { 1450 set_le_ih_k_offset 1451 (tmp, 1452 le_ih_k_offset 1453 (tmp) + 1454 n_rem); 1455 } 1456 } 1457 1458 tb->insert_size[0] = n_rem; 1459 if (!n_rem) 1460 pos_in_item++; 1461 } 1462 } else 1463 /* item falls wholly into S_new[i] */ 1464 { 1465 int leaf_mi; 1466 struct item_head *pasted; 1467 1468#ifdef CONFIG_REISERFS_CHECK 1469 struct item_head *ih_check = 1470 B_N_PITEM_HEAD(tbS0, item_pos); 1471 1472 if (!is_direntry_le_ih(ih_check) 1473 && (pos_in_item != ih_item_len(ih_check) 1474 || tb->insert_size[0] <= 0)) 1475 reiserfs_panic(tb->tb_sb, 1476 "PAP-12235", 1477 "pos_in_item " 1478 "must be equal " 1479 "to ih_item_len"); 1480#endif /* CONFIG_REISERFS_CHECK */ 1481 1482 leaf_mi = 1483 leaf_move_items(LEAF_FROM_S_TO_SNEW, 1484 tb, snum[i], 1485 sbytes[i], 1486 S_new[i]); 1487 1488 RFALSE(leaf_mi, 1489 "PAP-12240: unexpected value returned by leaf_move_items (%d)", 1490 
leaf_mi); 1491 1492 /* paste into item */ 1493 bi.tb = tb; 1494 bi.bi_bh = S_new[i]; 1495 bi.bi_parent = NULL; 1496 bi.bi_position = 0; 1497 leaf_paste_in_buffer(&bi, 1498 item_pos - n + 1499 snum[i], 1500 pos_in_item, 1501 tb->insert_size[0], 1502 body, zeros_num); 1503 1504 pasted = 1505 B_N_PITEM_HEAD(S_new[i], 1506 item_pos - n + 1507 snum[i]); 1508 if (is_direntry_le_ih(pasted)) { 1509 leaf_paste_entries(&bi, 1510 item_pos - 1511 n + snum[i], 1512 pos_in_item, 1513 1, 1514 (struct 1515 reiserfs_de_head 1516 *)body, 1517 body + 1518 DEH_SIZE, 1519 tb-> 1520 insert_size 1521 [0] 1522 ); 1523 } 1524 1525 /* if we paste to indirect item update ih_free_space */ 1526 if (is_indirect_le_ih(pasted)) 1527 set_ih_free_space(pasted, 0); 1528 zeros_num = tb->insert_size[0] = 0; 1529 } 1530 } 1531 1532 else { /* pasted item doesn't fall into S_new[i] */ 1533 1534 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, 1535 snum[i], sbytes[i], S_new[i]); 1536 } 1537 break; 1538 default: /* cases d and t */ 1539 reiserfs_panic(tb->tb_sb, "PAP-12245", 1540 "blknum > 2: unexpected mode: %s(%d)", 1541 (flag == 1542 M_DELETE) ? "DELETE" : ((flag == 1543 M_CUT) ? 
"CUT" 1544 : "UNKNOWN"), 1545 flag); 1546 } 1547 1548 memcpy(insert_key + i, B_N_PKEY(S_new[i], 0), KEY_SIZE); 1549 insert_ptr[i] = S_new[i]; 1550 1551 RFALSE(!buffer_journaled(S_new[i]) 1552 || buffer_journal_dirty(S_new[i]) 1553 || buffer_dirty(S_new[i]), "PAP-12247: S_new[%d] : (%b)", 1554 i, S_new[i]); 1555 } 1556 1557 /* if the affected item was not wholly shifted then we perform all necessary operations on that part or whole of the 1558 affected item which remains in S */ 1559 if (0 <= item_pos && item_pos < tb->s0num) { /* if we must insert or append into buffer S[0] */ 1560 1561 switch (flag) { 1562 case M_INSERT: /* insert item into S[0] */ 1563 bi.tb = tb; 1564 bi.bi_bh = tbS0; 1565 bi.bi_parent = PATH_H_PPARENT(tb->tb_path, 0); 1566 bi.bi_position = PATH_H_POSITION(tb->tb_path, 1); 1567 leaf_insert_into_buf(&bi, item_pos, ih, body, 1568 zeros_num); 1569 1570 /* If we insert the first key change the delimiting key */ 1571 if (item_pos == 0) { 1572 if (tb->CFL[0]) /* can be 0 in reiserfsck */ 1573 replace_key(tb, tb->CFL[0], tb->lkey[0], 1574 tbS0, 0); 1575 1576 } 1577 break; 1578 1579 case M_PASTE:{ /* append item in S[0] */ 1580 struct item_head *pasted; 1581 1582 pasted = B_N_PITEM_HEAD(tbS0, item_pos); 1583 /* when directory, may be new entry already pasted */ 1584 if (is_direntry_le_ih(pasted)) { 1585 if (pos_in_item >= 0 && 1586 pos_in_item <= 1587 ih_entry_count(pasted)) { 1588 1589 RFALSE(!tb->insert_size[0], 1590 "PAP-12260: insert_size is 0 already"); 1591 1592 /* prepare space */ 1593 bi.tb = tb; 1594 bi.bi_bh = tbS0; 1595 bi.bi_parent = 1596 PATH_H_PPARENT(tb->tb_path, 1597 0); 1598 bi.bi_position = 1599 PATH_H_POSITION(tb->tb_path, 1600 1); 1601 leaf_paste_in_buffer(&bi, 1602 item_pos, 1603 pos_in_item, 1604 tb-> 1605 insert_size 1606 [0], body, 1607 zeros_num); 1608 1609 /* paste entry */ 1610 leaf_paste_entries(&bi, 1611 item_pos, 1612 pos_in_item, 1613 1, 1614 (struct 1615 reiserfs_de_head 1616 *)body, 1617 body + 1618 DEH_SIZE, 1619 tb-> 
1620 insert_size 1621 [0] 1622 ); 1623 if (!item_pos && !pos_in_item) { 1624 RFALSE(!tb->CFL[0] 1625 || !tb->L[0], 1626 "PAP-12270: CFL[0]/L[0] must be specified"); 1627 if (tb->CFL[0]) { 1628 replace_key(tb, 1629 tb-> 1630 CFL 1631 [0], 1632 tb-> 1633 lkey 1634 [0], 1635 tbS0, 1636 0); 1637 1638 } 1639 } 1640 tb->insert_size[0] = 0; 1641 } 1642 } else { /* regular object */ 1643 if (pos_in_item == ih_item_len(pasted)) { 1644 1645 RFALSE(tb->insert_size[0] <= 0, 1646 "PAP-12275: insert size must not be %d", 1647 tb->insert_size[0]); 1648 bi.tb = tb; 1649 bi.bi_bh = tbS0; 1650 bi.bi_parent = 1651 PATH_H_PPARENT(tb->tb_path, 1652 0); 1653 bi.bi_position = 1654 PATH_H_POSITION(tb->tb_path, 1655 1); 1656 leaf_paste_in_buffer(&bi, 1657 item_pos, 1658 pos_in_item, 1659 tb-> 1660 insert_size 1661 [0], body, 1662 zeros_num); 1663 1664 if (is_indirect_le_ih(pasted)) { 1665#if 0 1666 RFALSE(tb-> 1667 insert_size[0] != 1668 UNFM_P_SIZE, 1669 "PAP-12280: insert_size for indirect item must be %d, not %d", 1670 UNFM_P_SIZE, 1671 tb-> 1672 insert_size[0]); 1673#endif 1674 set_ih_free_space 1675 (pasted, 0); 1676 } 1677 tb->insert_size[0] = 0; 1678 } 1679#ifdef CONFIG_REISERFS_CHECK 1680 else { 1681 if (tb->insert_size[0]) { 1682 print_cur_tb("12285"); 1683 reiserfs_panic(tb-> 1684 tb_sb, 1685 "PAP-12285", 1686 "insert_size " 1687 "must be 0 " 1688 "(%d)", 1689 tb->insert_size[0]); 1690 } 1691 } 1692#endif /* CONFIG_REISERFS_CHECK */ 1693 1694 } 1695 } /* case M_PASTE: */ 1696 } 1697 } 1698#ifdef CONFIG_REISERFS_CHECK 1699 if (flag == M_PASTE && tb->insert_size[0]) { 1700 print_cur_tb("12290"); 1701 reiserfs_panic(tb->tb_sb, 1702 "PAP-12290", "insert_size is still not 0 (%d)", 1703 tb->insert_size[0]); 1704 } 1705#endif /* CONFIG_REISERFS_CHECK */ 1706 return 0; 1707} /* Leaf level of the tree is balanced (end of balance_leaf) */ 1708 1709/* Make empty node */ 1710void make_empty_node(struct buffer_info *bi) 1711{ 1712 struct block_head *blkh; 1713 1714 RFALSE(bi->bi_bh == NULL, 
"PAP-12295: pointer to the buffer is NULL"); 1715 1716 blkh = B_BLK_HEAD(bi->bi_bh); 1717 set_blkh_nr_item(blkh, 0); 1718 set_blkh_free_space(blkh, MAX_CHILD_SIZE(bi->bi_bh)); 1719 1720 if (bi->bi_parent) 1721 B_N_CHILD(bi->bi_parent, bi->bi_position)->dc_size = 0; /* Endian safe if 0 */ 1722} 1723 1724/* Get first empty buffer */ 1725struct buffer_head *get_FEB(struct tree_balance *tb) 1726{ 1727 int i; 1728 struct buffer_head *first_b; 1729 struct buffer_info bi; 1730 1731 for (i = 0; i < MAX_FEB_SIZE; i++) 1732 if (tb->FEB[i] != NULL) 1733 break; 1734 1735 if (i == MAX_FEB_SIZE) 1736 reiserfs_panic(tb->tb_sb, "vs-12300", "FEB list is empty"); 1737 1738 bi.tb = tb; 1739 bi.bi_bh = first_b = tb->FEB[i]; 1740 bi.bi_parent = NULL; 1741 bi.bi_position = 0; 1742 make_empty_node(&bi); 1743 set_buffer_uptodate(first_b); 1744 tb->FEB[i] = NULL; 1745 tb->used[i] = first_b; 1746 1747 return (first_b); 1748} 1749 1750/* This is now used because reiserfs_free_block has to be able to 1751** schedule. 
1752*/ 1753static void store_thrown(struct tree_balance *tb, struct buffer_head *bh) 1754{ 1755 int i; 1756 1757 if (buffer_dirty(bh)) 1758 reiserfs_warning(tb->tb_sb, "reiserfs-12320", 1759 "called with dirty buffer"); 1760 for (i = 0; i < ARRAY_SIZE(tb->thrown); i++) 1761 if (!tb->thrown[i]) { 1762 tb->thrown[i] = bh; 1763 get_bh(bh); /* free_thrown puts this */ 1764 return; 1765 } 1766 reiserfs_warning(tb->tb_sb, "reiserfs-12321", 1767 "too many thrown buffers"); 1768} 1769 1770static void free_thrown(struct tree_balance *tb) 1771{ 1772 int i; 1773 b_blocknr_t blocknr; 1774 for (i = 0; i < ARRAY_SIZE(tb->thrown); i++) { 1775 if (tb->thrown[i]) { 1776 blocknr = tb->thrown[i]->b_blocknr; 1777 if (buffer_dirty(tb->thrown[i])) 1778 reiserfs_warning(tb->tb_sb, "reiserfs-12322", 1779 "called with dirty buffer %d", 1780 blocknr); 1781 brelse(tb->thrown[i]); /* incremented in store_thrown */ 1782 reiserfs_free_block(tb->transaction_handle, NULL, 1783 blocknr, 0); 1784 } 1785 } 1786} 1787 1788void reiserfs_invalidate_buffer(struct tree_balance *tb, struct buffer_head *bh) 1789{ 1790 struct block_head *blkh; 1791 blkh = B_BLK_HEAD(bh); 1792 set_blkh_level(blkh, FREE_LEVEL); 1793 set_blkh_nr_item(blkh, 0); 1794 1795 clear_buffer_dirty(bh); 1796 store_thrown(tb, bh); 1797} 1798 1799/* Replace n_dest'th key in buffer dest by n_src'th key of buffer src.*/ 1800void replace_key(struct tree_balance *tb, struct buffer_head *dest, int n_dest, 1801 struct buffer_head *src, int n_src) 1802{ 1803 1804 RFALSE(dest == NULL || src == NULL, 1805 "vs-12305: source or destination buffer is 0 (src=%p, dest=%p)", 1806 src, dest); 1807 RFALSE(!B_IS_KEYS_LEVEL(dest), 1808 "vs-12310: invalid level (%z) for destination buffer. 
dest must be leaf", 1809 dest); 1810 RFALSE(n_dest < 0 || n_src < 0, 1811 "vs-12315: src(%d) or dest(%d) key number < 0", n_src, n_dest); 1812 RFALSE(n_dest >= B_NR_ITEMS(dest) || n_src >= B_NR_ITEMS(src), 1813 "vs-12320: src(%d(%d)) or dest(%d(%d)) key number is too big", 1814 n_src, B_NR_ITEMS(src), n_dest, B_NR_ITEMS(dest)); 1815 1816 if (B_IS_ITEMS_LEVEL(src)) 1817 /* source buffer contains leaf node */ 1818 memcpy(B_N_PDELIM_KEY(dest, n_dest), B_N_PITEM_HEAD(src, n_src), 1819 KEY_SIZE); 1820 else 1821 memcpy(B_N_PDELIM_KEY(dest, n_dest), B_N_PDELIM_KEY(src, n_src), 1822 KEY_SIZE); 1823 1824 do_balance_mark_internal_dirty(tb, dest, 0); 1825} 1826 1827int get_left_neighbor_position(struct tree_balance *tb, int h) 1828{ 1829 int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1); 1830 1831 RFALSE(PATH_H_PPARENT(tb->tb_path, h) == NULL || tb->FL[h] == NULL, 1832 "vs-12325: FL[%d](%p) or F[%d](%p) does not exist", 1833 h, tb->FL[h], h, PATH_H_PPARENT(tb->tb_path, h)); 1834 1835 if (Sh_position == 0) 1836 return B_NR_ITEMS(tb->FL[h]); 1837 else 1838 return Sh_position - 1; 1839} 1840 1841int get_right_neighbor_position(struct tree_balance *tb, int h) 1842{ 1843 int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1); 1844 1845 RFALSE(PATH_H_PPARENT(tb->tb_path, h) == NULL || tb->FR[h] == NULL, 1846 "vs-12330: F[%d](%p) or FR[%d](%p) does not exist", 1847 h, PATH_H_PPARENT(tb->tb_path, h), h, tb->FR[h]); 1848 1849 if (Sh_position == B_NR_ITEMS(PATH_H_PPARENT(tb->tb_path, h))) 1850 return 0; 1851 else 1852 return Sh_position + 1; 1853} 1854 1855#ifdef CONFIG_REISERFS_CHECK 1856 1857int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value); 1858static void check_internal_node(struct super_block *s, struct buffer_head *bh, 1859 char *mes) 1860{ 1861 struct disk_child *dc; 1862 int i; 1863 1864 RFALSE(!bh, "PAP-12336: bh == 0"); 1865 1866 if (!bh || !B_IS_IN_TREE(bh)) 1867 return; 1868 1869 RFALSE(!buffer_dirty(bh) && 1870 !(buffer_journaled(bh) || 
buffer_journal_dirty(bh)), 1871 "PAP-12337: buffer (%b) must be dirty", bh); 1872 dc = B_N_CHILD(bh, 0); 1873 1874 for (i = 0; i <= B_NR_ITEMS(bh); i++, dc++) { 1875 if (!is_reusable(s, dc_block_number(dc), 1)) { 1876 print_cur_tb(mes); 1877 reiserfs_panic(s, "PAP-12338", 1878 "invalid child pointer %y in %b", 1879 dc, bh); 1880 } 1881 } 1882} 1883 1884static int locked_or_not_in_tree(struct tree_balance *tb, 1885 struct buffer_head *bh, char *which) 1886{ 1887 if ((!buffer_journal_prepared(bh) && buffer_locked(bh)) || 1888 !B_IS_IN_TREE(bh)) { 1889 reiserfs_warning(tb->tb_sb, "vs-12339", "%s (%b)", which, bh); 1890 return 1; 1891 } 1892 return 0; 1893} 1894 1895static int check_before_balancing(struct tree_balance *tb) 1896{ 1897 int retval = 0; 1898 1899 if (cur_tb) { 1900 reiserfs_panic(tb->tb_sb, "vs-12335", "suspect that schedule " 1901 "occurred based on cur_tb not being null at " 1902 "this point in code. do_balance cannot properly " 1903 "handle schedule occurring while it runs."); 1904 } 1905 1906 /* double check that buffers that we will modify are unlocked. (fix_nodes should already have 1907 prepped all of these for us). 
*/ 1908 if (tb->lnum[0]) { 1909 retval |= locked_or_not_in_tree(tb, tb->L[0], "L[0]"); 1910 retval |= locked_or_not_in_tree(tb, tb->FL[0], "FL[0]"); 1911 retval |= locked_or_not_in_tree(tb, tb->CFL[0], "CFL[0]"); 1912 check_leaf(tb->L[0]); 1913 } 1914 if (tb->rnum[0]) { 1915 retval |= locked_or_not_in_tree(tb, tb->R[0], "R[0]"); 1916 retval |= locked_or_not_in_tree(tb, tb->FR[0], "FR[0]"); 1917 retval |= locked_or_not_in_tree(tb, tb->CFR[0], "CFR[0]"); 1918 check_leaf(tb->R[0]); 1919 } 1920 retval |= locked_or_not_in_tree(tb, PATH_PLAST_BUFFER(tb->tb_path), 1921 "S[0]"); 1922 check_leaf(PATH_PLAST_BUFFER(tb->tb_path)); 1923 1924 return retval; 1925} 1926 1927static void check_after_balance_leaf(struct tree_balance *tb) 1928{ 1929 if (tb->lnum[0]) { 1930 if (B_FREE_SPACE(tb->L[0]) != 1931 MAX_CHILD_SIZE(tb->L[0]) - 1932 dc_size(B_N_CHILD 1933 (tb->FL[0], get_left_neighbor_position(tb, 0)))) { 1934 print_cur_tb("12221"); 1935 reiserfs_panic(tb->tb_sb, "PAP-12355", 1936 "shift to left was incorrect"); 1937 } 1938 } 1939 if (tb->rnum[0]) { 1940 if (B_FREE_SPACE(tb->R[0]) != 1941 MAX_CHILD_SIZE(tb->R[0]) - 1942 dc_size(B_N_CHILD 1943 (tb->FR[0], get_right_neighbor_position(tb, 0)))) { 1944 print_cur_tb("12222"); 1945 reiserfs_panic(tb->tb_sb, "PAP-12360", 1946 "shift to right was incorrect"); 1947 } 1948 } 1949 if (PATH_H_PBUFFER(tb->tb_path, 1) && 1950 (B_FREE_SPACE(PATH_H_PBUFFER(tb->tb_path, 0)) != 1951 (MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)) - 1952 dc_size(B_N_CHILD(PATH_H_PBUFFER(tb->tb_path, 1), 1953 PATH_H_POSITION(tb->tb_path, 1)))))) { 1954 int left = B_FREE_SPACE(PATH_H_PBUFFER(tb->tb_path, 0)); 1955 int right = (MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)) - 1956 dc_size(B_N_CHILD(PATH_H_PBUFFER(tb->tb_path, 1), 1957 PATH_H_POSITION(tb->tb_path, 1958 1)))); 1959 print_cur_tb("12223"); 1960 reiserfs_warning(tb->tb_sb, "reiserfs-12363", 1961 "B_FREE_SPACE (PATH_H_PBUFFER(tb->tb_path,0)) = %d; " 1962 "MAX_CHILD_SIZE (%d) - dc_size( %y, %d ) [%d] = %d", 
1963 left, 1964 MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)), 1965 PATH_H_PBUFFER(tb->tb_path, 1), 1966 PATH_H_POSITION(tb->tb_path, 1), 1967 dc_size(B_N_CHILD 1968 (PATH_H_PBUFFER(tb->tb_path, 1), 1969 PATH_H_POSITION(tb->tb_path, 1))), 1970 right); 1971 reiserfs_panic(tb->tb_sb, "PAP-12365", "S is incorrect"); 1972 } 1973} 1974 1975static void check_leaf_level(struct tree_balance *tb) 1976{ 1977 check_leaf(tb->L[0]); 1978 check_leaf(tb->R[0]); 1979 check_leaf(PATH_PLAST_BUFFER(tb->tb_path)); 1980} 1981 1982static void check_internal_levels(struct tree_balance *tb) 1983{ 1984 int h; 1985 1986 /* check all internal nodes */ 1987 for (h = 1; tb->insert_size[h]; h++) { 1988 check_internal_node(tb->tb_sb, PATH_H_PBUFFER(tb->tb_path, h), 1989 "BAD BUFFER ON PATH"); 1990 if (tb->lnum[h]) 1991 check_internal_node(tb->tb_sb, tb->L[h], "BAD L"); 1992 if (tb->rnum[h]) 1993 check_internal_node(tb->tb_sb, tb->R[h], "BAD R"); 1994 } 1995 1996} 1997 1998#endif 1999 2000/* Now we have all of the buffers that must be used in balancing of 2001 the tree. We rely on the assumption that schedule() will not occur 2002 while do_balance works. ( Only interrupt handlers are acceptable.) 2003 We balance the tree according to the analysis made before this, 2004 using buffers already obtained. For SMP support it will someday be 2005 necessary to add ordered locking of tb. */ 2006 2007/* Some interesting rules of balancing: 2008 2009 we delete a maximum of two nodes per level per balancing: we never 2010 delete R, when we delete two of three nodes L, S, R then we move 2011 them into R. 
2012 2013 we only delete L if we are deleting two nodes, if we delete only 2014 one node we delete S 2015 2016 if we shift leaves then we shift as much as we can: this is a 2017 deliberate policy of extremism in node packing which results in 2018 higher average utilization after repeated random balance operations 2019 at the cost of more memory copies and more balancing as a result of 2020 small insertions to full nodes. 2021 2022 if we shift internal nodes we try to evenly balance the node 2023 utilization, with consequent less balancing at the cost of lower 2024 utilization. 2025 2026 one could argue that the policy for directories in leaves should be 2027 that of internal nodes, but we will wait until another day to 2028 evaluate this.... It would be nice to someday measure and prove 2029 these assumptions as to what is optimal.... 2030 2031*/ 2032 2033static inline void do_balance_starts(struct tree_balance *tb) 2034{ 2035 /* use print_cur_tb() to see initial state of struct 2036 tree_balance */ 2037 2038 /* store_print_tb (tb); */ 2039 2040 /* do not delete, just comment it out */ 2041/* print_tb(flag, PATH_LAST_POSITION(tb->tb_path), tb->tb_path->pos_in_item, tb, 2042 "check");*/ 2043 RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB"); 2044#ifdef CONFIG_REISERFS_CHECK 2045 cur_tb = tb; 2046#endif 2047} 2048 2049static inline void do_balance_completed(struct tree_balance *tb) 2050{ 2051 2052#ifdef CONFIG_REISERFS_CHECK 2053 check_leaf_level(tb); 2054 check_internal_levels(tb); 2055 cur_tb = NULL; 2056#endif 2057 2058 /* reiserfs_free_block is no longer schedule safe. 
So, we need to 2059 ** put the buffers we want freed on the thrown list during do_balance, 2060 ** and then free them now 2061 */ 2062 2063 REISERFS_SB(tb->tb_sb)->s_do_balance++; 2064 2065 /* release all nodes hold to perform the balancing */ 2066 unfix_nodes(tb); 2067 2068 free_thrown(tb); 2069} 2070 2071void do_balance(struct tree_balance *tb, /* tree_balance structure */ 2072 struct item_head *ih, /* item header of inserted item */ 2073 const char *body, /* body of inserted item or bytes to paste */ 2074 int flag) 2075{ /* i - insert, d - delete 2076 c - cut, p - paste 2077 2078 Cut means delete part of an item 2079 (includes removing an entry from a 2080 directory). 2081 2082 Delete means delete whole item. 2083 2084 Insert means add a new item into the 2085 tree. 2086 2087 Paste means to append to the end of an 2088 existing file or to insert a directory 2089 entry. */ 2090 int child_pos, /* position of a child node in its parent */ 2091 h; /* level of the tree being processed */ 2092 struct item_head insert_key[2]; /* in our processing of one level 2093 we sometimes determine what 2094 must be inserted into the next 2095 higher level. This insertion 2096 consists of a key or two keys 2097 and their corresponding 2098 pointers */ 2099 struct buffer_head *insert_ptr[2]; /* inserted node-ptrs for the next 2100 level */ 2101 2102 tb->tb_mode = flag; 2103 tb->need_balance_dirty = 0; 2104 2105 if (FILESYSTEM_CHANGED_TB(tb)) { 2106 reiserfs_panic(tb->tb_sb, "clm-6000", "fs generation has " 2107 "changed"); 2108 } 2109 /* if we have no real work to do */ 2110 if (!tb->insert_size[0]) { 2111 reiserfs_warning(tb->tb_sb, "PAP-12350", 2112 "insert_size == 0, mode == %c", flag); 2113 unfix_nodes(tb); 2114 return; 2115 } 2116 2117 atomic_inc(&(fs_generation(tb->tb_sb))); 2118 do_balance_starts(tb); 2119 2120 /* balance leaf returns 0 except if combining L R and S into 2121 one node. see balance_internal() for explanation of this 2122 line of code. 
*/ 2123 child_pos = PATH_H_B_ITEM_ORDER(tb->tb_path, 0) + 2124 balance_leaf(tb, ih, body, flag, insert_key, insert_ptr); 2125 2126#ifdef CONFIG_REISERFS_CHECK 2127 check_after_balance_leaf(tb); 2128#endif 2129 2130 /* Balance internal level of the tree. */ 2131 for (h = 1; h < MAX_HEIGHT && tb->insert_size[h]; h++) 2132 child_pos = 2133 balance_internal(tb, h, child_pos, insert_key, insert_ptr); 2134 2135 do_balance_completed(tb); 2136 2137} 2138