drbd_nl.c revision 8979d9c9e0bc8e54cf5bd7a89abb2145f087b5e1
/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/connector.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_tag_magic.h>
#include <linux/drbd_limits.h>
#include <linux/compiler.h>
#include <linux/kthread.h>

static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int);
static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *);
static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *);

/* see get_sb_bdev and bd_claim */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Generate the tag_list to struct functions */
#define NL_PACKET(name, number, fields) \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) __attribute__ ((unused)); \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) \
{ \
	int tag; \
	int dlen; \
	\
	while ((tag = get_unaligned(tags++)) != TT_END) { \
		dlen = get_unaligned(tags++); \
		switch (tag_number(tag)) { \
		fields \
		default: \
			if (tag & T_MANDATORY) { \
				dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \
				return 0; \
			} \
		} \
		tags = (unsigned short *)((char *)tags + dlen); \
	} \
	return 1; \
}
#define NL_INTEGER(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \
		arg->member = get_unaligned((int *)(tags)); \
		break;
#define NL_INT64(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \
		arg->member = get_unaligned((u64 *)(tags)); \
		break;
#define NL_BIT(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \
		arg->member = *(char *)(tags) ? 1 : 0; \
		break;
#define NL_STRING(pn, pr, member, len) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \
		if (dlen > len) { \
			dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \
				#member, dlen, (unsigned int)len); \
			return 0; \
		} \
		arg->member ## _len = dlen; \
		memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
		break;
#include "linux/drbd_nl.h"
/* Generate the struct to tag_list functions */
#define NL_PACKET(name, number, fields) \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) __attribute__ ((unused)); \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) \
{ \
	fields \
	return tags; \
}

#define NL_INTEGER(pn, pr, member) \
	put_unaligned(pn | pr | TT_INTEGER, tags++); \
	put_unaligned(sizeof(int), tags++); \
	put_unaligned(arg->member, (int *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(int));
#define NL_INT64(pn, pr, member) \
	put_unaligned(pn | pr | TT_INT64, tags++); \
	put_unaligned(sizeof(u64), tags++); \
	put_unaligned(arg->member, (u64 *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(u64));
#define NL_BIT(pn, pr, member) \
	put_unaligned(pn | pr | TT_BIT, tags++); \
	put_unaligned(sizeof(char), tags++); \
	*(char *)tags = arg->member; \
	tags = (unsigned short *)((char *)tags+sizeof(char));
#define NL_STRING(pn, pr, member, len) \
	put_unaligned(pn | pr | TT_STRING, tags++); \
	put_unaligned(arg->member ## _len, tags++); \
	memcpy(tags, arg->member, arg->member ## _len); \
	tags = (unsigned short *)((char *)tags + arg->member ## _len);
#include "linux/drbd_nl.h"
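/*
 * Illustration (not compiled here): given a packet description in
 * linux/drbd_nl.h along the lines of
 *
 *	NL_PACKET(primary, 1,
 *		NL_BIT(1, T_MAY_IGNORE, primary_force)
 *	)
 *
 * the two generator passes above expand into primary_from_tags() and
 * primary_to_tags(), which parse resp. emit a TT_END-terminated stream
 * of (tag, length, payload) triples carried over the connector socket.
 */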
void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name);
void drbd_nl_send_reply(struct cn_msg *, int);

int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			NULL, /* Will be set to address family */
			NULL, /* Will be set to address */
			NULL };

	char mb[12], af[20], ad[60], *afs;
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));

	if (get_net_conf(mdev)) {
		switch (((struct sockaddr *)mdev->net_conf->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)mdev->net_conf->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
		}
		snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
		envp[3] = af;
		envp[4] = ad;
		put_net_conf(mdev);
	}

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);

	drbd_bcast_ev_helper(mdev, cmd);
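	/* Note: the final argument 1 is UMH_WAIT_PROC on kernels of this
	 * vintage, i.e. we block until the helper process exits.
	 * call_usermodehelper() then returns the wait status, so the
	 * helper's exit code is recovered below via (ret >> 8) & 0xff,
	 * and a negative return means the helper could not be started. */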
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
{
	char *ex_to_string;
	int r;
	enum drbd_disk_state nps;
	enum drbd_fencing_p fp;

	D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

	if (get_ldev_if_state(mdev, D_CONSISTENT)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	} else {
		dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
		nps = mdev->state.pdsk;
		goto out;
	}

	r = drbd_khelper(mdev, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		nps = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		nps = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (mdev->state.disk == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			nps = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
			nps = mdev->state.pdsk;
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		dev_warn(DEV, "Peer is primary, outdating myself.\n");
		nps = D_UNKNOWN;
		_drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE);
		break;
	case 7:
		if (fp != FP_STONITH)
			dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		nps = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		nps = D_UNKNOWN;
		dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return nps;
	}

	dev_info(DEV, "fence-peer helper returned %d (%s)\n",
			(r>>8) & 0xff, ex_to_string);

out:
	if (mdev->state.susp_fen && nps >= D_UNKNOWN) {
		/* The handler was not successful... unfreeze here, the
		   state engine can not unfreeze... */
		_drbd_request_state(mdev, NS(susp_fen, 0), CS_VERBOSE);
	}

	return nps;
}

static int _try_outdate_peer_async(void *data)
{
	struct drbd_conf *mdev = (struct drbd_conf *)data;
	enum drbd_disk_state nps;

	nps = drbd_try_outdate_peer(mdev);
	drbd_request_state(mdev, NS(pdsk, nps));

	return 0;
}

void drbd_try_outdate_peer_async(struct drbd_conf *mdev)
{
	struct task_struct *opa;

	opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev));
	if (IS_ERR(opa))
		dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n");
}

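/* Summary of the retry loop below: try the role transition up to
 * max_tries times.  On specific soft failures the request is widened
 * and retried: an unreachable peer gets fenced/outdated first, and with
 * force set a merely Inconsistent local disk is promoted to UpToDate. */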
int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	int r = 0;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;
	enum drbd_disk_state nps;

	if (new_role == R_PRIMARY)
		request_ping(mdev); /* Detect a dead peer ASAP */

	mutex_lock(&mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i = 0; val.role = new_role;

	while (try++ < max_tries) {
		r = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (r == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (r == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (r == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
			nps = drbd_try_outdate_peer(mdev);

			if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}

			val.pdsk = nps;
			mask.pdsk = D_MASK;

			continue;
		}

		if (r == SS_NOTHING_TO_DO)
			goto fail;
		if (r == SS_PRIMARY_NOP && mask.pdsk == 0) {
			nps = drbd_try_outdate_peer(mdev);

			if (force && nps > D_OUTDATED) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				nps = D_OUTDATED;
			}

			mask.pdsk = D_MASK;
			val.pdsk = nps;

			continue;
		}
		if (r == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (r < SS_SUCCESS) {
			r = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (r < SS_SUCCESS)
				goto fail;
		}
		break;
	}

	if (r < SS_SUCCESS)
		goto fail;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, TRUE);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		if (get_net_conf(mdev)) {
			mdev->net_conf->want_lose = 0;
			put_net_conf(mdev);
		}
		set_disk_ro(mdev->vdisk, FALSE);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(mdev);
		}
	}

	if ((new_role == R_SECONDARY) && get_ldev(mdev)) {
		drbd_al_to_on_disk_bm(mdev);
		put_ldev(mdev);
	}

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
 fail:
	mutex_unlock(&mdev->state_mutex);
	return r;
}

static struct drbd_conf *ensure_mdev(int minor, int create)
{
	struct drbd_conf *mdev;

	if (minor >= minor_count)
		return NULL;

	mdev = minor_to_mdev(minor);

	if (!mdev && create) {
		struct gendisk *disk = NULL;
		mdev = drbd_new_device(minor);

		spin_lock_irq(&drbd_pp_lock);
		if (minor_table[minor] == NULL) {
			minor_table[minor] = mdev;
			disk = mdev->vdisk;
			mdev = NULL;
		} /* else: we lost the race */
		spin_unlock_irq(&drbd_pp_lock);

		if (disk) /* we won the race above */
			/* in case we ever add a drbd_delete_device(),
			 * don't forget the del_gendisk! */
			add_disk(disk);
		else /* we lost the race above */
			drbd_free_mdev(mdev);

		mdev = minor_to_mdev(minor);
	}

	return mdev;
}

static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	struct primary primary_args;

	memset(&primary_args, 0, sizeof(struct primary));
	if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	reply->ret_code =
		drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force);

	return 0;
}

static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0);

	return 0;
}

/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	switch (bdev->dc.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_MAX_SIZE;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
}

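/*
 * Sketch of the resulting internal/flexible-internal layout (offsets in
 * sectors, relative to md_offset, which drbd_md_ss__() places in the
 * last 4kB of the backing device):
 *
 *	bm_offset = -md_size_sect + MD_AL_OFFSET   [ bitmap         ]
 *	al_offset = -MD_AL_MAX_SIZE                [ activity log   ]
 *	0                                          [ md super block ]
 *
 * i.e. the super block sits at the end of the device, with the activity
 * log and the bitmap stacked below it.
 */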
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max. */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%lu %cB", (unsigned long)size, units[base]);

	return buf;
}

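/*
 * Example: the input is a count of KB, so for a 1 TB device
 * ppsize(buf, 1073741824) walks the loop twice and yields "1024 GB",
 * while anything below 10000 KB is printed unscaled, e.g. "8192 KB".
 */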
/* there is still a theoretical deadlock when called from receiver
 * on an D_INCONSISTENT R_PRIMARY:
 *  remote READ does inc_ap_bio, receiver would need to receive answer
 *  packet from remote to dec_ap_bio again.
 *  receiver receive_sizes(), comes here,
 *  waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 *  (not connected, or bad/no disk on peer):
 *  see drbd_fail_request_early, ap_bio_cnt is zero.
 *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 *  peer may not initiate a resize.
 */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (is_susp(mdev->state))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}

/**
 * drbd_determin_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 * @flags:	resize flags (DDSF_*).
 *
 * Returns unchanged, shrunk or grew on success; dev_size_error if we failed
 * to resize the bitmap. You should call drbd_md_sync() after calling this
 * function.
 */
enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
					"Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
					"Leaving size unchanged at size = %lu KB\n",
					(unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
		     (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		rv = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! */
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}

sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}

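/*
 * Worked example (illustrative): a 100G node connected to an 80G peer
 * agrees on min(p_size, m_size) = 80G.  While disconnected (p_size == 0)
 * the last agreed size la_size wins, further clamped by the local
 * capacity; a user-requested disk_size may only shrink the result,
 * never grow it beyond what both backing devices can hold.
 */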
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	ERR_IF(mdev->sync_conf.al_extents < 7)
		mdev->sync_conf.al_extents = 127;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache,
		mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}

void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __must_hold(local)
{
	struct request_queue * const q = mdev->rq_queue;
	struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
	int max_segments = mdev->ldev->dc.max_bio_bvecs;

	max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);

	blk_queue_max_hw_sectors(q, max_seg_s >> 9);
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_max_segment_size(q, max_seg_s);
	blk_queue_logical_block_size(q, 512);
	blk_queue_segment_boundary(q, PAGE_SIZE-1);
	blk_stack_limits(&q->limits, &b->limits, 0);

	if (b->merge_bvec_fn)
		dev_warn(DEV, "Backing device's merge_bvec_fn() = %p\n",
		     b->merge_bvec_fn);
	dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q));

	if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
		dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
		     q->backing_dev_info.ra_pages,
		     b->backing_dev_info.ra_pages);
		q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
	}
}

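/*
 * Unit reminder for the function above: max_seg_s is a byte count, while
 * the block layer's blk_queue_max_hw_sectors() expects 512-byte sectors,
 * hence the >> 9 conversion; blk_queue_max_segment_size() takes bytes
 * again.
 */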
/* serialize deconfig (worker exiting, doing cleanup)
 * and reconfig (drbdsetup disk, drbdsetup net)
 *
 * Wait for a potentially exiting worker, then restart it,
 * or start a new one.  Flush any pending work, there may still be an
 * after_state_change queued.
 */
static void drbd_reconfig_start(struct drbd_conf *mdev)
{
	wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
	wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
	drbd_thread_start(&mdev->worker);
	drbd_flush_workqueue(mdev);
}

/* if still unconfigured, stops worker again.
 * if configured now, clears CONFIG_PENDING.
 * wakes potential waiters */
static void drbd_reconfig_done(struct drbd_conf *mdev)
{
	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.disk == D_DISKLESS &&
	    mdev->state.conn == C_STANDALONE &&
	    mdev->state.role == R_SECONDARY) {
		set_bit(DEVICE_DYING, &mdev->flags);
		drbd_thread_stop_nowait(&mdev->worker);
	} else
		clear_bit(CONFIG_PENDING, &mdev->flags);
	spin_unlock_irq(&mdev->req_lock);
	wake_up(&mdev->state_wait);
}

/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (lc_try_lock(mdev->act_log)) {
		drbd_al_shrink(mdev);
		lc_unlock(mdev->act_log);
	} else {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);

	spin_unlock_irq(&mdev->req_lock);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}

/* does always return 0;
 * interesting return code is in reply->ret_code */
static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	enum drbd_ret_codes retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct inode *inode, *inode2;
	struct lru_cache *resync_lru = NULL;
	union drbd_state ns, os;
	unsigned int max_seg_s;
	int rv;
	int cp_discovered = 0;
	int logical_block_size;

	drbd_reconfig_start(mdev);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	nbc->dc.disk_size = DRBD_DISK_SIZE_SECT_DEF;
	nbc->dc.on_io_error = DRBD_ON_IO_ERROR_DEF;
	nbc->dc.fencing = DRBD_FENCING_DEF;
	nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;

	if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	if (get_net_conf(mdev)) {
		int prot = mdev->net_conf->wire_protocol;
		put_net_conf(mdev);
		if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}

	nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0);
	if (IS_ERR(nbc->lo_file)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
		    PTR_ERR(nbc->lo_file));
		nbc->lo_file = NULL;
		retcode = ERR_OPEN_DISK;
		goto fail;
	}

	inode = nbc->lo_file->f_dentry->d_inode;

	if (!S_ISBLK(inode->i_mode)) {
		retcode = ERR_DISK_NOT_BDEV;
		goto fail;
	}

	nbc->md_file = filp_open(nbc->dc.meta_dev, O_RDWR, 0);
	if (IS_ERR(nbc->md_file)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
		    PTR_ERR(nbc->md_file));
		nbc->md_file = NULL;
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}

	inode2 = nbc->md_file->f_dentry->d_inode;

	if (!S_ISBLK(inode2->i_mode)) {
		retcode = ERR_MD_NOT_BDEV;
		goto fail;
	}

	nbc->backing_bdev = inode->i_bdev;
	if (bd_claim(nbc->backing_bdev, mdev)) {
		printk(KERN_ERR "drbd: bd_claim(%p,%p); failed [%p;%p;%u]\n",
		       nbc->backing_bdev, mdev,
		       nbc->backing_bdev->bd_holder,
		       nbc->backing_bdev->bd_contains->bd_holder,
		       nbc->backing_bdev->bd_holders);
		retcode = ERR_BDCLAIM_DISK;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			       61, sizeof(struct bm_extent),
			       offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto release_bdev_fail;
	}
	/* meta_dev_idx >= 0: external fixed size,
	 * possibly multiple drbd sharing one meta device.
	 * TODO in that case, paranoia check that [md_bdev, meta_dev_idx] is
	 * not yet used by some other drbd minor!
	 * (if you use drbd.conf + drbdadm,
	 * that should check it for you already; but if you don't, or someone
	 * fooled it, we need to double check here) */
	nbc->md_bdev = inode2->i_bdev;
	if (bd_claim(nbc->md_bdev, (nbc->dc.meta_dev_idx < 0) ? (void *)mdev
				: (void *) drbd_m_holder)) {
		retcode = ERR_BDCLAIM_MD_DISK;
		goto release_bdev_fail;
	}

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto release_bdev2_fail;
	}

	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) nbc->dc.disk_size);
		retcode = ERR_DISK_TO_SMALL;
		goto release_bdev2_fail;
	}

	if (nbc->dc.meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
	}
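	/* Sizing example (illustrative): with MD_RESERVED_SECT worth 128MB,
	 * an external meta device serving indexes 0..3 must provide at
	 * least 4 * MD_RESERVED_SECT, since index i starts at
	 * i * MD_RESERVED_SECT.  For internal/flexible meta data,
	 * (2<<10) sectors = 1MB is the bare minimum. */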
	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
			 "at least %llu sectors needed for this meta-disk type\n",
			 (unsigned long long) min_md_device_sectors);
		goto release_bdev2_fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TO_SMALL;
		goto release_bdev2_fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			"to currently maximum possible %llu sectors <==\n",
			(unsigned long long) max_possible_sectors);
		if (nbc->dc.meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				"meta data may help <<==\n");
	}

	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || is_susp(mdev->state));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	drbd_resume_io(mdev);
	if (retcode < SS_SUCCESS)
		goto release_bdev2_fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	/* allocate a second IO page if logical_block_size != 512 */
	logical_block_size = bdev_logical_block_size(nbc->md_bdev);
	if (logical_block_size == 0)
		logical_block_size = MD_SECTOR_SIZE;

	if (logical_block_size != MD_SECTOR_SIZE) {
		if (!mdev->md_io_tmpp) {
			struct page *page = alloc_page(GFP_NOIO);
			if (!page)
				goto force_diskless_dec;

			dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n",
			     logical_block_size, MD_SECTOR_SIZE);
			dev_warn(DEV, "Workaround engaged (has performance impact).\n");

			mdev->md_io_tmpp = page;
		}
	}
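	/* Background on the workaround above: DRBD's on-disk meta data is
	 * addressed in 512-byte units (MD_SECTOR_SIZE).  If the meta-data
	 * device reports a larger logical block size (e.g. 4k), single
	 * 512-byte accesses are impossible, so md_io_tmpp serves as a
	 * bounce page: read the full block, modify the 512 bytes of
	 * interest, write the full block back. */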
	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TO_SMALL;
		goto force_diskless_dec;
	}

	if (!drbd_al_read_log(mdev, nbc)) {
		retcode = ERR_IO_MD_DISK;
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (nbc->dc.no_md_flush)
		set_bit(MD_NO_BARRIER, &mdev->flags);
	else
		clear_bit(MD_NO_BARRIER, &mdev->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere. */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	nbc = NULL;
	resync_lru = NULL;

	mdev->write_ordering = WO_bio_barrier;
	drbd_bump_write_ordering(mdev, WO_bio_barrier);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
		set_bit(CRASHED_PRIMARY, &mdev->flags);
		cp_discovered = 1;
	}

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;
	mdev->read_cnt = 0;
	mdev->writ_cnt = 0;

	max_seg_s = DRBD_MAX_SEGMENT_SIZE;
	if (mdev->state.conn == C_CONNECTED) {
		/* We are Primary, Connected, and now attach a new local
		 * backing store. We must not increase the user visible maximum
		 * bio size on this device to something the peer may not be
		 * able to handle. */
		if (mdev->agreed_pro_version < 94)
			max_seg_s = queue_max_segment_size(mdev->rq_queue);
		else if (mdev->agreed_pro_version == 94)
			max_seg_s = DRBD_MAX_SIZE_H80_PACKET;
		/* else: drbd 8.3.9 and later, stay with default */
	}

	drbd_setup_queue_param(mdev, max_seg_s);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	if (mdev->state.role != R_PRIMARY &&
	     drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &mdev->flags);

	dd = drbd_determin_dev_size(mdev, 0);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == grew)
		set_bit(RESYNC_AFTER_NEG, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
		     "(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (cp_discovered) {
		drbd_al_apply_to_bm(mdev);
		drbd_al_to_on_disk_bm(mdev);
	}

	if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
		drbd_suspend_al(mdev); /* IO is still suspended here... */

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;
	ns.i = os.i;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WasUpToDate...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	if ( ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		   To avoid a race in receive_state, free p_uuid while
		   holding req_lock. I.e. atomic with the state change */
		kfree(mdev->p_uuid);
		mdev->p_uuid = NULL;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
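	/* Error unwind ladder: each label below releases what was acquired
	 * after the previous one.  force_diskless_dec drops the ldev
	 * reference, force_diskless resets the disk state,
	 * release_bdev2_fail/release_bdev_fail give up the bd_claim()s on
	 * the meta resp. backing device, and fail releases files and
	 * memory. */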
 force_diskless_dec:
	put_ldev(mdev);
 force_diskless:
	drbd_force_state(mdev, NS(disk, D_DISKLESS));
	drbd_md_sync(mdev);
 release_bdev2_fail:
	if (nbc)
		bd_release(nbc->md_bdev);
 release_bdev_fail:
	if (nbc)
		bd_release(nbc->backing_bdev);
 fail:
	if (nbc) {
		if (nbc->lo_file)
			fput(nbc->lo_file);
		if (nbc->md_file)
			fput(nbc->md_file);
		kfree(nbc);
	}
	lc_destroy(resync_lru);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}

static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS));
	return 0;
}

static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			    struct drbd_nl_cfg_reply *reply)
{
	int i, ns;
	enum drbd_ret_codes retcode;
	struct net_conf *new_conf = NULL;
	struct crypto_hash *tfm = NULL;
	struct crypto_hash *integrity_w_tfm = NULL;
	struct crypto_hash *integrity_r_tfm = NULL;
	struct hlist_head *new_tl_hash = NULL;
	struct hlist_head *new_ee_hash = NULL;
	struct drbd_conf *odev;
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	void *int_dig_out = NULL;
	void *int_dig_in = NULL;
	void *int_dig_vv = NULL;
	struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;

	drbd_reconfig_start(mdev);

	if (mdev->state.conn > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	new_conf->timeout = DRBD_TIMEOUT_DEF;
	new_conf->try_connect_int = DRBD_CONNECT_INT_DEF;
	new_conf->ping_int = DRBD_PING_INT_DEF;
	new_conf->max_epoch_size = DRBD_MAX_EPOCH_SIZE_DEF;
	new_conf->max_buffers = DRBD_MAX_BUFFERS_DEF;
	new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
	new_conf->sndbuf_size = DRBD_SNDBUF_SIZE_DEF;
	new_conf->rcvbuf_size = DRBD_RCVBUF_SIZE_DEF;
	new_conf->ko_count = DRBD_KO_COUNT_DEF;
	new_conf->after_sb_0p = DRBD_AFTER_SB_0P_DEF;
	new_conf->after_sb_1p = DRBD_AFTER_SB_1P_DEF;
	new_conf->after_sb_2p = DRBD_AFTER_SB_2P_DEF;
	new_conf->want_lose = 0;
	new_conf->two_primaries = 0;
	new_conf->wire_protocol = DRBD_PROT_C;
	new_conf->ping_timeo = DRBD_PING_TIMEO_DEF;
	new_conf->rr_conflict = DRBD_RR_CONFLICT_DEF;

	if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (new_conf->two_primaries
	    && (new_conf->wire_protocol != DRBD_PROT_C)) {
		retcode = ERR_NOT_PROTO_C;
		goto fail;
	}

	if (get_ldev(mdev)) {
		enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
		if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}

	if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
		retcode = ERR_DISCARD;
		goto fail;
	}

	retcode = NO_ERROR;
	new_my_addr = (struct sockaddr *)&new_conf->my_addr;
	new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev || odev == mdev)
			continue;
		if (get_net_conf(odev)) {
			taken_addr = (struct sockaddr *)&odev->net_conf->my_addr;
			if (new_conf->my_addr_len == odev->net_conf->my_addr_len &&
			    !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
				retcode = ERR_LOCAL_ADDR;

			taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr;
			if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len &&
			    !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
				retcode = ERR_PEER_ADDR;

			put_net_conf(odev);
			if (retcode != NO_ERROR)
				goto fail;
		}
	}
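	/* The crypto API selects algorithms by template string; e.g. a
	 * cram_hmac_alg of "sha1" becomes "hmac(sha1)" below, while the
	 * integrity_alg is used verbatim (e.g. "crc32c").  Passing
	 * CRYPTO_ALG_ASYNC as the mask requests a synchronous hash. */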
	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			new_conf->cram_hmac_alg);
		tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm)) {
			tfm = NULL;
			retcode = ERR_AUTH_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
			retcode = ERR_AUTH_ALG_ND;
			goto fail;
		}
	}

	if (new_conf->integrity_alg[0]) {
		integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_w_tfm)) {
			integrity_w_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
			retcode = ERR_INTEGRITY_ALG_ND;
			goto fail;
		}

		integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_r_tfm)) {
			integrity_r_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}
	}

	ns = new_conf->max_epoch_size/8;
	if (mdev->tl_hash_s != ns) {
		new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_tl_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	ns = new_conf->max_buffers/8;
	if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) {
		new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_ee_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	if (integrity_w_tfm) {
		i = crypto_hash_digestsize(integrity_w_tfm);
		int_dig_out = kmalloc(i, GFP_KERNEL);
		if (!int_dig_out) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_in = kmalloc(i, GFP_KERNEL);
		if (!int_dig_in) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_vv = kmalloc(i, GFP_KERNEL);
		if (!int_dig_vv) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	drbd_flush_workqueue(mdev);
	spin_lock_irq(&mdev->req_lock);
	if (mdev->net_conf != NULL) {
		retcode = ERR_NET_CONFIGURED;
		spin_unlock_irq(&mdev->req_lock);
		goto fail;
	}
	mdev->net_conf = new_conf;

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;

	if (new_tl_hash) {
		kfree(mdev->tl_hash);
		mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8;
		mdev->tl_hash = new_tl_hash;
	}

	if (new_ee_hash) {
		kfree(mdev->ee_hash);
		mdev->ee_hash_s = mdev->net_conf->max_buffers/8;
		mdev->ee_hash = new_ee_hash;
	}

	crypto_free_hash(mdev->cram_hmac_tfm);
	mdev->cram_hmac_tfm = tfm;

	crypto_free_hash(mdev->integrity_w_tfm);
	mdev->integrity_w_tfm = integrity_w_tfm;

	crypto_free_hash(mdev->integrity_r_tfm);
	mdev->integrity_r_tfm = integrity_r_tfm;

	kfree(mdev->int_dig_out);
	kfree(mdev->int_dig_in);
	kfree(mdev->int_dig_vv);
	mdev->int_dig_out = int_dig_out;
	mdev->int_dig_in = int_dig_in;
	mdev->int_dig_vv = int_dig_vv;
	retcode = _drbd_set_state(_NS(mdev, conn, C_UNCONNECTED), CS_VERBOSE, NULL);
	spin_unlock_irq(&mdev->req_lock);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

fail:
	kfree(int_dig_out);
	kfree(int_dig_in);
	kfree(int_dig_vv);
	crypto_free_hash(tfm);
	crypto_free_hash(integrity_w_tfm);
	crypto_free_hash(integrity_r_tfm);
	kfree(new_tl_hash);
	kfree(new_ee_hash);
	kfree(new_conf);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}
static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED);

	if (retcode == SS_NOTHING_TO_DO)
		goto done;
	else if (retcode == SS_ALREADY_STANDALONE)
		goto done;
	else if (retcode == SS_PRIMARY_NOP) {
		/* Our state checking code wants to see the peer outdated. */
		retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
						       pdsk, D_OUTDATED));
	} else if (retcode == SS_CW_FAILED_BY_PEER) {
		/* The peer probably wants to see us outdated. */
		retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
							disk, D_OUTDATED),
					      CS_ORDERED);
		if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			retcode = SS_SUCCESS;
		}
	}

	if (retcode < SS_SUCCESS)
		goto fail;

	if (wait_event_interruptible(mdev->state_wait,
				     mdev->state.conn != C_DISCONNECTING)) {
		/* Do not test for mdev->state.conn == C_STANDALONE, since
		   someone else might connect us in the mean time! */
		retcode = ERR_INTR;
		goto fail;
	}

 done:
	retcode = NO_ERROR;
 fail:
	drbd_md_sync(mdev);
	reply->ret_code = retcode;
	return 0;
}

void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}

static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	struct resize rs;
	int retcode = NO_ERROR;
	enum determine_dev_size dd;
	enum dds_flags ddsf;

	memset(&rs, 0, sizeof(struct resize));
	if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		/* don't leak the ldev reference taken above */
		goto fail_ldev;
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determin_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

 fail:
	reply->ret_code = retcode;
	return 0;

 fail_ldev:
	put_ldev(mdev);
	goto fail;
}
static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	struct syncer_conf sc;
	cpumask_var_t new_cpu_mask;
	int *rs_plan_s = NULL;
	int fifo_size;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	if (nlp->flags & DRBD_NL_SET_DEFAULTS) {
		memset(&sc, 0, sizeof(struct syncer_conf));
		sc.rate = DRBD_RATE_DEF;
		sc.after = DRBD_AFTER_DEF;
		sc.al_extents = DRBD_AL_EXTENTS_DEF;
		sc.on_no_data = DRBD_ON_NO_DATA_DEF;
		sc.c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF;
		sc.c_delay_target = DRBD_C_DELAY_TARGET_DEF;
		sc.c_fill_target = DRBD_C_FILL_TARGET_DEF;
		sc.c_max_rate = DRBD_C_MAX_RATE_DEF;
		sc.c_min_rate = DRBD_C_MIN_RATE_DEF;
	} else
		memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));

	if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	/* re-sync running */
	rsr = (	mdev->state.conn == C_SYNC_SOURCE ||
		mdev->state.conn == C_SYNC_TARGET ||
		mdev->state.conn == C_PAUSED_SYNC_S ||
		mdev->state.conn == C_PAUSED_SYNC_T );

	if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	if (!rsr && sc.csums_alg[0]) {
		csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(csums_tfm)) {
			csums_tfm = NULL;
			retcode = ERR_CSUMS_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
			retcode = ERR_CSUMS_ALG_ND;
			goto fail;
		}
	}

	/* online verify running */
	ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);

	if (ovr) {
		if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
			retcode = ERR_VERIFY_RUNNING;
			goto fail;
		}
	}

	if (!ovr && sc.verify_alg[0]) {
		verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(verify_tfm)) {
			verify_tfm = NULL;
			retcode = ERR_VERIFY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
			retcode = ERR_VERIFY_ALG_ND;
			goto fail;
		}
	}

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
		err = __bitmap_parse(sc.cpu_mask, 32, 0,
				     cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
			retcode = ERR_CPU_MASK_PARSE;
			goto fail;
		}
	}

	ERR_IF (sc.rate < 1) sc.rate = 1;
	ERR_IF (sc.al_extents < 7) sc.al_extents = 127; /* arbitrary minimum */
#define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT)
	if (sc.al_extents > AL_MAX) {
		dev_err(DEV, "sc.al_extents > %d\n", AL_MAX);
		sc.al_extents = AL_MAX;
	}
#undef AL_MAX

	/* to avoid spurious errors when configuring minors before configuring
	 * the minors they depend on: if necessary, first create the minor we
	 * depend on */
	if (sc.after >= 0)
		ensure_mdev(sc.after, 1);

	/* most sanity checks done, try to assign the new sync-after
	 * dependency.  need to hold the global lock in there,
	 * to avoid a race in the dependency loop check. */
	retcode = drbd_alter_sa(mdev, sc.after);
	if (retcode != NO_ERROR)
		goto fail;
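	/* Sizing note (assuming SLEEP_TIME is HZ/10 as in drbd_int.h):
	 * c_plan_ahead is configured in 0.1s units and the resync worker
	 * runs once per SLEEP_TIME, so the computation below yields one
	 * fifo slot per worker tick across the plan-ahead window. */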
	fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
		rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
		if (!rs_plan_s) {
			dev_err(DEV, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	/* ok, assign the rest of it as well.
	 * lock against receive_SyncParam() */
	spin_lock(&mdev->peer_seq_lock);
	mdev->sync_conf = sc;

	if (!rsr) {
		crypto_free_hash(mdev->csums_tfm);
		mdev->csums_tfm = csums_tfm;
		csums_tfm = NULL;
	}

	if (!ovr) {
		crypto_free_hash(mdev->verify_tfm);
		mdev->verify_tfm = verify_tfm;
		verify_tfm = NULL;
	}

	if (fifo_size != mdev->rs_plan_s.size) {
		kfree(mdev->rs_plan_s.values);
		mdev->rs_plan_s.values = rs_plan_s;
		mdev->rs_plan_s.size = fifo_size;
		mdev->rs_planed = 0;
		rs_plan_s = NULL;
	}

	spin_unlock(&mdev->peer_seq_lock);

	if (get_ldev(mdev)) {
		wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
		drbd_al_shrink(mdev);
		err = drbd_check_al_size(mdev);
		lc_unlock(mdev->act_log);
		wake_up(&mdev->al_wait);

		put_ldev(mdev);
		drbd_md_sync(mdev);

		if (err) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev, &sc);

	if (!cpumask_equal(mdev->cpu_mask, new_cpu_mask)) {
		cpumask_copy(mdev->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(mdev);
		mdev->receiver.reset_cpu_mask = 1;
		mdev->asender.reset_cpu_mask = 1;
		mdev->worker.reset_cpu_mask = 1;
	}

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
fail:
	kfree(rs_plan_s);
	free_cpumask_var(new_cpu_mask);
	crypto_free_hash(csums_tfm);
	crypto_free_hash(verify_tfm);
	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}

	reply->ret_code = retcode;
	return 0;
}

static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
	int rv;

	rv = drbd_bmio_set_n_write(mdev);
	drbd_suspend_al(mdev);
	return rv;
}
1886static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
1887{
1888	int rv;
1889
1890	rv = drbd_bmio_set_n_write(mdev);
1891	drbd_suspend_al(mdev);
1892	return rv;
1893}
1894
1895static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1896		struct drbd_nl_cfg_reply *reply)
1897{
1898	int retcode;
1899
1900	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
1901
1902	if (retcode < SS_SUCCESS) {
1903		if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
1904			/* The peer will get a resync upon connect anyway.
1905			 * Just make that into a full resync. */
1906			retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
1907			if (retcode >= SS_SUCCESS) {
1908				/* set all bits, suspend the activity log */
1909				if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
1910					"set_n_write from invalidate_peer"))
1911					retcode = ERR_IO_MD_DISK;
1912			}
1913		} else
1914			retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
1915	}
1916
1917	reply->ret_code = retcode;
1918	return 0;
1919}
1920
1921static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1922		struct drbd_nl_cfg_reply *reply)
1923{
1924	int retcode = NO_ERROR;
1925
1926	if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
1927		retcode = ERR_PAUSE_IS_SET;
1928
1929	reply->ret_code = retcode;
1930	return 0;
1931}
1932
1933static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1934		struct drbd_nl_cfg_reply *reply)
1935{
1936	int retcode = NO_ERROR;
1937
1938	if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO)
1939		retcode = ERR_PAUSE_IS_CLEAR;
1940
1941	reply->ret_code = retcode;
1942	return 0;
1943}
1944
1945static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1946		struct drbd_nl_cfg_reply *reply)
1947{
1948	reply->ret_code = drbd_request_state(mdev, NS(susp, 1));
1949
1950	return 0;
1951}
1952
1953static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1954		struct drbd_nl_cfg_reply *reply)
1955{
1956	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1957		drbd_uuid_new_current(mdev);
1958		clear_bit(NEW_CUR_UUID, &mdev->flags);
1959		drbd_md_sync(mdev);
1960	}
1961	drbd_suspend_io(mdev);
1962	reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
1963	if (reply->ret_code == SS_SUCCESS) {
1964		if (mdev->state.conn < C_CONNECTED)
1965			tl_clear(mdev);
1966		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
1967			tl_restart(mdev, fail_frozen_disk_io);
1968	}
1969	drbd_resume_io(mdev);
1970
1971	return 0;
1972}
1973
1974static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1975		struct drbd_nl_cfg_reply *reply)
1976{
1977	reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED));
1978	return 0;
1979}
1980
1981static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1982		struct drbd_nl_cfg_reply *reply)
1983{
1984	unsigned short *tl;
1985
1986	tl = reply->tag_list;
1987
1988	if (get_ldev(mdev)) {
1989		tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl);
1990		put_ldev(mdev);
1991	}
1992
1993	if (get_net_conf(mdev)) {
1994		tl = net_conf_to_tags(mdev, mdev->net_conf, tl);
1995		put_net_conf(mdev);
1996	}
1997	tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl);
1998
1999	put_unaligned(TT_END, tl++); /* Close the tag list */
2000
2001	return (int)((char *)tl - (char *)reply->tag_list);
2002}
2003
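/* Report the current device state as a tag list; sync progress is only
 * included while a resync is actually running and the local disk is
 * accessible. */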
2004static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
2005		struct drbd_nl_cfg_reply *reply)
2006{
2007	unsigned short *tl = reply->tag_list;
2008	union drbd_state s = mdev->state;
2009	unsigned long rs_left;
2010	unsigned int res;
2011
2012	tl = get_state_to_tags(mdev, (struct get_state *)&s, tl);
2013
2014	/* no local ref, no bitmap, no syncer progress. */
2015	if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) {
2016		if (get_ldev(mdev)) {
2017			drbd_get_syncer_progress(mdev, &rs_left, &res);
2018			tl = tl_add_int(tl, T_sync_progress, &res);
2019			put_ldev(mdev);
2020		}
2021	}
2022	put_unaligned(TT_END, tl++); /* Close the tag list */
2023
2024	return (int)((char *)tl - (char *)reply->tag_list);
2025}
2026
2027static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
2028		struct drbd_nl_cfg_reply *reply)
2029{
2030	unsigned short *tl;
2031
2032	tl = reply->tag_list;
2033
2034	if (get_ldev(mdev)) {
2035		tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64));
2036		tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags);
2037		put_ldev(mdev);
2038	}
2039	put_unaligned(TT_END, tl++); /* Close the tag list */
2040
2041	return (int)((char *)tl - (char *)reply->tag_list);
2042}
2043
2044/**
2045 * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use
2046 * @mdev:	DRBD device.
2047 * @nlp:	Netlink/connector packet from drbdsetup
2048 * @reply:	Reply packet for drbdsetup
2049 */
2050static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
2051		struct drbd_nl_cfg_reply *reply)
2052{
2053	unsigned short *tl;
2054	char rv;
2055
2056	tl = reply->tag_list;
2057
2058	rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
2059		test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;
2060
2061	tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
2062	put_unaligned(TT_END, tl++); /* Close the tag list */
2063
2064	return (int)((char *)tl - (char *)reply->tag_list);
2065}
2066
2067static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
2068		struct drbd_nl_cfg_reply *reply)
2069{
2070	/* default to resume from last known position, if possible */
2071	struct start_ov args =
2072		{ .start_sector = mdev->ov_start_sector };
2073
2074	if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) {
2075		reply->ret_code = ERR_MANDATORY_TAG;
2076		return 0;
2077	}
2078	/* w_make_ov_request expects the position to be bitmap aligned */
2079	mdev->ov_start_sector = args.start_sector & ~(sector_t)(BM_SECT_PER_BIT-1);
2080	reply->ret_code = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
2081	return 0;
2082}
2083
2084
2085static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
2086		struct drbd_nl_cfg_reply *reply)
2087{
2088	int retcode = NO_ERROR;
2089	int skip_initial_sync = 0;
2090	int err;
2091
2092	struct new_c_uuid args;
2093
2094	memset(&args, 0, sizeof(struct new_c_uuid));
2095	if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) {
2096		reply->ret_code = ERR_MANDATORY_TAG;
2097		return 0;
2098	}
2099
2100	mutex_lock(&mdev->state_mutex); /* Protects us against concurrent state changes. */
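	/* Skipping the initial sync is only offered while connected with
	 * protocol 90 or newer and with the current UUID still at
	 * UUID_JUST_CREATED; otherwise the device has to be standalone. */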
2101
2102	if (!get_ldev(mdev)) {
2103		retcode = ERR_NO_DISK;
2104		goto out;
2105	}
2106
2107	/* this is "skip initial sync"; assume the disks to be clean */
2108	if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 &&
2109	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
2110		dev_info(DEV, "Preparing to skip initial sync\n");
2111		skip_initial_sync = 1;
2112	} else if (mdev->state.conn != C_STANDALONE) {
2113		retcode = ERR_CONNECTED;
2114		goto out_dec;
2115	}
2116
2117	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
2118	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
2119
2120	if (args.clear_bm) {
2121		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid");
2122		if (err) {
2123			dev_err(DEV, "Writing bitmap failed with %d\n", err);
2124			retcode = ERR_IO_MD_DISK;
2125		}
2126		if (skip_initial_sync) {
2127			drbd_send_uuids_skip_initial_sync(mdev);
2128			_drbd_uuid_set(mdev, UI_BITMAP, 0);
2129			spin_lock_irq(&mdev->req_lock);
2130			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
2131					CS_VERBOSE, NULL);
2132			spin_unlock_irq(&mdev->req_lock);
2133		}
2134	}
2135
2136	drbd_md_sync(mdev);
2137out_dec:
2138	put_ldev(mdev);
2139out:
2140	mutex_unlock(&mdev->state_mutex);
2141
2142	reply->ret_code = retcode;
2143	return 0;
2144}
2145
2146struct cn_handler_struct {
2147	int (*function)(struct drbd_conf *,
2148			struct drbd_nl_cfg_req *,
2149			struct drbd_nl_cfg_reply *);
2150	int reply_body_size;
2151};
2152
2153static struct cn_handler_struct cnd_table[] = {
2154	[ P_primary ] = { &drbd_nl_primary, 0 },
2155	[ P_secondary ] = { &drbd_nl_secondary, 0 },
2156	[ P_disk_conf ] = { &drbd_nl_disk_conf, 0 },
2157	[ P_detach ] = { &drbd_nl_detach, 0 },
2158	[ P_net_conf ] = { &drbd_nl_net_conf, 0 },
2159	[ P_disconnect ] = { &drbd_nl_disconnect, 0 },
2160	[ P_resize ] = { &drbd_nl_resize, 0 },
2161	[ P_syncer_conf ] = { &drbd_nl_syncer_conf, 0 },
2162	[ P_invalidate ] = { &drbd_nl_invalidate, 0 },
2163	[ P_invalidate_peer ] = { &drbd_nl_invalidate_peer, 0 },
2164	[ P_pause_sync ] = { &drbd_nl_pause_sync, 0 },
2165	[ P_resume_sync ] = { &drbd_nl_resume_sync, 0 },
2166	[ P_suspend_io ] = { &drbd_nl_suspend_io, 0 },
2167	[ P_resume_io ] = { &drbd_nl_resume_io, 0 },
2168	[ P_outdate ] = { &drbd_nl_outdate, 0 },
2169	[ P_get_config ] = { &drbd_nl_get_config,
2170		sizeof(struct syncer_conf_tag_len_struct) +
2171		sizeof(struct disk_conf_tag_len_struct) +
2172		sizeof(struct net_conf_tag_len_struct) },
2173	[ P_get_state ] = { &drbd_nl_get_state,
2174		sizeof(struct get_state_tag_len_struct) +
2175		sizeof(struct sync_progress_tag_len_struct) },
2176	[ P_get_uuids ] = { &drbd_nl_get_uuids,
2177		sizeof(struct get_uuids_tag_len_struct) },
2178	[ P_get_timeout_flag ] = { &drbd_nl_get_timeout_flag,
2179		sizeof(struct get_timeout_flag_tag_len_struct) },
2180	[ P_start_ov ] = { &drbd_nl_start_ov, 0 },
2181	[ P_new_c_uuid ] = { &drbd_nl_new_c_uuid, 0 },
2182};
2183
2184static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp)
2185{
2186	struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data;
2187	struct cn_handler_struct *cm;
2188	struct cn_msg *cn_reply;
2189	struct drbd_nl_cfg_reply *reply;
2190	struct drbd_conf *mdev;
2191	int retcode, rr;
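	/* room for the connector header, the fixed part of the reply, and
	 * the closing TT_END tag; the handler's body size is added below */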
2192	int reply_size = sizeof(struct cn_msg)
2193		+ sizeof(struct drbd_nl_cfg_reply)
2194		+ sizeof(short int);
2195
2196	if (!try_module_get(THIS_MODULE)) {
2197		printk(KERN_ERR "drbd: try_module_get() failed!\n");
2198		return;
2199	}
2200
2201	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) {
2202		retcode = ERR_PERM;
2203		goto fail;
2204	}
2205
2206	mdev = ensure_mdev(nlp->drbd_minor,
2207			(nlp->flags & DRBD_NL_CREATE_DEVICE));
2208	if (!mdev) {
2209		retcode = ERR_MINOR_INVALID;
2210		goto fail;
2211	}
2212
2213	if (nlp->packet_type >= P_nl_after_last_packet) {
2214		retcode = ERR_PACKET_NR;
2215		goto fail;
2216	}
2217
2218	cm = cnd_table + nlp->packet_type;
2219
2220	/* This may happen if packet number is 0: */
2221	if (cm->function == NULL) {
2222		retcode = ERR_PACKET_NR;
2223		goto fail;
2224	}
2225
2226	reply_size += cm->reply_body_size;
2227
2228	/* allocation not in the IO path, cqueue thread context */
2229	cn_reply = kmalloc(reply_size, GFP_KERNEL);
2230	if (!cn_reply) {
2231		retcode = ERR_NOMEM;
2232		goto fail;
2233	}
2234	reply = (struct drbd_nl_cfg_reply *) cn_reply->data;
2235
2236	reply->packet_type =
2237		cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet;
2238	reply->minor = nlp->drbd_minor;
2239	reply->ret_code = NO_ERROR; /* Might be modified by cm->function. */
2240	/* reply->tag_list; might be modified by cm->function. */
2241
2242	rr = cm->function(mdev, nlp, reply);
2243
2244	cn_reply->id = req->id;
2245	cn_reply->seq = req->seq;
2246	cn_reply->ack = req->ack + 1; /* connector convention: reply carries req->ack + 1 */
2247	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr;
2248	cn_reply->flags = 0;
2249
2250	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
2251	if (rr && rr != -ESRCH)
2252		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
2253
2254	kfree(cn_reply);
2255	module_put(THIS_MODULE);
2256	return;
2257 fail:
2258	drbd_nl_send_reply(req, retcode);
2259	module_put(THIS_MODULE);
2260}
2261
2262static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
2263
/* A reply body is a flat tag list: a u16 tag, a u16 length, then that many
 * bytes of payload, repeated, and terminated by TT_END. The helpers below
 * each append a single tag. */
2264static unsigned short *
2265__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
2266	unsigned short len, int nul_terminated)
2267{
2268	unsigned short l = tag_descriptions[tag_number(tag)].max_len;
2269	len = (len < l) ? len : l;
2270	put_unaligned(tag, tl++);
2271	put_unaligned(len, tl++);
2272	memcpy(tl, data, len);
2273	tl = (unsigned short *)((char *)tl + len);
2274	if (nul_terminated)
2275		*((char *)tl - 1) = 0;
2276	return tl;
2277}
2278
2279static unsigned short *
2280tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len)
2281{
2282	return __tl_add_blob(tl, tag, data, len, 0);
2283}
2284
2285static unsigned short *
2286tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str)
2287{
2288	return __tl_add_blob(tl, tag, str, strlen(str)+1, 0);
2289}
2290
2291static unsigned short *
2292tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val)
2293{
2294	put_unaligned(tag, tl++);
2295	switch (tag_type(tag)) {
2296	case TT_INTEGER:
2297		put_unaligned(sizeof(int), tl++);
2298		put_unaligned(*(int *)val, (int *)tl);
2299		tl = (unsigned short *)((char *)tl + sizeof(int));
2300		break;
2301	case TT_INT64:
2302		put_unaligned(sizeof(u64), tl++);
2303		put_unaligned(*(u64 *)val, (u64 *)tl);
2304		tl = (unsigned short *)((char *)tl + sizeof(u64));
2305		break;
2306	default:
2307		/* someone did something stupid;
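		 * only TT_INTEGER and TT_INT64 can be serialized here, and an
		 * unexpected tag type leaves the tag in the list without
		 * length and payload. */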
2308		;
2309	}
2310	return tl;
2311}
2312
2313void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
2314{
2315	char buffer[sizeof(struct cn_msg)+
2316		    sizeof(struct drbd_nl_cfg_reply)+
2317		    sizeof(struct get_state_tag_len_struct)+
2318		    sizeof(short int)];
2319	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
2320	struct drbd_nl_cfg_reply *reply =
2321		(struct drbd_nl_cfg_reply *)cn_reply->data;
2322	unsigned short *tl = reply->tag_list;
2323
2324	/* dev_warn(DEV, "drbd_bcast_state() got called\n"); */
2325
2326	tl = get_state_to_tags(mdev, (struct get_state *)&state, tl);
2327
2328	put_unaligned(TT_END, tl++); /* Close the tag list */
2329
2330	cn_reply->id.idx = CN_IDX_DRBD;
2331	cn_reply->id.val = CN_VAL_DRBD;
2332
2333	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
2334	cn_reply->ack = 0; /* not used here. */
2335	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
2336		(int)((char *)tl - (char *)reply->tag_list);
2337	cn_reply->flags = 0;
2338
2339	reply->packet_type = P_get_state;
2340	reply->minor = mdev_to_minor(mdev);
2341	reply->ret_code = NO_ERROR;
2342
2343	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
2344}
2345
2346void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
2347{
2348	char buffer[sizeof(struct cn_msg)+
2349		    sizeof(struct drbd_nl_cfg_reply)+
2350		    sizeof(struct call_helper_tag_len_struct)+
2351		    sizeof(short int)];
2352	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
2353	struct drbd_nl_cfg_reply *reply =
2354		(struct drbd_nl_cfg_reply *)cn_reply->data;
2355	unsigned short *tl = reply->tag_list;
2356
2357	/* dev_warn(DEV, "drbd_bcast_ev_helper() got called\n"); */
2358
2359	tl = tl_add_str(tl, T_helper, helper_name);
2360	put_unaligned(TT_END, tl++); /* Close the tag list */
2361
2362	cn_reply->id.idx = CN_IDX_DRBD;
2363	cn_reply->id.val = CN_VAL_DRBD;
2364
2365	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
2366	cn_reply->ack = 0; /* not used here. */
2367	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
2368		(int)((char *)tl - (char *)reply->tag_list);
2369	cn_reply->flags = 0;
2370
2371	reply->packet_type = P_call_helper;
2372	reply->minor = mdev_to_minor(mdev);
2373	reply->ret_code = NO_ERROR;
2374
2375	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
2376}
2377
2378void drbd_bcast_ee(struct drbd_conf *mdev,
2379		const char *reason, const int dgs,
2380		const char *seen_hash, const char *calc_hash,
2381		const struct drbd_epoch_entry *e)
2382{
2383	struct cn_msg *cn_reply;
2384	struct drbd_nl_cfg_reply *reply;
2385	unsigned short *tl;
2386	struct page *page;
2387	unsigned len;
2388
2389	if (!e)
2390		return;
2391	if (!reason || !reason[0])
2392		return;
2393
2394	/* apparently we have to memcpy twice, first to prepare the data for the
2395	 * struct cn_msg, then within cn_netlink_send from the cn_msg to the
2396	 * netlink skb. */
2397	/* receiver thread context, which is not in the writeout path (of this node),
2398	 * but may be in the writeout path of the _other_ node.
2399	 * GFP_NOIO to avoid potential "distributed deadlock". */
2400	cn_reply = kmalloc(
2401		sizeof(struct cn_msg)+
2402		sizeof(struct drbd_nl_cfg_reply)+
2403		sizeof(struct dump_ee_tag_len_struct)+
2404		sizeof(short int),
2405		GFP_NOIO);
2406
2407	if (!cn_reply) {
2408		dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n",
2409			(unsigned long long)e->sector, e->size);
2410		return;
2411	}
2412
2413	reply = (struct drbd_nl_cfg_reply *)cn_reply->data;
2414	tl = reply->tag_list;
2415
2416	tl = tl_add_str(tl, T_dump_ee_reason, reason);
2417	tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs);
2418	tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs);
2419	tl = tl_add_int(tl, T_ee_sector, &e->sector);
2420	tl = tl_add_int(tl, T_ee_block_id, &e->block_id);
2421
2422	put_unaligned(T_ee_data, tl++);
2423	put_unaligned(e->size, tl++);
2424
2425	len = e->size;
2426	page = e->pages;
2427	page_chain_for_each(page) {
2428		void *d = kmap_atomic(page, KM_USER0);
2429		unsigned l = min_t(unsigned, len, PAGE_SIZE);
2430		memcpy(tl, d, l);
2431		kunmap_atomic(d, KM_USER0);
2432		tl = (unsigned short *)((char *)tl + l);
2433		len -= l;
2434	}
2435	put_unaligned(TT_END, tl++); /* Close the tag list */
2436
2437	cn_reply->id.idx = CN_IDX_DRBD;
2438	cn_reply->id.val = CN_VAL_DRBD;
2439
2440	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
2441	cn_reply->ack = 0; /* not used here. */
2442	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
2443		(int)((char *)tl - (char *)reply->tag_list);
2444	cn_reply->flags = 0;
2445
2446	reply->packet_type = P_dump_ee;
2447	reply->minor = mdev_to_minor(mdev);
2448	reply->ret_code = NO_ERROR;
2449
2450	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
2451	kfree(cn_reply);
2452}
2453
2454void drbd_bcast_sync_progress(struct drbd_conf *mdev)
2455{
2456	char buffer[sizeof(struct cn_msg)+
2457		    sizeof(struct drbd_nl_cfg_reply)+
2458		    sizeof(struct sync_progress_tag_len_struct)+
2459		    sizeof(short int)];
2460	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
2461	struct drbd_nl_cfg_reply *reply =
2462		(struct drbd_nl_cfg_reply *)cn_reply->data;
2463	unsigned short *tl = reply->tag_list;
2464	unsigned long rs_left;
2465	unsigned int res;
2466
2467	/* no local ref, no bitmap, no syncer progress, no broadcast. */
2468	if (!get_ldev(mdev))
2469		return;
2470	drbd_get_syncer_progress(mdev, &rs_left, &res);
2471	put_ldev(mdev);
2472
2473	tl = tl_add_int(tl, T_sync_progress, &res);
2474	put_unaligned(TT_END, tl++); /* Close the tag list */
2475
2476	cn_reply->id.idx = CN_IDX_DRBD;
2477	cn_reply->id.val = CN_VAL_DRBD;
2478
2479	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
2480	cn_reply->ack = 0; /* not used here. */
2481	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
2482		(int)((char *)tl - (char *)reply->tag_list);
2483	cn_reply->flags = 0;
2484
2485	reply->packet_type = P_sync_progress;
2486	reply->minor = mdev_to_minor(mdev);
2487	reply->ret_code = NO_ERROR;
2488
2489	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
2490}
2491
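/* Register the connector callback. If registration on the configured
 * index fails (e.g. because that index is already in use), retry with
 * up to ten further indices, CN_IDX_STEP apart. */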
2492int __init drbd_nl_init(void)
2493{
2494	static struct cb_id cn_id_drbd;
2495	int err, try = 10;
2496
2497	cn_id_drbd.val = CN_VAL_DRBD;
2498	do {
2499		cn_id_drbd.idx = cn_idx;
2500		err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback);
2501		if (!err)
2502			break;
2503		cn_idx += CN_IDX_STEP;
2504	} while (try--);
2505
2506	if (err) {
2507		printk(KERN_ERR "drbd: cn_drbd failed to register\n");
2508		return err;
2509	}
2510
2511	return 0;
2512}
2513
2514void drbd_nl_cleanup(void)
2515{
2516	static struct cb_id cn_id_drbd;
2517
2518	cn_id_drbd.idx = cn_idx;
2519	cn_id_drbd.val = CN_VAL_DRBD;
2520
2521	cn_del_callback(&cn_id_drbd);
2522}
2523
2524void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
2525{
2526	char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)];
2527	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
2528	struct drbd_nl_cfg_reply *reply =
2529		(struct drbd_nl_cfg_reply *)cn_reply->data;
2530	int rr;
2531
2532	cn_reply->id = req->id;
2533
2534	cn_reply->seq = req->seq;
2535	cn_reply->ack = req->ack + 1;
2536	cn_reply->len = sizeof(struct drbd_nl_cfg_reply);
2537	cn_reply->flags = 0;
2538
2539	reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
2540	reply->ret_code = ret_code;
2541
2542	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
2543	if (rr && rr != -ESRCH)
2544		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
2545}
2546
2547