drbd_nl.c revision 19f843aa08e2d8f87a09b4c2edc43b00638423a8
1/* 2 drbd_nl.c 3 4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg. 5 6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH. 7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>. 8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. 9 10 drbd is free software; you can redistribute it and/or modify 11 it under the terms of the GNU General Public License as published by 12 the Free Software Foundation; either version 2, or (at your option) 13 any later version. 14 15 drbd is distributed in the hope that it will be useful, 16 but WITHOUT ANY WARRANTY; without even the implied warranty of 17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 GNU General Public License for more details. 19 20 You should have received a copy of the GNU General Public License 21 along with drbd; see the file COPYING. If not, write to 22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 23 24 */ 25 26#include <linux/module.h> 27#include <linux/drbd.h> 28#include <linux/in.h> 29#include <linux/fs.h> 30#include <linux/file.h> 31#include <linux/slab.h> 32#include <linux/connector.h> 33#include <linux/blkpg.h> 34#include <linux/cpumask.h> 35#include "drbd_int.h" 36#include "drbd_req.h" 37#include "drbd_wrappers.h" 38#include <asm/unaligned.h> 39#include <linux/drbd_tag_magic.h> 40#include <linux/drbd_limits.h> 41#include <linux/compiler.h> 42#include <linux/kthread.h> 43 44static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int); 45static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *); 46static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *); 47 48/* see get_sb_bdev and bd_claim */ 49static char *drbd_m_holder = "Hands off! this is DRBD's meta data device."; 50 51/* Generate the tag_list to struct functions */ 52#define NL_PACKET(name, number, fields) \ 53static int name ## _from_tags(struct drbd_conf *mdev, \ 54 unsigned short *tags, struct name *arg) __attribute__ ((unused)); \ 55static int name ## _from_tags(struct drbd_conf *mdev, \ 56 unsigned short *tags, struct name *arg) \ 57{ \ 58 int tag; \ 59 int dlen; \ 60 \ 61 while ((tag = get_unaligned(tags++)) != TT_END) { \ 62 dlen = get_unaligned(tags++); \ 63 switch (tag_number(tag)) { \ 64 fields \ 65 default: \ 66 if (tag & T_MANDATORY) { \ 67 dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \ 68 return 0; \ 69 } \ 70 } \ 71 tags = (unsigned short *)((char *)tags + dlen); \ 72 } \ 73 return 1; \ 74} 75#define NL_INTEGER(pn, pr, member) \ 76 case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \ 77 arg->member = get_unaligned((int *)(tags)); \ 78 break; 79#define NL_INT64(pn, pr, member) \ 80 case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \ 81 arg->member = get_unaligned((u64 *)(tags)); \ 82 break; 83#define NL_BIT(pn, pr, member) \ 84 case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \ 85 arg->member = *(char *)(tags) ? 1 : 0; \ 86 break; 87#define NL_STRING(pn, pr, member, len) \ 88 case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \ 89 if (dlen > len) { \ 90 dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \ 91 #member, dlen, (unsigned int)len); \ 92 return 0; \ 93 } \ 94 arg->member ## _len = dlen; \ 95 memcpy(arg->member, tags, min_t(size_t, dlen, len)); \ 96 break; 97#include "linux/drbd_nl.h" 98 99/* Generate the struct to tag_list functions */ 100#define NL_PACKET(name, number, fields) \ 101static unsigned short* \ 102name ## _to_tags(struct drbd_conf *mdev, \ 103 struct name *arg, unsigned short *tags) __attribute__ ((unused)); \ 104static unsigned short* \ 105name ## _to_tags(struct drbd_conf *mdev, \ 106 struct name *arg, unsigned short *tags) \ 107{ \ 108 fields \ 109 return tags; \ 110} 111 112#define NL_INTEGER(pn, pr, member) \ 113 put_unaligned(pn | pr | TT_INTEGER, tags++); \ 114 put_unaligned(sizeof(int), tags++); \ 115 put_unaligned(arg->member, (int *)tags); \ 116 tags = (unsigned short *)((char *)tags+sizeof(int)); 117#define NL_INT64(pn, pr, member) \ 118 put_unaligned(pn | pr | TT_INT64, tags++); \ 119 put_unaligned(sizeof(u64), tags++); \ 120 put_unaligned(arg->member, (u64 *)tags); \ 121 tags = (unsigned short *)((char *)tags+sizeof(u64)); 122#define NL_BIT(pn, pr, member) \ 123 put_unaligned(pn | pr | TT_BIT, tags++); \ 124 put_unaligned(sizeof(char), tags++); \ 125 *(char *)tags = arg->member; \ 126 tags = (unsigned short *)((char *)tags+sizeof(char)); 127#define NL_STRING(pn, pr, member, len) \ 128 put_unaligned(pn | pr | TT_STRING, tags++); \ 129 put_unaligned(arg->member ## _len, tags++); \ 130 memcpy(tags, arg->member, arg->member ## _len); \ 131 tags = (unsigned short *)((char *)tags + arg->member ## _len); 132#include "linux/drbd_nl.h" 133 134void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name); 135void drbd_nl_send_reply(struct cn_msg *, int); 136 137int drbd_khelper(struct drbd_conf *mdev, char *cmd) 138{ 139 char *envp[] = { "HOME=/", 140 "TERM=linux", 141 "PATH=/sbin:/usr/sbin:/bin:/usr/bin", 142 NULL, /* Will be set to address family */ 143 NULL, /* Will be set to address */ 144 NULL }; 145 146 char mb[12], af[20], ad[60], *afs; 147 char *argv[] = {usermode_helper, cmd, mb, NULL }; 148 int ret; 149 150 snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev)); 151 152 if (get_net_conf(mdev)) { 153 switch (((struct sockaddr *)mdev->net_conf->peer_addr)->sa_family) { 154 case AF_INET6: 155 afs = "ipv6"; 156 snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6", 157 &((struct sockaddr_in6 *)mdev->net_conf->peer_addr)->sin6_addr); 158 break; 159 case AF_INET: 160 afs = "ipv4"; 161 snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4", 162 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr); 163 break; 164 default: 165 afs = "ssocks"; 166 snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4", 167 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr); 168 } 169 snprintf(af, 20, "DRBD_PEER_AF=%s", afs); 170 envp[3]=af; 171 envp[4]=ad; 172 put_net_conf(mdev); 173 } 174 175 /* The helper may take some time. 176 * write out any unsynced meta data changes now */ 177 drbd_md_sync(mdev); 178 179 dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb); 180 181 drbd_bcast_ev_helper(mdev, cmd); 182 ret = call_usermodehelper(usermode_helper, argv, envp, 1); 183 if (ret) 184 dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n", 185 usermode_helper, cmd, mb, 186 (ret >> 8) & 0xff, ret); 187 else 188 dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n", 189 usermode_helper, cmd, mb, 190 (ret >> 8) & 0xff, ret); 191 192 if (ret < 0) /* Ignore any ERRNOs we got. */ 193 ret = 0; 194 195 return ret; 196} 197 198enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev) 199{ 200 char *ex_to_string; 201 int r; 202 enum drbd_disk_state nps; 203 enum drbd_fencing_p fp; 204 205 D_ASSERT(mdev->state.pdsk == D_UNKNOWN); 206 207 if (get_ldev_if_state(mdev, D_CONSISTENT)) { 208 fp = mdev->ldev->dc.fencing; 209 put_ldev(mdev); 210 } else { 211 dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n"); 212 nps = mdev->state.pdsk; 213 goto out; 214 } 215 216 r = drbd_khelper(mdev, "fence-peer"); 217 218 switch ((r>>8) & 0xff) { 219 case 3: /* peer is inconsistent */ 220 ex_to_string = "peer is inconsistent or worse"; 221 nps = D_INCONSISTENT; 222 break; 223 case 4: /* peer got outdated, or was already outdated */ 224 ex_to_string = "peer was fenced"; 225 nps = D_OUTDATED; 226 break; 227 case 5: /* peer was down */ 228 if (mdev->state.disk == D_UP_TO_DATE) { 229 /* we will(have) create(d) a new UUID anyways... */ 230 ex_to_string = "peer is unreachable, assumed to be dead"; 231 nps = D_OUTDATED; 232 } else { 233 ex_to_string = "peer unreachable, doing nothing since disk != UpToDate"; 234 nps = mdev->state.pdsk; 235 } 236 break; 237 case 6: /* Peer is primary, voluntarily outdate myself. 238 * This is useful when an unconnected R_SECONDARY is asked to 239 * become R_PRIMARY, but finds the other peer being active. */ 240 ex_to_string = "peer is active"; 241 dev_warn(DEV, "Peer is primary, outdating myself.\n"); 242 nps = D_UNKNOWN; 243 _drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE); 244 break; 245 case 7: 246 if (fp != FP_STONITH) 247 dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n"); 248 ex_to_string = "peer was stonithed"; 249 nps = D_OUTDATED; 250 break; 251 default: 252 /* The script is broken ... */ 253 nps = D_UNKNOWN; 254 dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff); 255 return nps; 256 } 257 258 dev_info(DEV, "fence-peer helper returned %d (%s)\n", 259 (r>>8) & 0xff, ex_to_string); 260 261out: 262 if (mdev->state.susp_fen && nps >= D_UNKNOWN) { 263 /* The handler was not successful... unfreeze here, the 264 state engine can not unfreeze... */ 265 _drbd_request_state(mdev, NS(susp_fen, 0), CS_VERBOSE); 266 } 267 268 return nps; 269} 270 271static int _try_outdate_peer_async(void *data) 272{ 273 struct drbd_conf *mdev = (struct drbd_conf *)data; 274 enum drbd_disk_state nps; 275 276 nps = drbd_try_outdate_peer(mdev); 277 drbd_request_state(mdev, NS(pdsk, nps)); 278 279 return 0; 280} 281 282void drbd_try_outdate_peer_async(struct drbd_conf *mdev) 283{ 284 struct task_struct *opa; 285 286 opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev)); 287 if (IS_ERR(opa)) 288 dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n"); 289} 290 291enum drbd_state_rv 292drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) 293{ 294 const int max_tries = 4; 295 enum drbd_state_rv rv = SS_UNKNOWN_ERROR; 296 int try = 0; 297 int forced = 0; 298 union drbd_state mask, val; 299 enum drbd_disk_state nps; 300 301 if (new_role == R_PRIMARY) 302 request_ping(mdev); /* Detect a dead peer ASAP */ 303 304 mutex_lock(&mdev->state_mutex); 305 306 mask.i = 0; mask.role = R_MASK; 307 val.i = 0; val.role = new_role; 308 309 while (try++ < max_tries) { 310 rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE); 311 312 /* in case we first succeeded to outdate, 313 * but now suddenly could establish a connection */ 314 if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) { 315 val.pdsk = 0; 316 mask.pdsk = 0; 317 continue; 318 } 319 320 if (rv == SS_NO_UP_TO_DATE_DISK && force && 321 (mdev->state.disk < D_UP_TO_DATE && 322 mdev->state.disk >= D_INCONSISTENT)) { 323 mask.disk = D_MASK; 324 val.disk = D_UP_TO_DATE; 325 forced = 1; 326 continue; 327 } 328 329 if (rv == SS_NO_UP_TO_DATE_DISK && 330 mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) { 331 D_ASSERT(mdev->state.pdsk == D_UNKNOWN); 332 nps = drbd_try_outdate_peer(mdev); 333 334 if (nps == D_OUTDATED || nps == D_INCONSISTENT) { 335 val.disk = D_UP_TO_DATE; 336 mask.disk = D_MASK; 337 } 338 339 val.pdsk = nps; 340 mask.pdsk = D_MASK; 341 342 continue; 343 } 344 345 if (rv == SS_NOTHING_TO_DO) 346 goto fail; 347 if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) { 348 nps = drbd_try_outdate_peer(mdev); 349 350 if (force && nps > D_OUTDATED) { 351 dev_warn(DEV, "Forced into split brain situation!\n"); 352 nps = D_OUTDATED; 353 } 354 355 mask.pdsk = D_MASK; 356 val.pdsk = nps; 357 358 continue; 359 } 360 if (rv == SS_TWO_PRIMARIES) { 361 /* Maybe the peer is detected as dead very soon... 362 retry at most once more in this case. */ 363 __set_current_state(TASK_INTERRUPTIBLE); 364 schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10); 365 if (try < max_tries) 366 try = max_tries - 1; 367 continue; 368 } 369 if (rv < SS_SUCCESS) { 370 rv = _drbd_request_state(mdev, mask, val, 371 CS_VERBOSE + CS_WAIT_COMPLETE); 372 if (rv < SS_SUCCESS) 373 goto fail; 374 } 375 break; 376 } 377 378 if (rv < SS_SUCCESS) 379 goto fail; 380 381 if (forced) 382 dev_warn(DEV, "Forced to consider local data as UpToDate!\n"); 383 384 /* Wait until nothing is on the fly :) */ 385 wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0); 386 387 if (new_role == R_SECONDARY) { 388 set_disk_ro(mdev->vdisk, true); 389 if (get_ldev(mdev)) { 390 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; 391 put_ldev(mdev); 392 } 393 } else { 394 if (get_net_conf(mdev)) { 395 mdev->net_conf->want_lose = 0; 396 put_net_conf(mdev); 397 } 398 set_disk_ro(mdev->vdisk, false); 399 if (get_ldev(mdev)) { 400 if (((mdev->state.conn < C_CONNECTED || 401 mdev->state.pdsk <= D_FAILED) 402 && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced) 403 drbd_uuid_new_current(mdev); 404 405 mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1; 406 put_ldev(mdev); 407 } 408 } 409 410 /* writeout of activity log covered areas of the bitmap 411 * to stable storage done in after state change already */ 412 413 if (mdev->state.conn >= C_WF_REPORT_PARAMS) { 414 /* if this was forced, we should consider sync */ 415 if (forced) 416 drbd_send_uuids(mdev); 417 drbd_send_state(mdev); 418 } 419 420 drbd_md_sync(mdev); 421 422 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); 423 fail: 424 mutex_unlock(&mdev->state_mutex); 425 return rv; 426} 427 428static struct drbd_conf *ensure_mdev(int minor, int create) 429{ 430 struct drbd_conf *mdev; 431 432 if (minor >= minor_count) 433 return NULL; 434 435 mdev = minor_to_mdev(minor); 436 437 if (!mdev && create) { 438 struct gendisk *disk = NULL; 439 mdev = drbd_new_device(minor); 440 441 spin_lock_irq(&drbd_pp_lock); 442 if (minor_table[minor] == NULL) { 443 minor_table[minor] = mdev; 444 disk = mdev->vdisk; 445 mdev = NULL; 446 } /* else: we lost the race */ 447 spin_unlock_irq(&drbd_pp_lock); 448 449 if (disk) /* we won the race above */ 450 /* in case we ever add a drbd_delete_device(), 451 * don't forget the del_gendisk! */ 452 add_disk(disk); 453 else /* we lost the race above */ 454 drbd_free_mdev(mdev); 455 456 mdev = minor_to_mdev(minor); 457 } 458 459 return mdev; 460} 461 462static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 463 struct drbd_nl_cfg_reply *reply) 464{ 465 struct primary primary_args; 466 467 memset(&primary_args, 0, sizeof(struct primary)); 468 if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) { 469 reply->ret_code = ERR_MANDATORY_TAG; 470 return 0; 471 } 472 473 reply->ret_code = 474 drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force); 475 476 return 0; 477} 478 479static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 480 struct drbd_nl_cfg_reply *reply) 481{ 482 reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0); 483 484 return 0; 485} 486 487/* initializes the md.*_offset members, so we are able to find 488 * the on disk meta data */ 489static void drbd_md_set_sector_offsets(struct drbd_conf *mdev, 490 struct drbd_backing_dev *bdev) 491{ 492 sector_t md_size_sect = 0; 493 switch (bdev->dc.meta_dev_idx) { 494 default: 495 /* v07 style fixed size indexed meta data */ 496 bdev->md.md_size_sect = MD_RESERVED_SECT; 497 bdev->md.md_offset = drbd_md_ss__(mdev, bdev); 498 bdev->md.al_offset = MD_AL_OFFSET; 499 bdev->md.bm_offset = MD_BM_OFFSET; 500 break; 501 case DRBD_MD_INDEX_FLEX_EXT: 502 /* just occupy the full device; unit: sectors */ 503 bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev); 504 bdev->md.md_offset = 0; 505 bdev->md.al_offset = MD_AL_OFFSET; 506 bdev->md.bm_offset = MD_BM_OFFSET; 507 break; 508 case DRBD_MD_INDEX_INTERNAL: 509 case DRBD_MD_INDEX_FLEX_INT: 510 bdev->md.md_offset = drbd_md_ss__(mdev, bdev); 511 /* al size is still fixed */ 512 bdev->md.al_offset = -MD_AL_MAX_SIZE; 513 /* we need (slightly less than) ~ this much bitmap sectors: */ 514 md_size_sect = drbd_get_capacity(bdev->backing_bdev); 515 md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT); 516 md_size_sect = BM_SECT_TO_EXT(md_size_sect); 517 md_size_sect = ALIGN(md_size_sect, 8); 518 519 /* plus the "drbd meta data super block", 520 * and the activity log; */ 521 md_size_sect += MD_BM_OFFSET; 522 523 bdev->md.md_size_sect = md_size_sect; 524 /* bitmap offset is adjusted by 'super' block size */ 525 bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET; 526 break; 527 } 528} 529 530char *ppsize(char *buf, unsigned long long size) 531{ 532 /* Needs 9 bytes at max. */ 533 static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' }; 534 int base = 0; 535 while (size >= 10000) { 536 /* shift + round */ 537 size = (size >> 10) + !!(size & (1<<9)); 538 base++; 539 } 540 sprintf(buf, "%lu %cB", (long)size, units[base]); 541 542 return buf; 543} 544 545/* there is still a theoretical deadlock when called from receiver 546 * on an D_INCONSISTENT R_PRIMARY: 547 * remote READ does inc_ap_bio, receiver would need to receive answer 548 * packet from remote to dec_ap_bio again. 549 * receiver receive_sizes(), comes here, 550 * waits for ap_bio_cnt == 0. -> deadlock. 551 * but this cannot happen, actually, because: 552 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable 553 * (not connected, or bad/no disk on peer): 554 * see drbd_fail_request_early, ap_bio_cnt is zero. 555 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET: 556 * peer may not initiate a resize. 557 */ 558void drbd_suspend_io(struct drbd_conf *mdev) 559{ 560 set_bit(SUSPEND_IO, &mdev->flags); 561 if (is_susp(mdev->state)) 562 return; 563 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt)); 564} 565 566void drbd_resume_io(struct drbd_conf *mdev) 567{ 568 clear_bit(SUSPEND_IO, &mdev->flags); 569 wake_up(&mdev->misc_wait); 570} 571 572/** 573 * drbd_determine_dev_size() - Sets the right device size obeying all constraints 574 * @mdev: DRBD device. 575 * 576 * Returns 0 on success, negative return values indicate errors. 577 * You should call drbd_md_sync() after calling this function. 578 */ 579enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local) 580{ 581 sector_t prev_first_sect, prev_size; /* previous meta location */ 582 sector_t la_size; 583 sector_t size; 584 char ppb[10]; 585 586 int md_moved, la_size_changed; 587 enum determine_dev_size rv = unchanged; 588 589 /* race: 590 * application request passes inc_ap_bio, 591 * but then cannot get an AL-reference. 592 * this function later may wait on ap_bio_cnt == 0. -> deadlock. 593 * 594 * to avoid that: 595 * Suspend IO right here. 596 * still lock the act_log to not trigger ASSERTs there. 597 */ 598 drbd_suspend_io(mdev); 599 600 /* no wait necessary anymore, actually we could assert that */ 601 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log)); 602 603 prev_first_sect = drbd_md_first_sector(mdev->ldev); 604 prev_size = mdev->ldev->md.md_size_sect; 605 la_size = mdev->ldev->md.la_size_sect; 606 607 /* TODO: should only be some assert here, not (re)init... */ 608 drbd_md_set_sector_offsets(mdev, mdev->ldev); 609 610 size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED); 611 612 if (drbd_get_capacity(mdev->this_bdev) != size || 613 drbd_bm_capacity(mdev) != size) { 614 int err; 615 err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC)); 616 if (unlikely(err)) { 617 /* currently there is only one error: ENOMEM! */ 618 size = drbd_bm_capacity(mdev)>>1; 619 if (size == 0) { 620 dev_err(DEV, "OUT OF MEMORY! " 621 "Could not allocate bitmap!\n"); 622 } else { 623 dev_err(DEV, "BM resizing failed. " 624 "Leaving size unchanged at size = %lu KB\n", 625 (unsigned long)size); 626 } 627 rv = dev_size_error; 628 } 629 /* racy, see comments above. */ 630 drbd_set_my_capacity(mdev, size); 631 mdev->ldev->md.la_size_sect = size; 632 dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1), 633 (unsigned long long)size>>1); 634 } 635 if (rv == dev_size_error) 636 goto out; 637 638 la_size_changed = (la_size != mdev->ldev->md.la_size_sect); 639 640 md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev) 641 || prev_size != mdev->ldev->md.md_size_sect; 642 643 if (la_size_changed || md_moved) { 644 int err; 645 646 drbd_al_shrink(mdev); /* All extents inactive. */ 647 dev_info(DEV, "Writing the whole bitmap, %s\n", 648 la_size_changed && md_moved ? "size changed and md moved" : 649 la_size_changed ? "size changed" : "md moved"); 650 err = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! */ 651 if (err) { 652 rv = dev_size_error; 653 goto out; 654 } 655 drbd_md_mark_dirty(mdev); 656 } 657 658 if (size > la_size) 659 rv = grew; 660 if (size < la_size) 661 rv = shrunk; 662out: 663 lc_unlock(mdev->act_log); 664 wake_up(&mdev->al_wait); 665 drbd_resume_io(mdev); 666 667 return rv; 668} 669 670sector_t 671drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space) 672{ 673 sector_t p_size = mdev->p_size; /* partner's disk size. */ 674 sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */ 675 sector_t m_size; /* my size */ 676 sector_t u_size = bdev->dc.disk_size; /* size requested by user. */ 677 sector_t size = 0; 678 679 m_size = drbd_get_max_capacity(bdev); 680 681 if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) { 682 dev_warn(DEV, "Resize while not connected was forced by the user!\n"); 683 p_size = m_size; 684 } 685 686 if (p_size && m_size) { 687 size = min_t(sector_t, p_size, m_size); 688 } else { 689 if (la_size) { 690 size = la_size; 691 if (m_size && m_size < size) 692 size = m_size; 693 if (p_size && p_size < size) 694 size = p_size; 695 } else { 696 if (m_size) 697 size = m_size; 698 if (p_size) 699 size = p_size; 700 } 701 } 702 703 if (size == 0) 704 dev_err(DEV, "Both nodes diskless!\n"); 705 706 if (u_size) { 707 if (u_size > size) 708 dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n", 709 (unsigned long)u_size>>1, (unsigned long)size>>1); 710 else 711 size = u_size; 712 } 713 714 return size; 715} 716 717/** 718 * drbd_check_al_size() - Ensures that the AL is of the right size 719 * @mdev: DRBD device. 720 * 721 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation 722 * failed, and 0 on success. You should call drbd_md_sync() after you called 723 * this function. 724 */ 725static int drbd_check_al_size(struct drbd_conf *mdev) 726{ 727 struct lru_cache *n, *t; 728 struct lc_element *e; 729 unsigned int in_use; 730 int i; 731 732 ERR_IF(mdev->sync_conf.al_extents < 7) 733 mdev->sync_conf.al_extents = 127; 734 735 if (mdev->act_log && 736 mdev->act_log->nr_elements == mdev->sync_conf.al_extents) 737 return 0; 738 739 in_use = 0; 740 t = mdev->act_log; 741 n = lc_create("act_log", drbd_al_ext_cache, 742 mdev->sync_conf.al_extents, sizeof(struct lc_element), 0); 743 744 if (n == NULL) { 745 dev_err(DEV, "Cannot allocate act_log lru!\n"); 746 return -ENOMEM; 747 } 748 spin_lock_irq(&mdev->al_lock); 749 if (t) { 750 for (i = 0; i < t->nr_elements; i++) { 751 e = lc_element_by_index(t, i); 752 if (e->refcnt) 753 dev_err(DEV, "refcnt(%d)==%d\n", 754 e->lc_number, e->refcnt); 755 in_use += e->refcnt; 756 } 757 } 758 if (!in_use) 759 mdev->act_log = n; 760 spin_unlock_irq(&mdev->al_lock); 761 if (in_use) { 762 dev_err(DEV, "Activity log still in use!\n"); 763 lc_destroy(n); 764 return -EBUSY; 765 } else { 766 if (t) 767 lc_destroy(t); 768 } 769 drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elemens */ 770 return 0; 771} 772 773void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size) __must_hold(local) 774{ 775 struct request_queue * const q = mdev->rq_queue; 776 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; 777 int max_segments = mdev->ldev->dc.max_bio_bvecs; 778 int max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9); 779 780 blk_queue_logical_block_size(q, 512); 781 blk_queue_max_hw_sectors(q, max_hw_sectors); 782 /* This is the workaround for "bio would need to, but cannot, be split" */ 783 blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); 784 blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1); 785 blk_queue_stack_limits(q, b); 786 787 dev_info(DEV, "max BIO size = %u\n", queue_max_hw_sectors(q) << 9); 788 789 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { 790 dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n", 791 q->backing_dev_info.ra_pages, 792 b->backing_dev_info.ra_pages); 793 q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages; 794 } 795} 796 797/* serialize deconfig (worker exiting, doing cleanup) 798 * and reconfig (drbdsetup disk, drbdsetup net) 799 * 800 * Wait for a potentially exiting worker, then restart it, 801 * or start a new one. Flush any pending work, there may still be an 802 * after_state_change queued. 803 */ 804static void drbd_reconfig_start(struct drbd_conf *mdev) 805{ 806 wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags)); 807 wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags)); 808 drbd_thread_start(&mdev->worker); 809 drbd_flush_workqueue(mdev); 810} 811 812/* if still unconfigured, stops worker again. 813 * if configured now, clears CONFIG_PENDING. 814 * wakes potential waiters */ 815static void drbd_reconfig_done(struct drbd_conf *mdev) 816{ 817 spin_lock_irq(&mdev->req_lock); 818 if (mdev->state.disk == D_DISKLESS && 819 mdev->state.conn == C_STANDALONE && 820 mdev->state.role == R_SECONDARY) { 821 set_bit(DEVICE_DYING, &mdev->flags); 822 drbd_thread_stop_nowait(&mdev->worker); 823 } else 824 clear_bit(CONFIG_PENDING, &mdev->flags); 825 spin_unlock_irq(&mdev->req_lock); 826 wake_up(&mdev->state_wait); 827} 828 829/* Make sure IO is suspended before calling this function(). */ 830static void drbd_suspend_al(struct drbd_conf *mdev) 831{ 832 int s = 0; 833 834 if (lc_try_lock(mdev->act_log)) { 835 drbd_al_shrink(mdev); 836 lc_unlock(mdev->act_log); 837 } else { 838 dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n"); 839 return; 840 } 841 842 spin_lock_irq(&mdev->req_lock); 843 if (mdev->state.conn < C_CONNECTED) 844 s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags); 845 846 spin_unlock_irq(&mdev->req_lock); 847 848 if (s) 849 dev_info(DEV, "Suspended AL updates\n"); 850} 851 852/* does always return 0; 853 * interesting return code is in reply->ret_code */ 854static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 855 struct drbd_nl_cfg_reply *reply) 856{ 857 enum drbd_ret_code retcode; 858 enum determine_dev_size dd; 859 sector_t max_possible_sectors; 860 sector_t min_md_device_sectors; 861 struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */ 862 struct block_device *bdev; 863 struct lru_cache *resync_lru = NULL; 864 union drbd_state ns, os; 865 unsigned int max_bio_size; 866 enum drbd_state_rv rv; 867 int cp_discovered = 0; 868 int logical_block_size; 869 870 drbd_reconfig_start(mdev); 871 872 /* if you want to reconfigure, please tear down first */ 873 if (mdev->state.disk > D_DISKLESS) { 874 retcode = ERR_DISK_CONFIGURED; 875 goto fail; 876 } 877 /* It may just now have detached because of IO error. Make sure 878 * drbd_ldev_destroy is done already, we may end up here very fast, 879 * e.g. if someone calls attach from the on-io-error handler, 880 * to realize a "hot spare" feature (not that I'd recommend that) */ 881 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt)); 882 883 /* allocation not in the IO path, cqueue thread context */ 884 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL); 885 if (!nbc) { 886 retcode = ERR_NOMEM; 887 goto fail; 888 } 889 890 nbc->dc.disk_size = DRBD_DISK_SIZE_SECT_DEF; 891 nbc->dc.on_io_error = DRBD_ON_IO_ERROR_DEF; 892 nbc->dc.fencing = DRBD_FENCING_DEF; 893 nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF; 894 895 if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) { 896 retcode = ERR_MANDATORY_TAG; 897 goto fail; 898 } 899 900 if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) { 901 retcode = ERR_MD_IDX_INVALID; 902 goto fail; 903 } 904 905 if (get_net_conf(mdev)) { 906 int prot = mdev->net_conf->wire_protocol; 907 put_net_conf(mdev); 908 if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) { 909 retcode = ERR_STONITH_AND_PROT_A; 910 goto fail; 911 } 912 } 913 914 bdev = blkdev_get_by_path(nbc->dc.backing_dev, 915 FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev); 916 if (IS_ERR(bdev)) { 917 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev, 918 PTR_ERR(bdev)); 919 retcode = ERR_OPEN_DISK; 920 goto fail; 921 } 922 nbc->backing_bdev = bdev; 923 924 /* 925 * meta_dev_idx >= 0: external fixed size, possibly multiple 926 * drbd sharing one meta device. TODO in that case, paranoia 927 * check that [md_bdev, meta_dev_idx] is not yet used by some 928 * other drbd minor! (if you use drbd.conf + drbdadm, that 929 * should check it for you already; but if you don't, or 930 * someone fooled it, we need to double check here) 931 */ 932 bdev = blkdev_get_by_path(nbc->dc.meta_dev, 933 FMODE_READ | FMODE_WRITE | FMODE_EXCL, 934 (nbc->dc.meta_dev_idx < 0) ? 935 (void *)mdev : (void *)drbd_m_holder); 936 if (IS_ERR(bdev)) { 937 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev, 938 PTR_ERR(bdev)); 939 retcode = ERR_OPEN_MD_DISK; 940 goto fail; 941 } 942 nbc->md_bdev = bdev; 943 944 if ((nbc->backing_bdev == nbc->md_bdev) != 945 (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL || 946 nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) { 947 retcode = ERR_MD_IDX_INVALID; 948 goto fail; 949 } 950 951 resync_lru = lc_create("resync", drbd_bm_ext_cache, 952 61, sizeof(struct bm_extent), 953 offsetof(struct bm_extent, lce)); 954 if (!resync_lru) { 955 retcode = ERR_NOMEM; 956 goto fail; 957 } 958 959 /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */ 960 drbd_md_set_sector_offsets(mdev, nbc); 961 962 if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) { 963 dev_err(DEV, "max capacity %llu smaller than disk size %llu\n", 964 (unsigned long long) drbd_get_max_capacity(nbc), 965 (unsigned long long) nbc->dc.disk_size); 966 retcode = ERR_DISK_TO_SMALL; 967 goto fail; 968 } 969 970 if (nbc->dc.meta_dev_idx < 0) { 971 max_possible_sectors = DRBD_MAX_SECTORS_FLEX; 972 /* at least one MB, otherwise it does not make sense */ 973 min_md_device_sectors = (2<<10); 974 } else { 975 max_possible_sectors = DRBD_MAX_SECTORS; 976 min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1); 977 } 978 979 if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) { 980 retcode = ERR_MD_DISK_TO_SMALL; 981 dev_warn(DEV, "refusing attach: md-device too small, " 982 "at least %llu sectors needed for this meta-disk type\n", 983 (unsigned long long) min_md_device_sectors); 984 goto fail; 985 } 986 987 /* Make sure the new disk is big enough 988 * (we may currently be R_PRIMARY with no local disk...) */ 989 if (drbd_get_max_capacity(nbc) < 990 drbd_get_capacity(mdev->this_bdev)) { 991 retcode = ERR_DISK_TO_SMALL; 992 goto fail; 993 } 994 995 nbc->known_size = drbd_get_capacity(nbc->backing_bdev); 996 997 if (nbc->known_size > max_possible_sectors) { 998 dev_warn(DEV, "==> truncating very big lower level device " 999 "to currently maximum possible %llu sectors <==\n", 1000 (unsigned long long) max_possible_sectors); 1001 if (nbc->dc.meta_dev_idx >= 0) 1002 dev_warn(DEV, "==>> using internal or flexible " 1003 "meta data may help <<==\n"); 1004 } 1005 1006 drbd_suspend_io(mdev); 1007 /* also wait for the last barrier ack. */ 1008 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || is_susp(mdev->state)); 1009 /* and for any other previously queued work */ 1010 drbd_flush_workqueue(mdev); 1011 1012 rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE); 1013 retcode = rv; /* FIXME: Type mismatch. */ 1014 drbd_resume_io(mdev); 1015 if (rv < SS_SUCCESS) 1016 goto fail; 1017 1018 if (!get_ldev_if_state(mdev, D_ATTACHING)) 1019 goto force_diskless; 1020 1021 drbd_md_set_sector_offsets(mdev, nbc); 1022 1023 /* allocate a second IO page if logical_block_size != 512 */ 1024 logical_block_size = bdev_logical_block_size(nbc->md_bdev); 1025 if (logical_block_size == 0) 1026 logical_block_size = MD_SECTOR_SIZE; 1027 1028 if (logical_block_size != MD_SECTOR_SIZE) { 1029 if (!mdev->md_io_tmpp) { 1030 struct page *page = alloc_page(GFP_NOIO); 1031 if (!page) 1032 goto force_diskless_dec; 1033 1034 dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n", 1035 logical_block_size, MD_SECTOR_SIZE); 1036 dev_warn(DEV, "Workaround engaged (has performance impact).\n"); 1037 1038 mdev->md_io_tmpp = page; 1039 } 1040 } 1041 1042 if (!mdev->bitmap) { 1043 if (drbd_bm_init(mdev)) { 1044 retcode = ERR_NOMEM; 1045 goto force_diskless_dec; 1046 } 1047 } 1048 1049 retcode = drbd_md_read(mdev, nbc); 1050 if (retcode != NO_ERROR) 1051 goto force_diskless_dec; 1052 1053 if (mdev->state.conn < C_CONNECTED && 1054 mdev->state.role == R_PRIMARY && 1055 (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) { 1056 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n", 1057 (unsigned long long)mdev->ed_uuid); 1058 retcode = ERR_DATA_NOT_CURRENT; 1059 goto force_diskless_dec; 1060 } 1061 1062 /* Since we are diskless, fix the activity log first... */ 1063 if (drbd_check_al_size(mdev)) { 1064 retcode = ERR_NOMEM; 1065 goto force_diskless_dec; 1066 } 1067 1068 /* Prevent shrinking of consistent devices ! */ 1069 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) && 1070 drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) { 1071 dev_warn(DEV, "refusing to truncate a consistent device\n"); 1072 retcode = ERR_DISK_TO_SMALL; 1073 goto force_diskless_dec; 1074 } 1075 1076 if (!drbd_al_read_log(mdev, nbc)) { 1077 retcode = ERR_IO_MD_DISK; 1078 goto force_diskless_dec; 1079 } 1080 1081 /* Reset the "barriers don't work" bits here, then force meta data to 1082 * be written, to ensure we determine if barriers are supported. */ 1083 if (nbc->dc.no_md_flush) 1084 set_bit(MD_NO_FUA, &mdev->flags); 1085 else 1086 clear_bit(MD_NO_FUA, &mdev->flags); 1087 1088 /* Point of no return reached. 1089 * Devices and memory are no longer released by error cleanup below. 1090 * now mdev takes over responsibility, and the state engine should 1091 * clean it up somewhere. */ 1092 D_ASSERT(mdev->ldev == NULL); 1093 mdev->ldev = nbc; 1094 mdev->resync = resync_lru; 1095 nbc = NULL; 1096 resync_lru = NULL; 1097 1098 mdev->write_ordering = WO_bdev_flush; 1099 drbd_bump_write_ordering(mdev, WO_bdev_flush); 1100 1101 if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY)) 1102 set_bit(CRASHED_PRIMARY, &mdev->flags); 1103 else 1104 clear_bit(CRASHED_PRIMARY, &mdev->flags); 1105 1106 if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) && 1107 !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) { 1108 set_bit(CRASHED_PRIMARY, &mdev->flags); 1109 cp_discovered = 1; 1110 } 1111 1112 mdev->send_cnt = 0; 1113 mdev->recv_cnt = 0; 1114 mdev->read_cnt = 0; 1115 mdev->writ_cnt = 0; 1116 1117 max_bio_size = DRBD_MAX_BIO_SIZE; 1118 if (mdev->state.conn == C_CONNECTED) { 1119 /* We are Primary, Connected, and now attach a new local 1120 * backing store. We must not increase the user visible maximum 1121 * bio size on this device to something the peer may not be 1122 * able to handle. */ 1123 if (mdev->agreed_pro_version < 94) 1124 max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9; 1125 else if (mdev->agreed_pro_version == 94) 1126 max_bio_size = DRBD_MAX_SIZE_H80_PACKET; 1127 /* else: drbd 8.3.9 and later, stay with default */ 1128 } 1129 1130 drbd_setup_queue_param(mdev, max_bio_size); 1131 1132 /* If I am currently not R_PRIMARY, 1133 * but meta data primary indicator is set, 1134 * I just now recover from a hard crash, 1135 * and have been R_PRIMARY before that crash. 1136 * 1137 * Now, if I had no connection before that crash 1138 * (have been degraded R_PRIMARY), chances are that 1139 * I won't find my peer now either. 1140 * 1141 * In that case, and _only_ in that case, 1142 * we use the degr-wfc-timeout instead of the default, 1143 * so we can automatically recover from a crash of a 1144 * degraded but active "cluster" after a certain timeout. 1145 */ 1146 clear_bit(USE_DEGR_WFC_T, &mdev->flags); 1147 if (mdev->state.role != R_PRIMARY && 1148 drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) && 1149 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND)) 1150 set_bit(USE_DEGR_WFC_T, &mdev->flags); 1151 1152 dd = drbd_determin_dev_size(mdev, 0); 1153 if (dd == dev_size_error) { 1154 retcode = ERR_NOMEM_BITMAP; 1155 goto force_diskless_dec; 1156 } else if (dd == grew) 1157 set_bit(RESYNC_AFTER_NEG, &mdev->flags); 1158 1159 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) { 1160 dev_info(DEV, "Assuming that all blocks are out of sync " 1161 "(aka FullSync)\n"); 1162 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) { 1163 retcode = ERR_IO_MD_DISK; 1164 goto force_diskless_dec; 1165 } 1166 } else { 1167 if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) { 1168 retcode = ERR_IO_MD_DISK; 1169 goto force_diskless_dec; 1170 } 1171 } 1172 1173 if (cp_discovered) { 1174 drbd_al_apply_to_bm(mdev); 1175 if (drbd_bitmap_io(mdev, &drbd_bm_write, "crashed primary apply AL")) { 1176 retcode = ERR_IO_MD_DISK; 1177 goto force_diskless_dec; 1178 } 1179 } 1180 1181 if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev)) 1182 drbd_suspend_al(mdev); /* IO is still suspended here... */ 1183 1184 spin_lock_irq(&mdev->req_lock); 1185 os = mdev->state; 1186 ns.i = os.i; 1187 /* If MDF_CONSISTENT is not set go into inconsistent state, 1188 otherwise investigate MDF_WasUpToDate... 1189 If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state, 1190 otherwise into D_CONSISTENT state. 1191 */ 1192 if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) { 1193 if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE)) 1194 ns.disk = D_CONSISTENT; 1195 else 1196 ns.disk = D_OUTDATED; 1197 } else { 1198 ns.disk = D_INCONSISTENT; 1199 } 1200 1201 if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED)) 1202 ns.pdsk = D_OUTDATED; 1203 1204 if ( ns.disk == D_CONSISTENT && 1205 (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE)) 1206 ns.disk = D_UP_TO_DATE; 1207 1208 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND, 1209 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before 1210 this point, because drbd_request_state() modifies these 1211 flags. */ 1212 1213 /* In case we are C_CONNECTED postpone any decision on the new disk 1214 state after the negotiation phase. */ 1215 if (mdev->state.conn == C_CONNECTED) { 1216 mdev->new_state_tmp.i = ns.i; 1217 ns.i = os.i; 1218 ns.disk = D_NEGOTIATING; 1219 1220 /* We expect to receive up-to-date UUIDs soon. 1221 To avoid a race in receive_state, free p_uuid while 1222 holding req_lock. I.e. atomic with the state change */ 1223 kfree(mdev->p_uuid); 1224 mdev->p_uuid = NULL; 1225 } 1226 1227 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL); 1228 ns = mdev->state; 1229 spin_unlock_irq(&mdev->req_lock); 1230 1231 if (rv < SS_SUCCESS) 1232 goto force_diskless_dec; 1233 1234 if (mdev->state.role == R_PRIMARY) 1235 mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1; 1236 else 1237 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; 1238 1239 drbd_md_mark_dirty(mdev); 1240 drbd_md_sync(mdev); 1241 1242 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); 1243 put_ldev(mdev); 1244 reply->ret_code = retcode; 1245 drbd_reconfig_done(mdev); 1246 return 0; 1247 1248 force_diskless_dec: 1249 put_ldev(mdev); 1250 force_diskless: 1251 drbd_force_state(mdev, NS(disk, D_FAILED)); 1252 drbd_md_sync(mdev); 1253 fail: 1254 if (nbc) { 1255 if (nbc->backing_bdev) 1256 blkdev_put(nbc->backing_bdev, 1257 FMODE_READ | FMODE_WRITE | FMODE_EXCL); 1258 if (nbc->md_bdev) 1259 blkdev_put(nbc->md_bdev, 1260 FMODE_READ | FMODE_WRITE | FMODE_EXCL); 1261 kfree(nbc); 1262 } 1263 lc_destroy(resync_lru); 1264 1265 reply->ret_code = retcode; 1266 drbd_reconfig_done(mdev); 1267 return 0; 1268} 1269 1270/* Detaching the disk is a process in multiple stages. First we need to lock 1271 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io. 1272 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all 1273 * internal references as well. 1274 * Only then we have finally detached. */ 1275static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1276 struct drbd_nl_cfg_reply *reply) 1277{ 1278 drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */ 1279 reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS)); 1280 if (mdev->state.disk == D_DISKLESS) 1281 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt)); 1282 drbd_resume_io(mdev); 1283 return 0; 1284} 1285 1286static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1287 struct drbd_nl_cfg_reply *reply) 1288{ 1289 int i, ns; 1290 enum drbd_ret_code retcode; 1291 struct net_conf *new_conf = NULL; 1292 struct crypto_hash *tfm = NULL; 1293 struct crypto_hash *integrity_w_tfm = NULL; 1294 struct crypto_hash *integrity_r_tfm = NULL; 1295 struct hlist_head *new_tl_hash = NULL; 1296 struct hlist_head *new_ee_hash = NULL; 1297 struct drbd_conf *odev; 1298 char hmac_name[CRYPTO_MAX_ALG_NAME]; 1299 void *int_dig_out = NULL; 1300 void *int_dig_in = NULL; 1301 void *int_dig_vv = NULL; 1302 struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr; 1303 1304 drbd_reconfig_start(mdev); 1305 1306 if (mdev->state.conn > C_STANDALONE) { 1307 retcode = ERR_NET_CONFIGURED; 1308 goto fail; 1309 } 1310 1311 /* allocation not in the IO path, cqueue thread context */ 1312 new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL); 1313 if (!new_conf) { 1314 retcode = ERR_NOMEM; 1315 goto fail; 1316 } 1317 1318 new_conf->timeout = DRBD_TIMEOUT_DEF; 1319 new_conf->try_connect_int = DRBD_CONNECT_INT_DEF; 1320 new_conf->ping_int = DRBD_PING_INT_DEF; 1321 new_conf->max_epoch_size = DRBD_MAX_EPOCH_SIZE_DEF; 1322 new_conf->max_buffers = DRBD_MAX_BUFFERS_DEF; 1323 new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF; 1324 new_conf->sndbuf_size = DRBD_SNDBUF_SIZE_DEF; 1325 new_conf->rcvbuf_size = DRBD_RCVBUF_SIZE_DEF; 1326 new_conf->ko_count = DRBD_KO_COUNT_DEF; 1327 new_conf->after_sb_0p = DRBD_AFTER_SB_0P_DEF; 1328 new_conf->after_sb_1p = DRBD_AFTER_SB_1P_DEF; 1329 new_conf->after_sb_2p = DRBD_AFTER_SB_2P_DEF; 1330 new_conf->want_lose = 0; 1331 new_conf->two_primaries = 0; 1332 new_conf->wire_protocol = DRBD_PROT_C; 1333 new_conf->ping_timeo = DRBD_PING_TIMEO_DEF; 1334 new_conf->rr_conflict = DRBD_RR_CONFLICT_DEF; 1335 new_conf->on_congestion = DRBD_ON_CONGESTION_DEF; 1336 new_conf->cong_extents = DRBD_CONG_EXTENTS_DEF; 1337 1338 if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) { 1339 retcode = ERR_MANDATORY_TAG; 1340 goto fail; 1341 } 1342 1343 if (new_conf->two_primaries 1344 && (new_conf->wire_protocol != DRBD_PROT_C)) { 1345 retcode = ERR_NOT_PROTO_C; 1346 goto fail; 1347 } 1348 1349 if (get_ldev(mdev)) { 1350 enum drbd_fencing_p fp = mdev->ldev->dc.fencing; 1351 put_ldev(mdev); 1352 if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) { 1353 retcode = ERR_STONITH_AND_PROT_A; 1354 goto fail; 1355 } 1356 } 1357 1358 if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) { 1359 retcode = ERR_CONG_NOT_PROTO_A; 1360 goto fail; 1361 } 1362 1363 if (mdev->state.role == R_PRIMARY && new_conf->want_lose) { 1364 retcode = ERR_DISCARD; 1365 goto fail; 1366 } 1367 1368 retcode = NO_ERROR; 1369 1370 new_my_addr = (struct sockaddr *)&new_conf->my_addr; 1371 new_peer_addr = (struct sockaddr *)&new_conf->peer_addr; 1372 for (i = 0; i < minor_count; i++) { 1373 odev = minor_to_mdev(i); 1374 if (!odev || odev == mdev) 1375 continue; 1376 if (get_net_conf(odev)) { 1377 taken_addr = (struct sockaddr *)&odev->net_conf->my_addr; 1378 if (new_conf->my_addr_len == odev->net_conf->my_addr_len && 1379 !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len)) 1380 retcode = ERR_LOCAL_ADDR; 1381 1382 taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr; 1383 if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len && 1384 !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len)) 1385 retcode = ERR_PEER_ADDR; 1386 1387 put_net_conf(odev); 1388 if (retcode != NO_ERROR) 1389 goto fail; 1390 } 1391 } 1392 1393 if (new_conf->cram_hmac_alg[0] != 0) { 1394 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", 1395 new_conf->cram_hmac_alg); 1396 tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC); 1397 if (IS_ERR(tfm)) { 1398 tfm = NULL; 1399 retcode = ERR_AUTH_ALG; 1400 goto fail; 1401 } 1402 1403 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) { 1404 retcode = ERR_AUTH_ALG_ND; 1405 goto fail; 1406 } 1407 } 1408 1409 if (new_conf->integrity_alg[0]) { 1410 integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC); 1411 if (IS_ERR(integrity_w_tfm)) { 1412 integrity_w_tfm = NULL; 1413 retcode=ERR_INTEGRITY_ALG; 1414 goto fail; 1415 } 1416 1417 if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) { 1418 retcode=ERR_INTEGRITY_ALG_ND; 1419 goto fail; 1420 } 1421 1422 integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC); 1423 if (IS_ERR(integrity_r_tfm)) { 1424 integrity_r_tfm = NULL; 1425 retcode=ERR_INTEGRITY_ALG; 1426 goto fail; 1427 } 1428 } 1429 1430 ns = new_conf->max_epoch_size/8; 1431 if (mdev->tl_hash_s != ns) { 1432 new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL); 1433 if (!new_tl_hash) { 1434 retcode = ERR_NOMEM; 1435 goto fail; 1436 } 1437 } 1438 1439 ns = new_conf->max_buffers/8; 1440 if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) { 1441 new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL); 1442 if (!new_ee_hash) { 1443 retcode = ERR_NOMEM; 1444 goto fail; 1445 } 1446 } 1447 1448 ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0; 1449 1450 if (integrity_w_tfm) { 1451 i = crypto_hash_digestsize(integrity_w_tfm); 1452 int_dig_out = kmalloc(i, GFP_KERNEL); 1453 if (!int_dig_out) { 1454 retcode = ERR_NOMEM; 1455 goto fail; 1456 } 1457 int_dig_in = kmalloc(i, GFP_KERNEL); 1458 if (!int_dig_in) { 1459 retcode = ERR_NOMEM; 1460 goto fail; 1461 } 1462 int_dig_vv = kmalloc(i, GFP_KERNEL); 1463 if (!int_dig_vv) { 1464 retcode = ERR_NOMEM; 1465 goto fail; 1466 } 1467 } 1468 1469 if (!mdev->bitmap) { 1470 if(drbd_bm_init(mdev)) { 1471 retcode = ERR_NOMEM; 1472 goto fail; 1473 } 1474 } 1475 1476 drbd_flush_workqueue(mdev); 1477 spin_lock_irq(&mdev->req_lock); 1478 if (mdev->net_conf != NULL) { 1479 retcode = ERR_NET_CONFIGURED; 1480 spin_unlock_irq(&mdev->req_lock); 1481 goto fail; 1482 } 1483 mdev->net_conf = new_conf; 1484 1485 mdev->send_cnt = 0; 1486 mdev->recv_cnt = 0; 1487 1488 if (new_tl_hash) { 1489 kfree(mdev->tl_hash); 1490 mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8; 1491 mdev->tl_hash = new_tl_hash; 1492 } 1493 1494 if (new_ee_hash) { 1495 kfree(mdev->ee_hash); 1496 mdev->ee_hash_s = mdev->net_conf->max_buffers/8; 1497 mdev->ee_hash = new_ee_hash; 1498 } 1499 1500 crypto_free_hash(mdev->cram_hmac_tfm); 1501 mdev->cram_hmac_tfm = tfm; 1502 1503 crypto_free_hash(mdev->integrity_w_tfm); 1504 mdev->integrity_w_tfm = integrity_w_tfm; 1505 1506 crypto_free_hash(mdev->integrity_r_tfm); 1507 mdev->integrity_r_tfm = integrity_r_tfm; 1508 1509 kfree(mdev->int_dig_out); 1510 kfree(mdev->int_dig_in); 1511 kfree(mdev->int_dig_vv); 1512 mdev->int_dig_out=int_dig_out; 1513 mdev->int_dig_in=int_dig_in; 1514 mdev->int_dig_vv=int_dig_vv; 1515 retcode = _drbd_set_state(_NS(mdev, conn, C_UNCONNECTED), CS_VERBOSE, NULL); 1516 spin_unlock_irq(&mdev->req_lock); 1517 1518 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); 1519 reply->ret_code = retcode; 1520 drbd_reconfig_done(mdev); 1521 return 0; 1522 1523fail: 1524 kfree(int_dig_out); 1525 kfree(int_dig_in); 1526 kfree(int_dig_vv); 1527 crypto_free_hash(tfm); 1528 crypto_free_hash(integrity_w_tfm); 1529 crypto_free_hash(integrity_r_tfm); 1530 kfree(new_tl_hash); 1531 kfree(new_ee_hash); 1532 kfree(new_conf); 1533 1534 reply->ret_code = retcode; 1535 drbd_reconfig_done(mdev); 1536 return 0; 1537} 1538 1539static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1540 struct drbd_nl_cfg_reply *reply) 1541{ 1542 int retcode; 1543 struct disconnect dc; 1544 1545 memset(&dc, 0, sizeof(struct disconnect)); 1546 if (!disconnect_from_tags(mdev, nlp->tag_list, &dc)) { 1547 retcode = ERR_MANDATORY_TAG; 1548 goto fail; 1549 } 1550 1551 if (dc.force) { 1552 spin_lock_irq(&mdev->req_lock); 1553 if (mdev->state.conn >= C_WF_CONNECTION) 1554 _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), CS_HARD, NULL); 1555 spin_unlock_irq(&mdev->req_lock); 1556 goto done; 1557 } 1558 1559 retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED); 1560 1561 if (retcode == SS_NOTHING_TO_DO) 1562 goto done; 1563 else if (retcode == SS_ALREADY_STANDALONE) 1564 goto done; 1565 else if (retcode == SS_PRIMARY_NOP) { 1566 /* Our statche checking code wants to see the peer outdated. */ 1567 retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING, 1568 pdsk, D_OUTDATED)); 1569 } else if (retcode == SS_CW_FAILED_BY_PEER) { 1570 /* The peer probably wants to see us outdated. */ 1571 retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING, 1572 disk, D_OUTDATED), 1573 CS_ORDERED); 1574 if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) { 1575 drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 1576 retcode = SS_SUCCESS; 1577 } 1578 } 1579 1580 if (retcode < SS_SUCCESS) 1581 goto fail; 1582 1583 if (wait_event_interruptible(mdev->state_wait, 1584 mdev->state.conn != C_DISCONNECTING)) { 1585 /* Do not test for mdev->state.conn == C_STANDALONE, since 1586 someone else might connect us in the mean time! */ 1587 retcode = ERR_INTR; 1588 goto fail; 1589 } 1590 1591 done: 1592 retcode = NO_ERROR; 1593 fail: 1594 drbd_md_sync(mdev); 1595 reply->ret_code = retcode; 1596 return 0; 1597} 1598 1599void resync_after_online_grow(struct drbd_conf *mdev) 1600{ 1601 int iass; /* I am sync source */ 1602 1603 dev_info(DEV, "Resync of new storage after online grow\n"); 1604 if (mdev->state.role != mdev->state.peer) 1605 iass = (mdev->state.role == R_PRIMARY); 1606 else 1607 iass = test_bit(DISCARD_CONCURRENT, &mdev->flags); 1608 1609 if (iass) 1610 drbd_start_resync(mdev, C_SYNC_SOURCE); 1611 else 1612 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE); 1613} 1614 1615static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1616 struct drbd_nl_cfg_reply *reply) 1617{ 1618 struct resize rs; 1619 int retcode = NO_ERROR; 1620 enum determine_dev_size dd; 1621 enum dds_flags ddsf; 1622 1623 memset(&rs, 0, sizeof(struct resize)); 1624 if (!resize_from_tags(mdev, nlp->tag_list, &rs)) { 1625 retcode = ERR_MANDATORY_TAG; 1626 goto fail; 1627 } 1628 1629 if (mdev->state.conn > C_CONNECTED) { 1630 retcode = ERR_RESIZE_RESYNC; 1631 goto fail; 1632 } 1633 1634 if (mdev->state.role == R_SECONDARY && 1635 mdev->state.peer == R_SECONDARY) { 1636 retcode = ERR_NO_PRIMARY; 1637 goto fail; 1638 } 1639 1640 if (!get_ldev(mdev)) { 1641 retcode = ERR_NO_DISK; 1642 goto fail; 1643 } 1644 1645 if (rs.no_resync && mdev->agreed_pro_version < 93) { 1646 retcode = ERR_NEED_APV_93; 1647 goto fail; 1648 } 1649 1650 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) 1651 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev); 1652 1653 mdev->ldev->dc.disk_size = (sector_t)rs.resize_size; 1654 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0); 1655 dd = drbd_determin_dev_size(mdev, ddsf); 1656 drbd_md_sync(mdev); 1657 put_ldev(mdev); 1658 if (dd == dev_size_error) { 1659 retcode = ERR_NOMEM_BITMAP; 1660 goto fail; 1661 } 1662 1663 if (mdev->state.conn == C_CONNECTED) { 1664 if (dd == grew) 1665 set_bit(RESIZE_PENDING, &mdev->flags); 1666 1667 drbd_send_uuids(mdev); 1668 drbd_send_sizes(mdev, 1, ddsf); 1669 } 1670 1671 fail: 1672 reply->ret_code = retcode; 1673 return 0; 1674} 1675 1676static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1677 struct drbd_nl_cfg_reply *reply) 1678{ 1679 int retcode = NO_ERROR; 1680 int err; 1681 int ovr; /* online verify running */ 1682 int rsr; /* re-sync running */ 1683 struct crypto_hash *verify_tfm = NULL; 1684 struct crypto_hash *csums_tfm = NULL; 1685 struct syncer_conf sc; 1686 cpumask_var_t new_cpu_mask; 1687 int *rs_plan_s = NULL; 1688 int fifo_size; 1689 1690 if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) { 1691 retcode = ERR_NOMEM; 1692 goto fail; 1693 } 1694 1695 if (nlp->flags & DRBD_NL_SET_DEFAULTS) { 1696 memset(&sc, 0, sizeof(struct syncer_conf)); 1697 sc.rate = DRBD_RATE_DEF; 1698 sc.after = DRBD_AFTER_DEF; 1699 sc.al_extents = DRBD_AL_EXTENTS_DEF; 1700 sc.on_no_data = DRBD_ON_NO_DATA_DEF; 1701 sc.c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF; 1702 sc.c_delay_target = DRBD_C_DELAY_TARGET_DEF; 1703 sc.c_fill_target = DRBD_C_FILL_TARGET_DEF; 1704 sc.c_max_rate = DRBD_C_MAX_RATE_DEF; 1705 sc.c_min_rate = DRBD_C_MIN_RATE_DEF; 1706 } else 1707 memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf)); 1708 1709 if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) { 1710 retcode = ERR_MANDATORY_TAG; 1711 goto fail; 1712 } 1713 1714 /* re-sync running */ 1715 rsr = ( mdev->state.conn == C_SYNC_SOURCE || 1716 mdev->state.conn == C_SYNC_TARGET || 1717 mdev->state.conn == C_PAUSED_SYNC_S || 1718 mdev->state.conn == C_PAUSED_SYNC_T ); 1719 1720 if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) { 1721 retcode = ERR_CSUMS_RESYNC_RUNNING; 1722 goto fail; 1723 } 1724 1725 if (!rsr && sc.csums_alg[0]) { 1726 csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC); 1727 if (IS_ERR(csums_tfm)) { 1728 csums_tfm = NULL; 1729 retcode = ERR_CSUMS_ALG; 1730 goto fail; 1731 } 1732 1733 if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) { 1734 retcode = ERR_CSUMS_ALG_ND; 1735 goto fail; 1736 } 1737 } 1738 1739 /* online verify running */ 1740 ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T); 1741 1742 if (ovr) { 1743 if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) { 1744 retcode = ERR_VERIFY_RUNNING; 1745 goto fail; 1746 } 1747 } 1748 1749 if (!ovr && sc.verify_alg[0]) { 1750 verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC); 1751 if (IS_ERR(verify_tfm)) { 1752 verify_tfm = NULL; 1753 retcode = ERR_VERIFY_ALG; 1754 goto fail; 1755 } 1756 1757 if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) { 1758 retcode = ERR_VERIFY_ALG_ND; 1759 goto fail; 1760 } 1761 } 1762 1763 /* silently ignore cpu mask on UP kernel */ 1764 if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) { 1765 err = __bitmap_parse(sc.cpu_mask, 32, 0, 1766 cpumask_bits(new_cpu_mask), nr_cpu_ids); 1767 if (err) { 1768 dev_warn(DEV, "__bitmap_parse() failed with %d\n", err); 1769 retcode = ERR_CPU_MASK_PARSE; 1770 goto fail; 1771 } 1772 } 1773 1774 ERR_IF (sc.rate < 1) sc.rate = 1; 1775 ERR_IF (sc.al_extents < 7) sc.al_extents = 127; /* arbitrary minimum */ 1776#define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT) 1777 if (sc.al_extents > AL_MAX) { 1778 dev_err(DEV, "sc.al_extents > %d\n", AL_MAX); 1779 sc.al_extents = AL_MAX; 1780 } 1781#undef AL_MAX 1782 1783 /* to avoid spurious errors when configuring minors before configuring 1784 * the minors they depend on: if necessary, first create the minor we 1785 * depend on */ 1786 if (sc.after >= 0) 1787 ensure_mdev(sc.after, 1); 1788 1789 /* most sanity checks done, try to assign the new sync-after 1790 * dependency. need to hold the global lock in there, 1791 * to avoid a race in the dependency loop check. */ 1792 retcode = drbd_alter_sa(mdev, sc.after); 1793 if (retcode != NO_ERROR) 1794 goto fail; 1795 1796 fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ; 1797 if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) { 1798 rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL); 1799 if (!rs_plan_s) { 1800 dev_err(DEV, "kmalloc of fifo_buffer failed"); 1801 retcode = ERR_NOMEM; 1802 goto fail; 1803 } 1804 } 1805 1806 /* ok, assign the rest of it as well. 1807 * lock against receive_SyncParam() */ 1808 spin_lock(&mdev->peer_seq_lock); 1809 mdev->sync_conf = sc; 1810 1811 if (!rsr) { 1812 crypto_free_hash(mdev->csums_tfm); 1813 mdev->csums_tfm = csums_tfm; 1814 csums_tfm = NULL; 1815 } 1816 1817 if (!ovr) { 1818 crypto_free_hash(mdev->verify_tfm); 1819 mdev->verify_tfm = verify_tfm; 1820 verify_tfm = NULL; 1821 } 1822 1823 if (fifo_size != mdev->rs_plan_s.size) { 1824 kfree(mdev->rs_plan_s.values); 1825 mdev->rs_plan_s.values = rs_plan_s; 1826 mdev->rs_plan_s.size = fifo_size; 1827 mdev->rs_planed = 0; 1828 rs_plan_s = NULL; 1829 } 1830 1831 spin_unlock(&mdev->peer_seq_lock); 1832 1833 if (get_ldev(mdev)) { 1834 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log)); 1835 drbd_al_shrink(mdev); 1836 err = drbd_check_al_size(mdev); 1837 lc_unlock(mdev->act_log); 1838 wake_up(&mdev->al_wait); 1839 1840 put_ldev(mdev); 1841 drbd_md_sync(mdev); 1842 1843 if (err) { 1844 retcode = ERR_NOMEM; 1845 goto fail; 1846 } 1847 } 1848 1849 if (mdev->state.conn >= C_CONNECTED) 1850 drbd_send_sync_param(mdev, &sc); 1851 1852 if (!cpumask_equal(mdev->cpu_mask, new_cpu_mask)) { 1853 cpumask_copy(mdev->cpu_mask, new_cpu_mask); 1854 drbd_calc_cpu_mask(mdev); 1855 mdev->receiver.reset_cpu_mask = 1; 1856 mdev->asender.reset_cpu_mask = 1; 1857 mdev->worker.reset_cpu_mask = 1; 1858 } 1859 1860 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); 1861fail: 1862 kfree(rs_plan_s); 1863 free_cpumask_var(new_cpu_mask); 1864 crypto_free_hash(csums_tfm); 1865 crypto_free_hash(verify_tfm); 1866 reply->ret_code = retcode; 1867 return 0; 1868} 1869 1870static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1871 struct drbd_nl_cfg_reply *reply) 1872{ 1873 int retcode; 1874 1875 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED); 1876 1877 if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION) 1878 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T)); 1879 1880 while (retcode == SS_NEED_CONNECTION) { 1881 spin_lock_irq(&mdev->req_lock); 1882 if (mdev->state.conn < C_CONNECTED) 1883 retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL); 1884 spin_unlock_irq(&mdev->req_lock); 1885 1886 if (retcode != SS_NEED_CONNECTION) 1887 break; 1888 1889 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T)); 1890 } 1891 1892 reply->ret_code = retcode; 1893 return 0; 1894} 1895 1896static int drbd_bmio_set_susp_al(struct drbd_conf *mdev) 1897{ 1898 int rv; 1899 1900 rv = drbd_bmio_set_n_write(mdev); 1901 drbd_suspend_al(mdev); 1902 return rv; 1903} 1904 1905static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1906 struct drbd_nl_cfg_reply *reply) 1907{ 1908 int retcode; 1909 1910 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED); 1911 1912 if (retcode < SS_SUCCESS) { 1913 if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) { 1914 /* The peer will get a resync upon connect anyways. Just make that 1915 into a full resync. */ 1916 retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT)); 1917 if (retcode >= SS_SUCCESS) { 1918 /* open coded drbd_bitmap_io() */ 1919 if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al, 1920 "set_n_write from invalidate_peer")) 1921 retcode = ERR_IO_MD_DISK; 1922 } 1923 } else 1924 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S)); 1925 } 1926 1927 reply->ret_code = retcode; 1928 return 0; 1929} 1930 1931static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1932 struct drbd_nl_cfg_reply *reply) 1933{ 1934 int retcode = NO_ERROR; 1935 1936 if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO) 1937 retcode = ERR_PAUSE_IS_SET; 1938 1939 reply->ret_code = retcode; 1940 return 0; 1941} 1942 1943static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1944 struct drbd_nl_cfg_reply *reply) 1945{ 1946 int retcode = NO_ERROR; 1947 1948 if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) 1949 retcode = ERR_PAUSE_IS_CLEAR; 1950 1951 reply->ret_code = retcode; 1952 return 0; 1953} 1954 1955static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1956 struct drbd_nl_cfg_reply *reply) 1957{ 1958 reply->ret_code = drbd_request_state(mdev, NS(susp, 1)); 1959 1960 return 0; 1961} 1962 1963static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1964 struct drbd_nl_cfg_reply *reply) 1965{ 1966 if (test_bit(NEW_CUR_UUID, &mdev->flags)) { 1967 drbd_uuid_new_current(mdev); 1968 clear_bit(NEW_CUR_UUID, &mdev->flags); 1969 } 1970 drbd_suspend_io(mdev); 1971 reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0)); 1972 if (reply->ret_code == SS_SUCCESS) { 1973 if (mdev->state.conn < C_CONNECTED) 1974 tl_clear(mdev); 1975 if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED) 1976 tl_restart(mdev, fail_frozen_disk_io); 1977 } 1978 drbd_resume_io(mdev); 1979 1980 return 0; 1981} 1982 1983static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1984 struct drbd_nl_cfg_reply *reply) 1985{ 1986 reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED)); 1987 return 0; 1988} 1989 1990static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1991 struct drbd_nl_cfg_reply *reply) 1992{ 1993 unsigned short *tl; 1994 1995 tl = reply->tag_list; 1996 1997 if (get_ldev(mdev)) { 1998 tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl); 1999 put_ldev(mdev); 2000 } 2001 2002 if (get_net_conf(mdev)) { 2003 tl = net_conf_to_tags(mdev, mdev->net_conf, tl); 2004 put_net_conf(mdev); 2005 } 2006 tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl); 2007 2008 put_unaligned(TT_END, tl++); /* Close the tag list */ 2009 2010 return (int)((char *)tl - (char *)reply->tag_list); 2011} 2012 2013static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 2014 struct drbd_nl_cfg_reply *reply) 2015{ 2016 unsigned short *tl = reply->tag_list; 2017 union drbd_state s = mdev->state; 2018 unsigned long rs_left; 2019 unsigned int res; 2020 2021 tl = get_state_to_tags(mdev, (struct get_state *)&s, tl); 2022 2023 /* no local ref, no bitmap, no syncer progress. */ 2024 if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) { 2025 if (get_ldev(mdev)) { 2026 drbd_get_syncer_progress(mdev, &rs_left, &res); 2027 tl = tl_add_int(tl, T_sync_progress, &res); 2028 put_ldev(mdev); 2029 } 2030 } 2031 put_unaligned(TT_END, tl++); /* Close the tag list */ 2032 2033 return (int)((char *)tl - (char *)reply->tag_list); 2034} 2035 2036static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 2037 struct drbd_nl_cfg_reply *reply) 2038{ 2039 unsigned short *tl; 2040 2041 tl = reply->tag_list; 2042 2043 if (get_ldev(mdev)) { 2044 tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64)); 2045 tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags); 2046 put_ldev(mdev); 2047 } 2048 put_unaligned(TT_END, tl++); /* Close the tag list */ 2049 2050 return (int)((char *)tl - (char *)reply->tag_list); 2051} 2052 2053/** 2054 * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use 2055 * @mdev: DRBD device. 2056 * @nlp: Netlink/connector packet from drbdsetup 2057 * @reply: Reply packet for drbdsetup 2058 */ 2059static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 2060 struct drbd_nl_cfg_reply *reply) 2061{ 2062 unsigned short *tl; 2063 char rv; 2064 2065 tl = reply->tag_list; 2066 2067 rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED : 2068 test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT; 2069 2070 tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv)); 2071 put_unaligned(TT_END, tl++); /* Close the tag list */ 2072 2073 return (int)((char *)tl - (char *)reply->tag_list); 2074} 2075 2076static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 2077 struct drbd_nl_cfg_reply *reply) 2078{ 2079 /* default to resume from last known position, if possible */ 2080 struct start_ov args = 2081 { .start_sector = mdev->ov_start_sector }; 2082 2083 if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) { 2084 reply->ret_code = ERR_MANDATORY_TAG; 2085 return 0; 2086 } 2087 /* w_make_ov_request expects position to be aligned */ 2088 mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT; 2089 reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S)); 2090 return 0; 2091} 2092 2093 2094static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 2095 struct drbd_nl_cfg_reply *reply) 2096{ 2097 int retcode = NO_ERROR; 2098 int skip_initial_sync = 0; 2099 int err; 2100 2101 struct new_c_uuid args; 2102 2103 memset(&args, 0, sizeof(struct new_c_uuid)); 2104 if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) { 2105 reply->ret_code = ERR_MANDATORY_TAG; 2106 return 0; 2107 } 2108 2109 mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */ 2110 2111 if (!get_ldev(mdev)) { 2112 retcode = ERR_NO_DISK; 2113 goto out; 2114 } 2115 2116 /* this is "skip initial sync", assume to be clean */ 2117 if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 && 2118 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) { 2119 dev_info(DEV, "Preparing to skip initial sync\n"); 2120 skip_initial_sync = 1; 2121 } else if (mdev->state.conn != C_STANDALONE) { 2122 retcode = ERR_CONNECTED; 2123 goto out_dec; 2124 } 2125 2126 drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */ 2127 drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */ 2128 2129 if (args.clear_bm) { 2130 err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid"); 2131 if (err) { 2132 dev_err(DEV, "Writing bitmap failed with %d\n",err); 2133 retcode = ERR_IO_MD_DISK; 2134 } 2135 if (skip_initial_sync) { 2136 drbd_send_uuids_skip_initial_sync(mdev); 2137 _drbd_uuid_set(mdev, UI_BITMAP, 0); 2138 spin_lock_irq(&mdev->req_lock); 2139 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), 2140 CS_VERBOSE, NULL); 2141 spin_unlock_irq(&mdev->req_lock); 2142 } 2143 } 2144 2145 drbd_md_sync(mdev); 2146out_dec: 2147 put_ldev(mdev); 2148out: 2149 mutex_unlock(&mdev->state_mutex); 2150 2151 reply->ret_code = retcode; 2152 return 0; 2153} 2154 2155struct cn_handler_struct { 2156 int (*function)(struct drbd_conf *, 2157 struct drbd_nl_cfg_req *, 2158 struct drbd_nl_cfg_reply *); 2159 int reply_body_size; 2160}; 2161 2162static struct cn_handler_struct cnd_table[] = { 2163 [ P_primary ] = { &drbd_nl_primary, 0 }, 2164 [ P_secondary ] = { &drbd_nl_secondary, 0 }, 2165 [ P_disk_conf ] = { &drbd_nl_disk_conf, 0 }, 2166 [ P_detach ] = { &drbd_nl_detach, 0 }, 2167 [ P_net_conf ] = { &drbd_nl_net_conf, 0 }, 2168 [ P_disconnect ] = { &drbd_nl_disconnect, 0 }, 2169 [ P_resize ] = { &drbd_nl_resize, 0 }, 2170 [ P_syncer_conf ] = { &drbd_nl_syncer_conf, 0 }, 2171 [ P_invalidate ] = { &drbd_nl_invalidate, 0 }, 2172 [ P_invalidate_peer ] = { &drbd_nl_invalidate_peer, 0 }, 2173 [ P_pause_sync ] = { &drbd_nl_pause_sync, 0 }, 2174 [ P_resume_sync ] = { &drbd_nl_resume_sync, 0 }, 2175 [ P_suspend_io ] = { &drbd_nl_suspend_io, 0 }, 2176 [ P_resume_io ] = { &drbd_nl_resume_io, 0 }, 2177 [ P_outdate ] = { &drbd_nl_outdate, 0 }, 2178 [ P_get_config ] = { &drbd_nl_get_config, 2179 sizeof(struct syncer_conf_tag_len_struct) + 2180 sizeof(struct disk_conf_tag_len_struct) + 2181 sizeof(struct net_conf_tag_len_struct) }, 2182 [ P_get_state ] = { &drbd_nl_get_state, 2183 sizeof(struct get_state_tag_len_struct) + 2184 sizeof(struct sync_progress_tag_len_struct) }, 2185 [ P_get_uuids ] = { &drbd_nl_get_uuids, 2186 sizeof(struct get_uuids_tag_len_struct) }, 2187 [ P_get_timeout_flag ] = { &drbd_nl_get_timeout_flag, 2188 sizeof(struct get_timeout_flag_tag_len_struct)}, 2189 [ P_start_ov ] = { &drbd_nl_start_ov, 0 }, 2190 [ P_new_c_uuid ] = { &drbd_nl_new_c_uuid, 0 }, 2191}; 2192 2193static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp) 2194{ 2195 struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data; 2196 struct cn_handler_struct *cm; 2197 struct cn_msg *cn_reply; 2198 struct drbd_nl_cfg_reply *reply; 2199 struct drbd_conf *mdev; 2200 int retcode, rr; 2201 int reply_size = sizeof(struct cn_msg) 2202 + sizeof(struct drbd_nl_cfg_reply) 2203 + sizeof(short int); 2204 2205 if (!try_module_get(THIS_MODULE)) { 2206 printk(KERN_ERR "drbd: try_module_get() failed!\n"); 2207 return; 2208 } 2209 2210 if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) { 2211 retcode = ERR_PERM; 2212 goto fail; 2213 } 2214 2215 mdev = ensure_mdev(nlp->drbd_minor, 2216 (nlp->flags & DRBD_NL_CREATE_DEVICE)); 2217 if (!mdev) { 2218 retcode = ERR_MINOR_INVALID; 2219 goto fail; 2220 } 2221 2222 if (nlp->packet_type >= P_nl_after_last_packet || 2223 nlp->packet_type == P_return_code_only) { 2224 retcode = ERR_PACKET_NR; 2225 goto fail; 2226 } 2227 2228 cm = cnd_table + nlp->packet_type; 2229 2230 /* This may happen if packet number is 0: */ 2231 if (cm->function == NULL) { 2232 retcode = ERR_PACKET_NR; 2233 goto fail; 2234 } 2235 2236 reply_size += cm->reply_body_size; 2237 2238 /* allocation not in the IO path, cqueue thread context */ 2239 cn_reply = kzalloc(reply_size, GFP_KERNEL); 2240 if (!cn_reply) { 2241 retcode = ERR_NOMEM; 2242 goto fail; 2243 } 2244 reply = (struct drbd_nl_cfg_reply *) cn_reply->data; 2245 2246 reply->packet_type = 2247 cm->reply_body_size ? nlp->packet_type : P_return_code_only; 2248 reply->minor = nlp->drbd_minor; 2249 reply->ret_code = NO_ERROR; /* Might by modified by cm->function. */ 2250 /* reply->tag_list; might be modified by cm->function. */ 2251 2252 rr = cm->function(mdev, nlp, reply); 2253 2254 cn_reply->id = req->id; 2255 cn_reply->seq = req->seq; 2256 cn_reply->ack = req->ack + 1; 2257 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr; 2258 cn_reply->flags = 0; 2259 2260 rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL); 2261 if (rr && rr != -ESRCH) 2262 printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr); 2263 2264 kfree(cn_reply); 2265 module_put(THIS_MODULE); 2266 return; 2267 fail: 2268 drbd_nl_send_reply(req, retcode); 2269 module_put(THIS_MODULE); 2270} 2271 2272static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */ 2273 2274static unsigned short * 2275__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, 2276 unsigned short len, int nul_terminated) 2277{ 2278 unsigned short l = tag_descriptions[tag_number(tag)].max_len; 2279 len = (len < l) ? len : l; 2280 put_unaligned(tag, tl++); 2281 put_unaligned(len, tl++); 2282 memcpy(tl, data, len); 2283 tl = (unsigned short*)((char*)tl + len); 2284 if (nul_terminated) 2285 *((char*)tl - 1) = 0; 2286 return tl; 2287} 2288 2289static unsigned short * 2290tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len) 2291{ 2292 return __tl_add_blob(tl, tag, data, len, 0); 2293} 2294 2295static unsigned short * 2296tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str) 2297{ 2298 return __tl_add_blob(tl, tag, str, strlen(str)+1, 0); 2299} 2300 2301static unsigned short * 2302tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val) 2303{ 2304 put_unaligned(tag, tl++); 2305 switch(tag_type(tag)) { 2306 case TT_INTEGER: 2307 put_unaligned(sizeof(int), tl++); 2308 put_unaligned(*(int *)val, (int *)tl); 2309 tl = (unsigned short*)((char*)tl+sizeof(int)); 2310 break; 2311 case TT_INT64: 2312 put_unaligned(sizeof(u64), tl++); 2313 put_unaligned(*(u64 *)val, (u64 *)tl); 2314 tl = (unsigned short*)((char*)tl+sizeof(u64)); 2315 break; 2316 default: 2317 /* someone did something stupid. */ 2318 ; 2319 } 2320 return tl; 2321} 2322 2323void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state) 2324{ 2325 char buffer[sizeof(struct cn_msg)+ 2326 sizeof(struct drbd_nl_cfg_reply)+ 2327 sizeof(struct get_state_tag_len_struct)+ 2328 sizeof(short int)]; 2329 struct cn_msg *cn_reply = (struct cn_msg *) buffer; 2330 struct drbd_nl_cfg_reply *reply = 2331 (struct drbd_nl_cfg_reply *)cn_reply->data; 2332 unsigned short *tl = reply->tag_list; 2333 2334 /* dev_warn(DEV, "drbd_bcast_state() got called\n"); */ 2335 2336 tl = get_state_to_tags(mdev, (struct get_state *)&state, tl); 2337 2338 put_unaligned(TT_END, tl++); /* Close the tag list */ 2339 2340 cn_reply->id.idx = CN_IDX_DRBD; 2341 cn_reply->id.val = CN_VAL_DRBD; 2342 2343 cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); 2344 cn_reply->ack = 0; /* not used here. */ 2345 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + 2346 (int)((char *)tl - (char *)reply->tag_list); 2347 cn_reply->flags = 0; 2348 2349 reply->packet_type = P_get_state; 2350 reply->minor = mdev_to_minor(mdev); 2351 reply->ret_code = NO_ERROR; 2352 2353 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2354} 2355 2356void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name) 2357{ 2358 char buffer[sizeof(struct cn_msg)+ 2359 sizeof(struct drbd_nl_cfg_reply)+ 2360 sizeof(struct call_helper_tag_len_struct)+ 2361 sizeof(short int)]; 2362 struct cn_msg *cn_reply = (struct cn_msg *) buffer; 2363 struct drbd_nl_cfg_reply *reply = 2364 (struct drbd_nl_cfg_reply *)cn_reply->data; 2365 unsigned short *tl = reply->tag_list; 2366 2367 /* dev_warn(DEV, "drbd_bcast_state() got called\n"); */ 2368 2369 tl = tl_add_str(tl, T_helper, helper_name); 2370 put_unaligned(TT_END, tl++); /* Close the tag list */ 2371 2372 cn_reply->id.idx = CN_IDX_DRBD; 2373 cn_reply->id.val = CN_VAL_DRBD; 2374 2375 cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); 2376 cn_reply->ack = 0; /* not used here. */ 2377 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + 2378 (int)((char *)tl - (char *)reply->tag_list); 2379 cn_reply->flags = 0; 2380 2381 reply->packet_type = P_call_helper; 2382 reply->minor = mdev_to_minor(mdev); 2383 reply->ret_code = NO_ERROR; 2384 2385 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2386} 2387 2388void drbd_bcast_ee(struct drbd_conf *mdev, 2389 const char *reason, const int dgs, 2390 const char* seen_hash, const char* calc_hash, 2391 const struct drbd_epoch_entry* e) 2392{ 2393 struct cn_msg *cn_reply; 2394 struct drbd_nl_cfg_reply *reply; 2395 unsigned short *tl; 2396 struct page *page; 2397 unsigned len; 2398 2399 if (!e) 2400 return; 2401 if (!reason || !reason[0]) 2402 return; 2403 2404 /* apparently we have to memcpy twice, first to prepare the data for the 2405 * struct cn_msg, then within cn_netlink_send from the cn_msg to the 2406 * netlink skb. */ 2407 /* receiver thread context, which is not in the writeout path (of this node), 2408 * but may be in the writeout path of the _other_ node. 2409 * GFP_NOIO to avoid potential "distributed deadlock". */ 2410 cn_reply = kzalloc( 2411 sizeof(struct cn_msg)+ 2412 sizeof(struct drbd_nl_cfg_reply)+ 2413 sizeof(struct dump_ee_tag_len_struct)+ 2414 sizeof(short int), 2415 GFP_NOIO); 2416 2417 if (!cn_reply) { 2418 dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n", 2419 (unsigned long long)e->sector, e->size); 2420 return; 2421 } 2422 2423 reply = (struct drbd_nl_cfg_reply*)cn_reply->data; 2424 tl = reply->tag_list; 2425 2426 tl = tl_add_str(tl, T_dump_ee_reason, reason); 2427 tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs); 2428 tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs); 2429 tl = tl_add_int(tl, T_ee_sector, &e->sector); 2430 tl = tl_add_int(tl, T_ee_block_id, &e->block_id); 2431 2432 /* dump the first 32k */ 2433 len = min_t(unsigned, e->size, 32 << 10); 2434 put_unaligned(T_ee_data, tl++); 2435 put_unaligned(len, tl++); 2436 2437 page = e->pages; 2438 page_chain_for_each(page) { 2439 void *d = kmap_atomic(page, KM_USER0); 2440 unsigned l = min_t(unsigned, len, PAGE_SIZE); 2441 memcpy(tl, d, l); 2442 kunmap_atomic(d, KM_USER0); 2443 tl = (unsigned short*)((char*)tl + l); 2444 len -= l; 2445 if (len == 0) 2446 break; 2447 } 2448 put_unaligned(TT_END, tl++); /* Close the tag list */ 2449 2450 cn_reply->id.idx = CN_IDX_DRBD; 2451 cn_reply->id.val = CN_VAL_DRBD; 2452 2453 cn_reply->seq = atomic_add_return(1,&drbd_nl_seq); 2454 cn_reply->ack = 0; // not used here. 2455 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + 2456 (int)((char*)tl - (char*)reply->tag_list); 2457 cn_reply->flags = 0; 2458 2459 reply->packet_type = P_dump_ee; 2460 reply->minor = mdev_to_minor(mdev); 2461 reply->ret_code = NO_ERROR; 2462 2463 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2464 kfree(cn_reply); 2465} 2466 2467void drbd_bcast_sync_progress(struct drbd_conf *mdev) 2468{ 2469 char buffer[sizeof(struct cn_msg)+ 2470 sizeof(struct drbd_nl_cfg_reply)+ 2471 sizeof(struct sync_progress_tag_len_struct)+ 2472 sizeof(short int)]; 2473 struct cn_msg *cn_reply = (struct cn_msg *) buffer; 2474 struct drbd_nl_cfg_reply *reply = 2475 (struct drbd_nl_cfg_reply *)cn_reply->data; 2476 unsigned short *tl = reply->tag_list; 2477 unsigned long rs_left; 2478 unsigned int res; 2479 2480 /* no local ref, no bitmap, no syncer progress, no broadcast. */ 2481 if (!get_ldev(mdev)) 2482 return; 2483 drbd_get_syncer_progress(mdev, &rs_left, &res); 2484 put_ldev(mdev); 2485 2486 tl = tl_add_int(tl, T_sync_progress, &res); 2487 put_unaligned(TT_END, tl++); /* Close the tag list */ 2488 2489 cn_reply->id.idx = CN_IDX_DRBD; 2490 cn_reply->id.val = CN_VAL_DRBD; 2491 2492 cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); 2493 cn_reply->ack = 0; /* not used here. */ 2494 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + 2495 (int)((char *)tl - (char *)reply->tag_list); 2496 cn_reply->flags = 0; 2497 2498 reply->packet_type = P_sync_progress; 2499 reply->minor = mdev_to_minor(mdev); 2500 reply->ret_code = NO_ERROR; 2501 2502 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2503} 2504 2505int __init drbd_nl_init(void) 2506{ 2507 static struct cb_id cn_id_drbd; 2508 int err, try=10; 2509 2510 cn_id_drbd.val = CN_VAL_DRBD; 2511 do { 2512 cn_id_drbd.idx = cn_idx; 2513 err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback); 2514 if (!err) 2515 break; 2516 cn_idx = (cn_idx + CN_IDX_STEP); 2517 } while (try--); 2518 2519 if (err) { 2520 printk(KERN_ERR "drbd: cn_drbd failed to register\n"); 2521 return err; 2522 } 2523 2524 return 0; 2525} 2526 2527void drbd_nl_cleanup(void) 2528{ 2529 static struct cb_id cn_id_drbd; 2530 2531 cn_id_drbd.idx = cn_idx; 2532 cn_id_drbd.val = CN_VAL_DRBD; 2533 2534 cn_del_callback(&cn_id_drbd); 2535} 2536 2537void drbd_nl_send_reply(struct cn_msg *req, int ret_code) 2538{ 2539 char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)]; 2540 struct cn_msg *cn_reply = (struct cn_msg *) buffer; 2541 struct drbd_nl_cfg_reply *reply = 2542 (struct drbd_nl_cfg_reply *)cn_reply->data; 2543 int rr; 2544 2545 memset(buffer, 0, sizeof(buffer)); 2546 cn_reply->id = req->id; 2547 2548 cn_reply->seq = req->seq; 2549 cn_reply->ack = req->ack + 1; 2550 cn_reply->len = sizeof(struct drbd_nl_cfg_reply); 2551 cn_reply->flags = 0; 2552 2553 reply->packet_type = P_return_code_only; 2554 reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor; 2555 reply->ret_code = ret_code; 2556 2557 rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2558 if (rr && rr != -ESRCH) 2559 printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr); 2560} 2561 2562