drbd_nl.c revision ef50a3e34f93a067ada541346be3175e924331a2
/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/connector.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_tag_magic.h>
#include <linux/drbd_limits.h>
#include <linux/compiler.h>
#include <linux/kthread.h>

static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int);
static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *);
static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *);

/* see get_sb_bdev and bd_claim */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Generate the tag_list to struct functions */
#define NL_PACKET(name, number, fields) \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) __attribute__ ((unused)); \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) \
{ \
	int tag; \
	int dlen; \
	\
	while ((tag = get_unaligned(tags++)) != TT_END) { \
		dlen = get_unaligned(tags++); \
		switch (tag_number(tag)) { \
		fields \
		default: \
			if (tag & T_MANDATORY) { \
				dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \
				return 0; \
			} \
		} \
		tags = (unsigned short *)((char *)tags + dlen); \
	} \
	return 1; \
}
#define NL_INTEGER(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \
		arg->member = get_unaligned((int *)(tags)); \
		break;
#define NL_INT64(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \
		arg->member = get_unaligned((u64 *)(tags)); \
		break;
#define NL_BIT(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \
		arg->member = *(char *)(tags) ? 1 : 0; \
		break;
#define NL_STRING(pn, pr, member, len) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \
		if (dlen > len) { \
			dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \
				#member, dlen, (unsigned int)len); \
			return 0; \
		} \
		arg->member ## _len = dlen; \
		memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
		break;
#include "linux/drbd_nl.h"

/* Generate the struct to tag_list functions */
#define NL_PACKET(name, number, fields) \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) __attribute__ ((unused)); \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) \
{ \
	fields \
	return tags; \
}

#define NL_INTEGER(pn, pr, member) \
	put_unaligned(pn | pr | TT_INTEGER, tags++); \
	put_unaligned(sizeof(int), tags++); \
	put_unaligned(arg->member, (int *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(int));
#define NL_INT64(pn, pr, member) \
	put_unaligned(pn | pr | TT_INT64, tags++); \
	put_unaligned(sizeof(u64), tags++); \
	put_unaligned(arg->member, (u64 *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(u64));
#define NL_BIT(pn, pr, member) \
	put_unaligned(pn | pr | TT_BIT, tags++); \
	put_unaligned(sizeof(char), tags++); \
	*(char *)tags = arg->member; \
	tags = (unsigned short *)((char *)tags+sizeof(char));
#define NL_STRING(pn, pr, member, len) \
	put_unaligned(pn | pr | TT_STRING, tags++); \
	put_unaligned(arg->member ## _len, tags++); \
	memcpy(tags, arg->member, arg->member ## _len); \
	tags = (unsigned short *)((char *)tags + arg->member ## _len);
#include "linux/drbd_nl.h"

void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name);
void drbd_nl_send_reply(struct cn_msg *, int);
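/*
 * Editor's illustration (not part of the original source): each entry in
 * the tag_list streams produced and consumed by the generated helpers
 * above is a u16 tag word, a u16 length, then "dlen" payload bytes:
 *
 *	+---------+----------+------------------+
 *	| tag u16 | dlen u16 | payload (dlen B) |
 *	+---------+----------+------------------+
 *
 * The list ends with a single TT_END tag.  get_unaligned()/put_unaligned()
 * are used throughout because a payload of odd length leaves the next
 * entry's header, and any int/u64 payload, on an arbitrary byte boundary.
 */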
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			NULL, /* Will be set to address family */
			NULL, /* Will be set to address */
			NULL };

	char mb[12], af[20], ad[60], *afs;
	char *argv[] = { usermode_helper, cmd, mb, NULL };
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));

	if (get_net_conf(mdev)) {
		switch (((struct sockaddr *)mdev->net_conf->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)mdev->net_conf->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
		}
		snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
		envp[3] = af;
		envp[4] = ad;
		put_net_conf(mdev);
	}

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);

	drbd_bcast_ev_helper(mdev, cmd);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
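/*
 * Editor's note: call_usermodehelper() reports the helper's status in
 * wait(2) encoding, so the exit code proper sits in bits 8..15.  A
 * fence-peer script ending with "exit 5", for example, yields
 * ret == 0x0500, and the "(ret >> 8) & 0xff" above recovers the 5 that
 * drbd_try_outdate_peer() switches on below.
 */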
*/ 189 ret = 0; 190 191 return ret; 192} 193 194enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev) 195{ 196 char *ex_to_string; 197 int r; 198 enum drbd_disk_state nps; 199 enum drbd_fencing_p fp; 200 201 D_ASSERT(mdev->state.pdsk == D_UNKNOWN); 202 203 if (get_ldev_if_state(mdev, D_CONSISTENT)) { 204 fp = mdev->ldev->dc.fencing; 205 put_ldev(mdev); 206 } else { 207 dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n"); 208 return mdev->state.pdsk; 209 } 210 211 r = drbd_khelper(mdev, "fence-peer"); 212 213 switch ((r>>8) & 0xff) { 214 case 3: /* peer is inconsistent */ 215 ex_to_string = "peer is inconsistent or worse"; 216 nps = D_INCONSISTENT; 217 break; 218 case 4: /* peer got outdated, or was already outdated */ 219 ex_to_string = "peer was fenced"; 220 nps = D_OUTDATED; 221 break; 222 case 5: /* peer was down */ 223 if (mdev->state.disk == D_UP_TO_DATE) { 224 /* we will(have) create(d) a new UUID anyways... */ 225 ex_to_string = "peer is unreachable, assumed to be dead"; 226 nps = D_OUTDATED; 227 } else { 228 ex_to_string = "peer unreachable, doing nothing since disk != UpToDate"; 229 nps = mdev->state.pdsk; 230 } 231 break; 232 case 6: /* Peer is primary, voluntarily outdate myself. 233 * This is useful when an unconnected R_SECONDARY is asked to 234 * become R_PRIMARY, but finds the other peer being active. */ 235 ex_to_string = "peer is active"; 236 dev_warn(DEV, "Peer is primary, outdating myself.\n"); 237 nps = D_UNKNOWN; 238 _drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE); 239 break; 240 case 7: 241 if (fp != FP_STONITH) 242 dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n"); 243 ex_to_string = "peer was stonithed"; 244 nps = D_OUTDATED; 245 break; 246 default: 247 /* The script is broken ... 
*/ 248 nps = D_UNKNOWN; 249 dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff); 250 return nps; 251 } 252 253 dev_info(DEV, "fence-peer helper returned %d (%s)\n", 254 (r>>8) & 0xff, ex_to_string); 255 return nps; 256} 257 258static int _try_outdate_peer_async(void *data) 259{ 260 struct drbd_conf *mdev = (struct drbd_conf *)data; 261 enum drbd_disk_state nps; 262 263 nps = drbd_try_outdate_peer(mdev); 264 drbd_request_state(mdev, NS(pdsk, nps)); 265 266 return 0; 267} 268 269void drbd_try_outdate_peer_async(struct drbd_conf *mdev) 270{ 271 struct task_struct *opa; 272 273 opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev)); 274 if (IS_ERR(opa)) 275 dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n"); 276} 277 278int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) 279{ 280 const int max_tries = 4; 281 int r = 0; 282 int try = 0; 283 int forced = 0; 284 union drbd_state mask, val; 285 enum drbd_disk_state nps; 286 287 if (new_role == R_PRIMARY) 288 request_ping(mdev); /* Detect a dead peer ASAP */ 289 290 mutex_lock(&mdev->state_mutex); 291 292 mask.i = 0; mask.role = R_MASK; 293 val.i = 0; val.role = new_role; 294 295 while (try++ < max_tries) { 296 r = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE); 297 298 /* in case we first succeeded to outdate, 299 * but now suddenly could establish a connection */ 300 if (r == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) { 301 val.pdsk = 0; 302 mask.pdsk = 0; 303 continue; 304 } 305 306 if (r == SS_NO_UP_TO_DATE_DISK && force && 307 (mdev->state.disk < D_UP_TO_DATE && 308 mdev->state.disk >= D_INCONSISTENT)) { 309 mask.disk = D_MASK; 310 val.disk = D_UP_TO_DATE; 311 forced = 1; 312 continue; 313 } 314 315 if (r == SS_NO_UP_TO_DATE_DISK && 316 mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) { 317 D_ASSERT(mdev->state.pdsk == D_UNKNOWN); 318 nps = drbd_try_outdate_peer(mdev); 319 320 if (nps == D_OUTDATED || nps == D_INCONSISTENT) { 321 val.disk = D_UP_TO_DATE; 322 mask.disk = D_MASK; 323 } 324 325 val.pdsk = nps; 326 mask.pdsk = D_MASK; 327 328 continue; 329 } 330 331 if (r == SS_NOTHING_TO_DO) 332 goto fail; 333 if (r == SS_PRIMARY_NOP && mask.pdsk == 0) { 334 nps = drbd_try_outdate_peer(mdev); 335 336 if (force && nps > D_OUTDATED) { 337 dev_warn(DEV, "Forced into split brain situation!\n"); 338 nps = D_OUTDATED; 339 } 340 341 mask.pdsk = D_MASK; 342 val.pdsk = nps; 343 344 continue; 345 } 346 if (r == SS_TWO_PRIMARIES) { 347 /* Maybe the peer is detected as dead very soon... 348 retry at most once more in this case. 
int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	int r = 0;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;
	enum drbd_disk_state nps;

	if (new_role == R_PRIMARY)
		request_ping(mdev); /* Detect a dead peer ASAP */

	mutex_lock(&mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		r = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (r == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (r == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (r == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
			nps = drbd_try_outdate_peer(mdev);

			if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}

			val.pdsk = nps;
			mask.pdsk = D_MASK;

			continue;
		}

		if (r == SS_NOTHING_TO_DO)
			goto fail;
		if (r == SS_PRIMARY_NOP && mask.pdsk == 0) {
			nps = drbd_try_outdate_peer(mdev);

			if (force && nps > D_OUTDATED) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				nps = D_OUTDATED;
			}

			mask.pdsk = D_MASK;
			val.pdsk = nps;

			continue;
		}
		if (r == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (r < SS_SUCCESS) {
			r = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (r < SS_SUCCESS)
				goto fail;
		}
		break;
	}

	if (r < SS_SUCCESS)
		goto fail;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, TRUE);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		if (get_net_conf(mdev)) {
			mdev->net_conf->want_lose = 0;
			put_net_conf(mdev);
		}
		set_disk_ro(mdev->vdisk, FALSE);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(mdev);
		}
	}

	if ((new_role == R_SECONDARY) && get_ldev(mdev)) {
		drbd_al_to_on_disk_bm(mdev);
		put_ldev(mdev);
	}

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
 fail:
	mutex_unlock(&mdev->state_mutex);
	return r;
}
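/*
 * Editor's sketch of the mask/val idiom used by drbd_set_role() above:
 * a state change request only touches the fields selected in "mask" and
 * forces them to the corresponding bits of "val".  Starting from
 *
 *	mask.i = 0; mask.role = R_MASK;		(change .role ...)
 *	val.i  = 0; val.role  = new_role;	(... to new_role)
 *
 * the retry loop widens the same request, e.g. adding
 * "mask.disk = D_MASK; val.disk = D_UP_TO_DATE" to additionally force the
 * local disk, or setting mask.pdsk/val.pdsk once the peer has been fenced.
 */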
static struct drbd_conf *ensure_mdev(int minor, int create)
{
	struct drbd_conf *mdev;

	if (minor >= minor_count)
		return NULL;

	mdev = minor_to_mdev(minor);

	if (!mdev && create) {
		struct gendisk *disk = NULL;
		mdev = drbd_new_device(minor);

		spin_lock_irq(&drbd_pp_lock);
		if (minor_table[minor] == NULL) {
			minor_table[minor] = mdev;
			disk = mdev->vdisk;
			mdev = NULL;
		} /* else: we lost the race */
		spin_unlock_irq(&drbd_pp_lock);

		if (disk) /* we won the race above */
			/* in case we ever add a drbd_delete_device(),
			 * don't forget the del_gendisk! */
			add_disk(disk);
		else /* we lost the race above */
			drbd_free_mdev(mdev);

		mdev = minor_to_mdev(minor);
	}

	return mdev;
}

static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	struct primary primary_args;

	memset(&primary_args, 0, sizeof(struct primary));
	if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	reply->ret_code =
		drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force);

	return 0;
}

static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0);

	return 0;
}

/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	switch (bdev->dc.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_MAX_SIZE;
		/* we need (slightly less than) this many bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
}

char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max. */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%lu %cB", (long)size, units[base]);

	return buf;
}
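/*
 * Worked example (editor's addition) for the shift-and-round loop in
 * ppsize(): with size = 1000000 (KB), one iteration gives
 * 1000000 >> 10 == 976 plus a round-up because bit 9 is set
 * (1000000 & 512 != 0), so the result prints as "977 MB".
 * 1048576 KB divides evenly and prints as "1024 MB".
 */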
/* there is still a theoretical deadlock when called from receiver
 * on an D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (mdev->state.susp)
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}

/**
 * drbd_determin_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns the outcome as enum determine_dev_size: dev_size_error on
 * failure, otherwise unchanged, shrunk or grew.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
					"Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
					"Leaving size unchanged at size = %lu KB\n",
					(unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
			 (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		rv = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! */
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}

sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
				(unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
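/*
 * Decision table for drbd_new_dev_size() (editor's addition, sizes in
 * sectors, "-" = irrelevant):
 *
 *	p_size	m_size	la_size	u_size	=> size
 *	 2000	 1000	   -	   0	   1000	 min() of both nodes
 *	    0	 1000	  800	   0	    800	 peer unknown: keep the
 *						 last agreed size
 *	    0	 1000	    0	   0	   1000	 never agreed: local size
 *	 2000	 1000	   -	 500	    500	 user request wins as long
 *						 as it is not larger
 */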
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	ERR_IF(mdev->sync_conf.al_extents < 7)
		mdev->sync_conf.al_extents = 127;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache,
		mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}
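/*
 * Editor's note (assuming the usual DRBD activity log extent size of
 * 4 MiB): the 127-extent default above corresponds to a "hot" working
 * set of about 508 MiB that can take writes without additional meta data
 * transactions; a larger al_extents means fewer meta data updates at the
 * cost of a potentially longer resync after a primary crash.
 */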
void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __must_hold(local)
{
	struct request_queue * const q = mdev->rq_queue;
	struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
	int max_segments = mdev->ldev->dc.max_bio_bvecs;

	max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);

	blk_queue_max_hw_sectors(q, max_seg_s >> 9);
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_max_segment_size(q, max_seg_s);
	blk_queue_logical_block_size(q, 512);
	blk_queue_segment_boundary(q, PAGE_SIZE-1);
	blk_stack_limits(&q->limits, &b->limits, 0);

	if (b->merge_bvec_fn)
		dev_warn(DEV, "Backing device's merge_bvec_fn() = %p\n",
			 b->merge_bvec_fn);
	dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q));

	if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
		dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
			 q->backing_dev_info.ra_pages,
			 b->backing_dev_info.ra_pages);
		q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
	}
}

/* serialize deconfig (worker exiting, doing cleanup)
 * and reconfig (drbdsetup disk, drbdsetup net)
 *
 * Wait for a potentially exiting worker, then restart it,
 * or start a new one.  Flush any pending work, there may still be an
 * after_state_change queued.
 */
static void drbd_reconfig_start(struct drbd_conf *mdev)
{
	wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
	wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
	drbd_thread_start(&mdev->worker);
	drbd_flush_workqueue(mdev);
}

/* if still unconfigured, stops worker again.
 * if configured now, clears CONFIG_PENDING.
 * wakes potential waiters */
static void drbd_reconfig_done(struct drbd_conf *mdev)
{
	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.disk == D_DISKLESS &&
	    mdev->state.conn == C_STANDALONE &&
	    mdev->state.role == R_SECONDARY) {
		set_bit(DEVICE_DYING, &mdev->flags);
		drbd_thread_stop_nowait(&mdev->worker);
	} else
		clear_bit(CONFIG_PENDING, &mdev->flags);
	spin_unlock_irq(&mdev->req_lock);
	wake_up(&mdev->state_wait);
}

/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (lc_try_lock(mdev->act_log)) {
		drbd_al_shrink(mdev);
		lc_unlock(mdev->act_log);
	} else {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);

	spin_unlock_irq(&mdev->req_lock);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}

/* does always return 0;
 * interesting return code is in reply->ret_code */
static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	enum drbd_ret_codes retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct inode *inode, *inode2;
	struct lru_cache *resync_lru = NULL;
	union drbd_state ns, os;
	int rv;
	int cp_discovered = 0;
	int logical_block_size;

	drbd_reconfig_start(mdev);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	nbc->dc.disk_size     = DRBD_DISK_SIZE_SECT_DEF;
	nbc->dc.on_io_error   = DRBD_ON_IO_ERROR_DEF;
	nbc->dc.fencing       = DRBD_FENCING_DEF;
	nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;

	if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	if (get_net_conf(mdev)) {
		int prot = mdev->net_conf->wire_protocol;
		put_net_conf(mdev);
		if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}

	nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0);
	if (IS_ERR(nbc->lo_file)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
			PTR_ERR(nbc->lo_file));
		nbc->lo_file = NULL;
		retcode = ERR_OPEN_DISK;
		goto fail;
	}

	inode = nbc->lo_file->f_dentry->d_inode;

	if (!S_ISBLK(inode->i_mode)) {
		retcode = ERR_DISK_NOT_BDEV;
		goto fail;
	}

	nbc->md_file = filp_open(nbc->dc.meta_dev, O_RDWR, 0);
	if (IS_ERR(nbc->md_file)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
			PTR_ERR(nbc->md_file));
		nbc->md_file = NULL;
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}

	inode2 = nbc->md_file->f_dentry->d_inode;

	if (!S_ISBLK(inode2->i_mode)) {
		retcode = ERR_MD_NOT_BDEV;
		goto fail;
	}

	nbc->backing_bdev = inode->i_bdev;
	if (bd_claim(nbc->backing_bdev, mdev)) {
		printk(KERN_ERR "drbd: bd_claim(%p,%p); failed [%p;%p;%u]\n",
		       nbc->backing_bdev, mdev,
		       nbc->backing_bdev->bd_holder,
		       nbc->backing_bdev->bd_contains->bd_holder,
		       nbc->backing_bdev->bd_holders);
		retcode = ERR_BDCLAIM_DISK;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			       61, sizeof(struct bm_extent),
			       offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto release_bdev_fail;
	}

	/* meta_dev_idx >= 0: external fixed size,
	 * possibly multiple drbd sharing one meta device.
	 * TODO in that case, paranoia check that [md_bdev, meta_dev_idx] is
	 * not yet used by some other drbd minor!
	 * (if you use drbd.conf + drbdadm,
	 * that should check it for you already; but if you don't, or someone
	 * fooled it, we need to double check here) */
	nbc->md_bdev = inode2->i_bdev;
	if (bd_claim(nbc->md_bdev, (nbc->dc.meta_dev_idx < 0) ? (void *)mdev
				: (void *) drbd_m_holder)) {
		retcode = ERR_BDCLAIM_MD_DISK;
		goto release_bdev_fail;
	}

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto release_bdev2_fail;
	}

	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) nbc->dc.disk_size);
		retcode = ERR_DISK_TO_SMALL;
		goto release_bdev2_fail;
	}

	if (nbc->dc.meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
			 "at least %llu sectors needed for this meta-disk type\n",
			 (unsigned long long) min_md_device_sectors);
		goto release_bdev2_fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TO_SMALL;
		goto release_bdev2_fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			"to currently maximum possible %llu sectors <==\n",
			(unsigned long long) max_possible_sectors);
		if (nbc->dc.meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				"meta data may help <<==\n");
	}

	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || mdev->state.susp);
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	drbd_resume_io(mdev);
	if (retcode < SS_SUCCESS)
		goto release_bdev2_fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	/* allocate a second IO page if logical_block_size != 512 */
	logical_block_size = bdev_logical_block_size(nbc->md_bdev);
	if (logical_block_size == 0)
		logical_block_size = MD_SECTOR_SIZE;

	if (logical_block_size != MD_SECTOR_SIZE) {
		if (!mdev->md_io_tmpp) {
			struct page *page = alloc_page(GFP_NOIO);
			if (!page)
				goto force_diskless_dec;

			dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n",
				 logical_block_size, MD_SECTOR_SIZE);
			dev_warn(DEV, "Workaround engaged (has performance impact).\n");

			mdev->md_io_tmpp = page;
		}
	}

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TO_SMALL;
		goto force_diskless_dec;
	}

	if (!drbd_al_read_log(mdev, nbc)) {
		retcode = ERR_IO_MD_DISK;
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (nbc->dc.no_md_flush)
		set_bit(MD_NO_BARRIER, &mdev->flags);
	else
		clear_bit(MD_NO_BARRIER, &mdev->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere. */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	nbc = NULL;
	resync_lru = NULL;

	mdev->write_ordering = WO_bio_barrier;
	drbd_bump_write_ordering(mdev, WO_bio_barrier);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !(mdev->state.role == R_PRIMARY && mdev->state.susp &&
	      mdev->sync_conf.on_no_data == OND_SUSPEND_IO)) {
		set_bit(CRASHED_PRIMARY, &mdev->flags);
		cp_discovered = 1;
	}

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;
	mdev->read_cnt = 0;
	mdev->writ_cnt = 0;

	drbd_setup_queue_param(mdev, mdev->state.conn == C_CONNECTED &&
				     mdev->agreed_pro_version < 95 ?
				     DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_SEGMENT_SIZE);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	if (mdev->state.role != R_PRIMARY &&
	     drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &mdev->flags);

	dd = drbd_determin_dev_size(mdev, 0);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == grew)
		set_bit(RESYNC_AFTER_NEG, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
			 "(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (cp_discovered) {
		drbd_al_apply_to_bm(mdev);
		drbd_al_to_on_disk_bm(mdev);
	}

	if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
		drbd_suspend_al(mdev); /* IO is still suspended here... */

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;
	ns.i = os.i;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WasUpToDate...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	if (ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		   To avoid a race in receive_state, free p_uuid while
		   holding req_lock. I.e. atomic with the state change */
		kfree(mdev->p_uuid);
		mdev->p_uuid = NULL;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

 force_diskless_dec:
	put_ldev(mdev);
 force_diskless:
	drbd_force_state(mdev, NS(disk, D_DISKLESS));
	drbd_md_sync(mdev);
 release_bdev2_fail:
	if (nbc)
		bd_release(nbc->md_bdev);
 release_bdev_fail:
	if (nbc)
		bd_release(nbc->backing_bdev);
 fail:
	if (nbc) {
		if (nbc->lo_file)
			fput(nbc->lo_file);
		if (nbc->md_file)
			fput(nbc->md_file);
		kfree(nbc);
	}
	lc_destroy(resync_lru);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}
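/*
 * Editor's summary of the attach flow above:
 *
 *	D_DISKLESS -> D_ATTACHING     (claim devices, read meta data,
 *				       resize, apply activity log)
 *	D_ATTACHING -> D_INCONSISTENT / D_OUTDATED / D_CONSISTENT /
 *		       D_UP_TO_DATE   (derived from the MDF_* flags)
 *	or, while C_CONNECTED:
 *	D_ATTACHING -> D_NEGOTIATING  (final disk state decided only after
 *				       sizes and UUIDs have been exchanged)
 *
 * Any failure after reaching D_ATTACHING falls through the
 * force_diskless* labels, which reset the disk to D_DISKLESS and release
 * the claimed block devices again.
 */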
static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS));
	return 0;
}
static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			    struct drbd_nl_cfg_reply *reply)
{
	int i, ns;
	enum drbd_ret_codes retcode;
	struct net_conf *new_conf = NULL;
	struct crypto_hash *tfm = NULL;
	struct crypto_hash *integrity_w_tfm = NULL;
	struct crypto_hash *integrity_r_tfm = NULL;
	struct hlist_head *new_tl_hash = NULL;
	struct hlist_head *new_ee_hash = NULL;
	struct drbd_conf *odev;
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	void *int_dig_out = NULL;
	void *int_dig_in = NULL;
	void *int_dig_vv = NULL;
	struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;

	drbd_reconfig_start(mdev);

	if (mdev->state.conn > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	new_conf->timeout	   = DRBD_TIMEOUT_DEF;
	new_conf->try_connect_int  = DRBD_CONNECT_INT_DEF;
	new_conf->ping_int	   = DRBD_PING_INT_DEF;
	new_conf->max_epoch_size   = DRBD_MAX_EPOCH_SIZE_DEF;
	new_conf->max_buffers	   = DRBD_MAX_BUFFERS_DEF;
	new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
	new_conf->sndbuf_size	   = DRBD_SNDBUF_SIZE_DEF;
	new_conf->rcvbuf_size	   = DRBD_RCVBUF_SIZE_DEF;
	new_conf->ko_count	   = DRBD_KO_COUNT_DEF;
	new_conf->after_sb_0p	   = DRBD_AFTER_SB_0P_DEF;
	new_conf->after_sb_1p	   = DRBD_AFTER_SB_1P_DEF;
	new_conf->after_sb_2p	   = DRBD_AFTER_SB_2P_DEF;
	new_conf->want_lose	   = 0;
	new_conf->two_primaries    = 0;
	new_conf->wire_protocol    = DRBD_PROT_C;
	new_conf->ping_timeo	   = DRBD_PING_TIMEO_DEF;
	new_conf->rr_conflict	   = DRBD_RR_CONFLICT_DEF;

	if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (new_conf->two_primaries
	    && (new_conf->wire_protocol != DRBD_PROT_C)) {
		retcode = ERR_NOT_PROTO_C;
		goto fail;
	}

	if (get_ldev(mdev)) {
		enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
		if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}

	if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
		retcode = ERR_DISCARD;
		goto fail;
	}

	retcode = NO_ERROR;

	new_my_addr = (struct sockaddr *)&new_conf->my_addr;
	new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev || odev == mdev)
			continue;
		if (get_net_conf(odev)) {
			taken_addr = (struct sockaddr *)&odev->net_conf->my_addr;
			if (new_conf->my_addr_len == odev->net_conf->my_addr_len &&
			    !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
				retcode = ERR_LOCAL_ADDR;

			taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr;
			if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len &&
			    !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
				retcode = ERR_PEER_ADDR;

			put_net_conf(odev);
			if (retcode != NO_ERROR)
				goto fail;
		}
	}

	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			new_conf->cram_hmac_alg);
		tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm)) {
			tfm = NULL;
			retcode = ERR_AUTH_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
			retcode = ERR_AUTH_ALG_ND;
			goto fail;
		}
	}

	if (new_conf->integrity_alg[0]) {
		integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_w_tfm)) {
			integrity_w_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
			retcode = ERR_INTEGRITY_ALG_ND;
			goto fail;
		}

		integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_r_tfm)) {
			integrity_r_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}
	}

	ns = new_conf->max_epoch_size/8;
	if (mdev->tl_hash_s != ns) {
		new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_tl_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	ns = new_conf->max_buffers/8;
	if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) {
		new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_ee_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	if (integrity_w_tfm) {
		i = crypto_hash_digestsize(integrity_w_tfm);
		int_dig_out = kmalloc(i, GFP_KERNEL);
		if (!int_dig_out) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_in = kmalloc(i, GFP_KERNEL);
		if (!int_dig_in) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_vv = kmalloc(i, GFP_KERNEL);
		if (!int_dig_vv) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	drbd_flush_workqueue(mdev);
	spin_lock_irq(&mdev->req_lock);
	if (mdev->net_conf != NULL) {
		retcode = ERR_NET_CONFIGURED;
		spin_unlock_irq(&mdev->req_lock);
		goto fail;
	}
	mdev->net_conf = new_conf;

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;

	if (new_tl_hash) {
		kfree(mdev->tl_hash);
		mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8;
		mdev->tl_hash = new_tl_hash;
	}

	if (new_ee_hash) {
		kfree(mdev->ee_hash);
		mdev->ee_hash_s = mdev->net_conf->max_buffers/8;
		mdev->ee_hash = new_ee_hash;
	}

	crypto_free_hash(mdev->cram_hmac_tfm);
	mdev->cram_hmac_tfm = tfm;

	crypto_free_hash(mdev->integrity_w_tfm);
	mdev->integrity_w_tfm = integrity_w_tfm;

	crypto_free_hash(mdev->integrity_r_tfm);
	mdev->integrity_r_tfm = integrity_r_tfm;

	kfree(mdev->int_dig_out);
	kfree(mdev->int_dig_in);
	kfree(mdev->int_dig_vv);
	mdev->int_dig_out = int_dig_out;
	mdev->int_dig_in = int_dig_in;
	mdev->int_dig_vv = int_dig_vv;
	retcode = _drbd_set_state(_NS(mdev, conn, C_UNCONNECTED), CS_VERBOSE, NULL);
	spin_unlock_irq(&mdev->req_lock);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

fail:
	kfree(int_dig_out);
	kfree(int_dig_in);
	kfree(int_dig_vv);
	crypto_free_hash(tfm);
	crypto_free_hash(integrity_w_tfm);
	crypto_free_hash(integrity_r_tfm);
	kfree(new_tl_hash);
	kfree(new_ee_hash);
	kfree(new_conf);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}
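/*
 * Editor's note on the function above: tl_hash is sized from
 * max_epoch_size and ee_hash from max_buffers, one hlist_head per eight
 * requests.  All fallible allocations (hash tables, crypto transforms,
 * digest buffers) are done up front; only after everything has succeeded
 * is the new net_conf swapped in under req_lock, so the receiver never
 * sees a half-configured connection.
 */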
static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED);

	if (retcode == SS_NOTHING_TO_DO)
		goto done;
	else if (retcode == SS_ALREADY_STANDALONE)
		goto done;
	else if (retcode == SS_PRIMARY_NOP) {
		/* Our state checking code wants to see the peer outdated. */
		retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
						       pdsk, D_OUTDATED));
	} else if (retcode == SS_CW_FAILED_BY_PEER) {
		/* The peer probably wants to see us outdated. */
		retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
							disk, D_OUTDATED),
					      CS_ORDERED);
		if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			retcode = SS_SUCCESS;
		}
	}

	if (retcode < SS_SUCCESS)
		goto fail;

	if (wait_event_interruptible(mdev->state_wait,
				     mdev->state.conn != C_DISCONNECTING)) {
		/* Do not test for mdev->state.conn == C_STANDALONE, since
		   someone else might connect us in the mean time! */
		retcode = ERR_INTR;
		goto fail;
	}

 done:
	retcode = NO_ERROR;
 fail:
	drbd_md_sync(mdev);
	reply->ret_code = retcode;
	return 0;
}

void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}

static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	struct resize rs;
	int retcode = NO_ERROR;
	enum determine_dev_size dd;
	enum dds_flags ddsf;

	memset(&rs, 0, sizeof(struct resize));
	if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		/* don't leak the ldev reference taken above */
		goto fail_ldev;
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determin_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

 fail:
	reply->ret_code = retcode;
	return 0;

 fail_ldev:
	put_ldev(mdev);
	goto fail;
}

static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	struct syncer_conf sc;
	cpumask_var_t new_cpu_mask;
	int *rs_plan_s = NULL;
	int fifo_size;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	if (nlp->flags & DRBD_NL_SET_DEFAULTS) {
		memset(&sc, 0, sizeof(struct syncer_conf));
		sc.rate       = DRBD_RATE_DEF;
		sc.after      = DRBD_AFTER_DEF;
		sc.al_extents = DRBD_AL_EXTENTS_DEF;
		sc.on_no_data = DRBD_ON_NO_DATA_DEF;
		sc.c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF;
		sc.c_delay_target = DRBD_C_DELAY_TARGET_DEF;
		sc.c_fill_target = DRBD_C_FILL_TARGET_DEF;
		sc.c_max_rate = DRBD_C_MAX_RATE_DEF;
		sc.c_min_rate = DRBD_C_MIN_RATE_DEF;
	} else
		memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));

	if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	/* re-sync running */
	rsr = (	mdev->state.conn == C_SYNC_SOURCE ||
		mdev->state.conn == C_SYNC_TARGET ||
		mdev->state.conn == C_PAUSED_SYNC_S ||
		mdev->state.conn == C_PAUSED_SYNC_T );

	if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	if (!rsr && sc.csums_alg[0]) {
		csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(csums_tfm)) {
			csums_tfm = NULL;
			retcode = ERR_CSUMS_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
			retcode = ERR_CSUMS_ALG_ND;
			goto fail;
		}
	}

	/* online verify running */
	ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);

	if (ovr) {
		if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
			retcode = ERR_VERIFY_RUNNING;
			goto fail;
		}
	}

	if (!ovr && sc.verify_alg[0]) {
		verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(verify_tfm)) {
			verify_tfm = NULL;
			retcode = ERR_VERIFY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
			retcode = ERR_VERIFY_ALG_ND;
			goto fail;
		}
	}

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
		err = __bitmap_parse(sc.cpu_mask, 32, 0,
				     cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
			retcode = ERR_CPU_MASK_PARSE;
			goto fail;
		}
	}

	ERR_IF (sc.rate < 1) sc.rate = 1;
	ERR_IF (sc.al_extents < 7) sc.al_extents = 127; /* arbitrary minimum */
#define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT)
	if (sc.al_extents > AL_MAX) {
		dev_err(DEV, "sc.al_extents > %d\n", AL_MAX);
		sc.al_extents = AL_MAX;
	}
#undef AL_MAX

	/* to avoid spurious errors when configuring minors before configuring
	 * the minors they depend on: if necessary, first create the minor we
	 * depend on */
	if (sc.after >= 0)
		ensure_mdev(sc.after, 1);

	/* most sanity checks done, try to assign the new sync-after
	 * dependency.  need to hold the global lock in there,
	 * to avoid a race in the dependency loop check. */
	retcode = drbd_alter_sa(mdev, sc.after);
	if (retcode != NO_ERROR)
		goto fail;

	fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
		rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
		if (!rs_plan_s) {
			dev_err(DEV, "kzalloc of fifo_buffer failed\n");
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	/* ok, assign the rest of it as well.
	 * lock against receive_SyncParam() */
	spin_lock(&mdev->peer_seq_lock);
	mdev->sync_conf = sc;

	if (!rsr) {
		crypto_free_hash(mdev->csums_tfm);
		mdev->csums_tfm = csums_tfm;
		csums_tfm = NULL;
	}

	if (!ovr) {
		crypto_free_hash(mdev->verify_tfm);
		mdev->verify_tfm = verify_tfm;
		verify_tfm = NULL;
	}

	if (fifo_size != mdev->rs_plan_s.size) {
		kfree(mdev->rs_plan_s.values);
		mdev->rs_plan_s.values = rs_plan_s;
		mdev->rs_plan_s.size = fifo_size;
		mdev->rs_planed = 0;
		rs_plan_s = NULL;
	}

	spin_unlock(&mdev->peer_seq_lock);

	if (get_ldev(mdev)) {
		wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
		drbd_al_shrink(mdev);
		err = drbd_check_al_size(mdev);
		lc_unlock(mdev->act_log);
		wake_up(&mdev->al_wait);

		put_ldev(mdev);
		drbd_md_sync(mdev);

		if (err) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev, &sc);

	if (!cpumask_equal(mdev->cpu_mask, new_cpu_mask)) {
		cpumask_copy(mdev->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(mdev);
		mdev->receiver.reset_cpu_mask = 1;
		mdev->asender.reset_cpu_mask = 1;
		mdev->worker.reset_cpu_mask = 1;
	}

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
fail:
	kfree(rs_plan_s);
	free_cpumask_var(new_cpu_mask);
	crypto_free_hash(csums_tfm);
	crypto_free_hash(verify_tfm);
	reply->ret_code = retcode;
	return 0;
}
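/*
 * Editor's note on the fifo sizing in drbd_nl_syncer_conf() above:
 * c_plan_ahead is configured in units of 0.1 seconds.  Assuming
 * SLEEP_TIME is HZ/10 (one resync-timer tick per 100 ms, as defined in
 * drbd_int.h), the expression
 *
 *	fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ
 *
 * reduces to one fifo slot per planned 100 ms step, i.e.
 * fifo_size == sc.c_plan_ahead.
 */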
static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}

	reply->ret_code = retcode;
	return 0;
}

static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
	int rv;

	rv = drbd_bmio_set_n_write(mdev);
	drbd_suspend_al(mdev);
	return rv;
}

static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
				   struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);

	if (retcode < SS_SUCCESS) {
		if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
			/* The peer will get a resync upon connect anyways. Just make that
			   into a full resync. */
			retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
			if (retcode >= SS_SUCCESS) {
				/* open coded drbd_bitmap_io() */
				if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
						   "set_n_write from invalidate_peer"))
					retcode = ERR_IO_MD_DISK;
			}
		} else
			retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
	}

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;

	if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;

	if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_CLEAR;

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(susp, 1));

	return 0;
}

static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
		drbd_md_sync(mdev);
	}
	drbd_suspend_io(mdev);
	reply->ret_code = drbd_request_state(mdev, NS(susp, 0));
	if (reply->ret_code == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev, fail_frozen_disk_io);
	}
	drbd_resume_io(mdev);

	return 0;
}

static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED));
	return 0;
}

static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;

	tl = reply->tag_list;

	if (get_ldev(mdev)) {
		tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl);
		put_ldev(mdev);
	}

	if (get_net_conf(mdev)) {
		tl = net_conf_to_tags(mdev, mdev->net_conf, tl);
		put_net_conf(mdev);
	}
	tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl);

	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}
static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl = reply->tag_list;
	union drbd_state s = mdev->state;
	unsigned long rs_left;
	unsigned int res;

	tl = get_state_to_tags(mdev, (struct get_state *)&s, tl);

	/* no local ref, no bitmap, no syncer progress. */
	if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) {
		if (get_ldev(mdev)) {
			drbd_get_syncer_progress(mdev, &rs_left, &res);
			tl = tl_add_int(tl, T_sync_progress, &res);
			put_ldev(mdev);
		}
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;

	tl = reply->tag_list;

	if (get_ldev(mdev)) {
		tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64));
		tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags);
		put_ldev(mdev);
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

/**
 * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use
 * @mdev:	DRBD device.
 * @nlp:	Netlink/connector packet from drbdsetup
 * @reply:	Reply packet for drbdsetup
 */
static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
				    struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;
	char rv;

	tl = reply->tag_list;

	rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
	     test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;

	tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			    struct drbd_nl_cfg_reply *reply)
{
	/* default to resume from last known position, if possible */
	struct start_ov args =
		{ .start_sector = mdev->ov_start_sector };

	if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}
	/* w_make_ov_request expects the position to be aligned:
	 * round the start sector down to a bitmap-bit boundary */
	mdev->ov_start_sector = args.start_sector & ~(BM_SECT_PER_BIT-1);
	reply->ret_code = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
	return 0;
}
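
/* Usage sketch (an assumption about the userspace side, not taken from this
 * file): drbd_nl_new_c_uuid() below is typically reached via something like
 *
 *	drbdsetup /dev/drbd0 new-current-uuid --clear-bitmap
 *
 * to skip the initial full sync of a freshly created, connected device
 * pair; without --clear-bitmap it only rotates in a new current UUID. */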
static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int skip_initial_sync = 0;
	int err;

	struct new_c_uuid args;

	memset(&args, 0, sizeof(struct new_c_uuid));
	if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid");
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			spin_lock_irq(&mdev->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(&mdev->state_mutex);

	reply->ret_code = retcode;
	return 0;
}

struct cn_handler_struct {
	int (*function)(struct drbd_conf *,
			struct drbd_nl_cfg_req *,
			struct drbd_nl_cfg_reply *);
	int reply_body_size;
};

static struct cn_handler_struct cnd_table[] = {
	[ P_primary ]		= { &drbd_nl_primary,		0 },
	[ P_secondary ]		= { &drbd_nl_secondary,		0 },
	[ P_disk_conf ]		= { &drbd_nl_disk_conf,		0 },
	[ P_detach ]		= { &drbd_nl_detach,		0 },
	[ P_net_conf ]		= { &drbd_nl_net_conf,		0 },
	[ P_disconnect ]	= { &drbd_nl_disconnect,	0 },
	[ P_resize ]		= { &drbd_nl_resize,		0 },
	[ P_syncer_conf ]	= { &drbd_nl_syncer_conf,	0 },
	[ P_invalidate ]	= { &drbd_nl_invalidate,	0 },
	[ P_invalidate_peer ]	= { &drbd_nl_invalidate_peer,	0 },
	[ P_pause_sync ]	= { &drbd_nl_pause_sync,	0 },
	[ P_resume_sync ]	= { &drbd_nl_resume_sync,	0 },
	[ P_suspend_io ]	= { &drbd_nl_suspend_io,	0 },
	[ P_resume_io ]		= { &drbd_nl_resume_io,		0 },
	[ P_outdate ]		= { &drbd_nl_outdate,		0 },
	[ P_get_config ]	= { &drbd_nl_get_config,
				    sizeof(struct syncer_conf_tag_len_struct) +
				    sizeof(struct disk_conf_tag_len_struct) +
				    sizeof(struct net_conf_tag_len_struct) },
	[ P_get_state ]		= { &drbd_nl_get_state,
				    sizeof(struct get_state_tag_len_struct) +
				    sizeof(struct sync_progress_tag_len_struct) },
	[ P_get_uuids ]		= { &drbd_nl_get_uuids,
				    sizeof(struct get_uuids_tag_len_struct) },
	[ P_get_timeout_flag ]	= { &drbd_nl_get_timeout_flag,
				    sizeof(struct get_timeout_flag_tag_len_struct) },
	[ P_start_ov ]		= { &drbd_nl_start_ov,		0 },
	[ P_new_c_uuid ]	= { &drbd_nl_new_c_uuid,	0 },
};
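
/* Dispatch note: cnd_table[] uses designated initializers, so each handler
 * sits at its P_* packet number and drbd_connector_callback() below can use
 * nlp->packet_type as a direct index (after range-checking it).  Only the
 * get_* queries declare a non-zero reply_body_size; for them that many
 * extra bytes are reserved for the tag list in the kmalloc'ed reply, all
 * other packets answer with a bare ret_code. */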
static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp)
{
	struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data;
	struct cn_handler_struct *cm;
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	struct drbd_conf *mdev;
	int retcode, rr;
	int reply_size = sizeof(struct cn_msg)
		+ sizeof(struct drbd_nl_cfg_reply)
		+ sizeof(short int);

	if (!try_module_get(THIS_MODULE)) {
		printk(KERN_ERR "drbd: try_module_get() failed!\n");
		return;
	}

	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) {
		retcode = ERR_PERM;
		goto fail;
	}

	mdev = ensure_mdev(nlp->drbd_minor,
			(nlp->flags & DRBD_NL_CREATE_DEVICE));
	if (!mdev) {
		retcode = ERR_MINOR_INVALID;
		goto fail;
	}

	if (nlp->packet_type >= P_nl_after_last_packet) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	cm = cnd_table + nlp->packet_type;

	/* This may happen if packet number is 0: */
	if (cm->function == NULL) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	reply_size += cm->reply_body_size;

	/* allocation not in the IO path, cqueue thread context */
	cn_reply = kmalloc(reply_size, GFP_KERNEL);
	if (!cn_reply) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	reply = (struct drbd_nl_cfg_reply *) cn_reply->data;

	reply->packet_type =
		cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet;
	reply->minor = nlp->drbd_minor;
	reply->ret_code = NO_ERROR; /* Might be modified by cm->function. */
	/* reply->tag_list; might be modified by cm->function. */

	rr = cm->function(mdev, nlp, reply);

	cn_reply->id = req->id;
	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr;
	cn_reply->flags = 0;

	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
	if (rr && rr != -ESRCH)
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);

	kfree(cn_reply);
	module_put(THIS_MODULE);
	return;
fail:
	drbd_nl_send_reply(req, retcode);
	module_put(THIS_MODULE);
}

static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */

static unsigned short *
__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
	      unsigned short len, int nul_terminated)
{
	unsigned short l = tag_descriptions[tag_number(tag)].max_len;

	len = (len < l) ? len : l;
	put_unaligned(tag, tl++);
	put_unaligned(len, tl++);
	memcpy(tl, data, len);
	tl = (unsigned short *)((char *)tl + len);
	if (nul_terminated)
		*((char *)tl - 1) = 0;
	return tl;
}

static unsigned short *
tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len)
{
	return __tl_add_blob(tl, tag, data, len, 0);
}

static unsigned short *
tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str)
{
	return __tl_add_blob(tl, tag, str, strlen(str)+1, 0);
}
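
/* Behavior note on the helpers above: __tl_add_blob() silently clamps the
 * payload to the tag's registered max_len from tag_descriptions[], so (to
 * pick hypothetical numbers) a 100-byte blob added under a tag whose
 * max_len is 64 is cut to 64 bytes; with nul_terminated set, the last byte
 * that still fits is overwritten with '\0' so string consumers cannot read
 * past the truncated payload. */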
static unsigned short *
tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val)
{
	put_unaligned(tag, tl++);
	switch (tag_type(tag)) {
	case TT_INTEGER:
		put_unaligned(sizeof(int), tl++);
		put_unaligned(*(int *)val, (int *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(int));
		break;
	case TT_INT64:
		put_unaligned(sizeof(u64), tl++);
		put_unaligned(*(u64 *)val, (u64 *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(u64));
		break;
	default:
		/* someone did something stupid. */
		;
	}
	return tl;
}

void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct get_state_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_state() got called\n"); */

	tl = get_state_to_tags(mdev, (struct get_state *)&state, tl);

	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_get_state;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct call_helper_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_ev_helper() got called\n"); */

	tl = tl_add_str(tl, T_helper, helper_name);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_call_helper;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}
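
/* Sizing note for the broadcast helpers above: the on-stack buffer is the
 * exact worst case for the message they emit -- struct cn_msg plus
 * struct drbd_nl_cfg_reply plus the single *_tag_len_struct they fill in
 * plus one trailing short for TT_END -- the same arithmetic the
 * reply_body_size entries in cnd_table use for the query replies. */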
void drbd_bcast_ee(struct drbd_conf *mdev,
		   const char *reason, const int dgs,
		   const char *seen_hash, const char *calc_hash,
		   const struct drbd_epoch_entry *e)
{
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	unsigned short *tl;
	struct page *page;
	unsigned len;

	if (!e)
		return;
	if (!reason || !reason[0])
		return;

	/* apparently we have to memcpy twice, first to prepare the data for the
	 * struct cn_msg, then within cn_netlink_send from the cn_msg to the
	 * netlink skb. */
	/* receiver thread context, which is not in the writeout path (of this node),
	 * but may be in the writeout path of the _other_ node.
	 * GFP_NOIO to avoid potential "distributed deadlock". */
	cn_reply = kmalloc(
		sizeof(struct cn_msg)+
		sizeof(struct drbd_nl_cfg_reply)+
		sizeof(struct dump_ee_tag_len_struct)+
		sizeof(short int),
		GFP_NOIO);

	if (!cn_reply) {
		dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n",
			(unsigned long long)e->sector, e->size);
		return;
	}

	reply = (struct drbd_nl_cfg_reply *)cn_reply->data;
	tl = reply->tag_list;

	tl = tl_add_str(tl, T_dump_ee_reason, reason);
	tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs);
	tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs);
	tl = tl_add_int(tl, T_ee_sector, &e->sector);
	tl = tl_add_int(tl, T_ee_block_id, &e->block_id);

	put_unaligned(T_ee_data, tl++);
	put_unaligned(e->size, tl++);

	len = e->size;
	page = e->pages;
	page_chain_for_each(page) {
		void *d = kmap_atomic(page, KM_USER0);
		unsigned l = min_t(unsigned, len, PAGE_SIZE);
		memcpy(tl, d, l);
		kunmap_atomic(d, KM_USER0);
		tl = (unsigned short *)((char *)tl + l);
		len -= l;
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_dump_ee;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	kfree(cn_reply);
}
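
/* Worked example (illustrative): for a 32 KiB epoch entry on a system with
 * 4 KiB pages, the page_chain_for_each() loop in drbd_bcast_ee() runs eight
 * times, mapping one page per pass with kmap_atomic() and copying PAGE_SIZE
 * bytes until len reaches zero, so the T_ee_data payload is exactly e->size
 * bytes long. */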
void drbd_bcast_sync_progress(struct drbd_conf *mdev)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct sync_progress_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;
	unsigned long rs_left;
	unsigned int res;

	/* no local ref, no bitmap, no syncer progress, no broadcast. */
	if (!get_ldev(mdev))
		return;
	drbd_get_syncer_progress(mdev, &rs_left, &res);
	put_ldev(mdev);

	tl = tl_add_int(tl, T_sync_progress, &res);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_sync_progress;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

int __init drbd_nl_init(void)
{
	static struct cb_id cn_id_drbd;
	int err, try = 10;

	cn_id_drbd.val = CN_VAL_DRBD;
	do {
		cn_id_drbd.idx = cn_idx;
		err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback);
		if (!err)
			break;
		cn_idx = (cn_idx + CN_IDX_STEP);
	} while (try--);

	if (err) {
		printk(KERN_ERR "drbd: cn_drbd failed to register\n");
		return err;
	}

	return 0;
}

void drbd_nl_cleanup(void)
{
	static struct cb_id cn_id_drbd;

	cn_id_drbd.idx = cn_idx;
	cn_id_drbd.val = CN_VAL_DRBD;

	cn_del_callback(&cn_id_drbd);
}

void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
{
	char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	int rr;

	cn_reply->id = req->id;

	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply);
	cn_reply->flags = 0;

	reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
	reply->ret_code = ret_code;

	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	if (rr && rr != -ESRCH)
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
}
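
/* Registration note (an inference from the code above, not a comment from
 * the original file): if the preferred connector index is already taken,
 * drbd_nl_init() retries with cn_idx + CN_IDX_STEP, up to ten more
 * attempts, before giving up.  Since cn_idx is a module-level variable,
 * drbd_nl_cleanup() unregisters whichever index registration finally
 * succeeded with. */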