drbd_nl.c revision 43a5182cccae5850f7590f78dd9651bd407be440
1/* 2 drbd_nl.c 3 4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg. 5 6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH. 7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>. 8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. 9 10 drbd is free software; you can redistribute it and/or modify 11 it under the terms of the GNU General Public License as published by 12 the Free Software Foundation; either version 2, or (at your option) 13 any later version. 14 15 drbd is distributed in the hope that it will be useful, 16 but WITHOUT ANY WARRANTY; without even the implied warranty of 17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 GNU General Public License for more details. 19 20 You should have received a copy of the GNU General Public License 21 along with drbd; see the file COPYING. If not, write to 22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 23 24 */ 25 26#include <linux/module.h> 27#include <linux/drbd.h> 28#include <linux/in.h> 29#include <linux/fs.h> 30#include <linux/file.h> 31#include <linux/slab.h> 32#include <linux/connector.h> 33#include <linux/blkpg.h> 34#include <linux/cpumask.h> 35#include "drbd_int.h" 36#include "drbd_req.h" 37#include "drbd_wrappers.h" 38#include <asm/unaligned.h> 39#include <linux/drbd_tag_magic.h> 40#include <linux/drbd_limits.h> 41#include <linux/compiler.h> 42#include <linux/kthread.h> 43 44static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int); 45static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *); 46static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *); 47 48/* see get_sb_bdev and bd_claim */ 49static char *drbd_m_holder = "Hands off! this is DRBD's meta data device."; 50 51/* Generate the tag_list to struct functions */ 52#define NL_PACKET(name, number, fields) \ 53static int name ## _from_tags(struct drbd_conf *mdev, \ 54 unsigned short *tags, struct name *arg) __attribute__ ((unused)); \ 55static int name ## _from_tags(struct drbd_conf *mdev, \ 56 unsigned short *tags, struct name *arg) \ 57{ \ 58 int tag; \ 59 int dlen; \ 60 \ 61 while ((tag = get_unaligned(tags++)) != TT_END) { \ 62 dlen = get_unaligned(tags++); \ 63 switch (tag_number(tag)) { \ 64 fields \ 65 default: \ 66 if (tag & T_MANDATORY) { \ 67 dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \ 68 return 0; \ 69 } \ 70 } \ 71 tags = (unsigned short *)((char *)tags + dlen); \ 72 } \ 73 return 1; \ 74} 75#define NL_INTEGER(pn, pr, member) \ 76 case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \ 77 arg->member = get_unaligned((int *)(tags)); \ 78 break; 79#define NL_INT64(pn, pr, member) \ 80 case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \ 81 arg->member = get_unaligned((u64 *)(tags)); \ 82 break; 83#define NL_BIT(pn, pr, member) \ 84 case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \ 85 arg->member = *(char *)(tags) ? 1 : 0; \ 86 break; 87#define NL_STRING(pn, pr, member, len) \ 88 case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \ 89 if (dlen > len) { \ 90 dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \ 91 #member, dlen, (unsigned int)len); \ 92 return 0; \ 93 } \ 94 arg->member ## _len = dlen; \ 95 memcpy(arg->member, tags, min_t(size_t, dlen, len)); \ 96 break; 97#include "linux/drbd_nl.h" 98 99/* Generate the struct to tag_list functions */ 100#define NL_PACKET(name, number, fields) \ 101static unsigned short* \ 102name ## _to_tags(struct drbd_conf *mdev, \ 103 struct name *arg, unsigned short *tags) __attribute__ ((unused)); \ 104static unsigned short* \ 105name ## _to_tags(struct drbd_conf *mdev, \ 106 struct name *arg, unsigned short *tags) \ 107{ \ 108 fields \ 109 return tags; \ 110} 111 112#define NL_INTEGER(pn, pr, member) \ 113 put_unaligned(pn | pr | TT_INTEGER, tags++); \ 114 put_unaligned(sizeof(int), tags++); \ 115 put_unaligned(arg->member, (int *)tags); \ 116 tags = (unsigned short *)((char *)tags+sizeof(int)); 117#define NL_INT64(pn, pr, member) \ 118 put_unaligned(pn | pr | TT_INT64, tags++); \ 119 put_unaligned(sizeof(u64), tags++); \ 120 put_unaligned(arg->member, (u64 *)tags); \ 121 tags = (unsigned short *)((char *)tags+sizeof(u64)); 122#define NL_BIT(pn, pr, member) \ 123 put_unaligned(pn | pr | TT_BIT, tags++); \ 124 put_unaligned(sizeof(char), tags++); \ 125 *(char *)tags = arg->member; \ 126 tags = (unsigned short *)((char *)tags+sizeof(char)); 127#define NL_STRING(pn, pr, member, len) \ 128 put_unaligned(pn | pr | TT_STRING, tags++); \ 129 put_unaligned(arg->member ## _len, tags++); \ 130 memcpy(tags, arg->member, arg->member ## _len); \ 131 tags = (unsigned short *)((char *)tags + arg->member ## _len); 132#include "linux/drbd_nl.h" 133 134void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name); 135void drbd_nl_send_reply(struct cn_msg *, int); 136 137int drbd_khelper(struct drbd_conf *mdev, char *cmd) 138{ 139 char *envp[] = { "HOME=/", 140 "TERM=linux", 141 "PATH=/sbin:/usr/sbin:/bin:/usr/bin", 142 NULL, /* Will be set to address family */ 143 NULL, /* Will be set to address */ 144 NULL }; 145 146 char mb[12], af[20], ad[60], *afs; 147 char *argv[] = {usermode_helper, cmd, mb, NULL }; 148 int ret; 149 150 snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev)); 151 152 if (get_net_conf(mdev)) { 153 switch (((struct sockaddr *)mdev->net_conf->peer_addr)->sa_family) { 154 case AF_INET6: 155 afs = "ipv6"; 156 snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6", 157 &((struct sockaddr_in6 *)mdev->net_conf->peer_addr)->sin6_addr); 158 break; 159 case AF_INET: 160 afs = "ipv4"; 161 snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4", 162 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr); 163 break; 164 default: 165 afs = "ssocks"; 166 snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4", 167 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr); 168 } 169 snprintf(af, 20, "DRBD_PEER_AF=%s", afs); 170 envp[3]=af; 171 envp[4]=ad; 172 put_net_conf(mdev); 173 } 174 175 dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb); 176 177 drbd_bcast_ev_helper(mdev, cmd); 178 ret = call_usermodehelper(usermode_helper, argv, envp, 1); 179 if (ret) 180 dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n", 181 usermode_helper, cmd, mb, 182 (ret >> 8) & 0xff, ret); 183 else 184 dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n", 185 usermode_helper, cmd, mb, 186 (ret >> 8) & 0xff, ret); 187 188 if (ret < 0) /* Ignore any ERRNOs we got. */ 189 ret = 0; 190 191 return ret; 192} 193 194enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev) 195{ 196 char *ex_to_string; 197 int r; 198 enum drbd_disk_state nps; 199 enum drbd_fencing_p fp; 200 201 D_ASSERT(mdev->state.pdsk == D_UNKNOWN); 202 203 if (get_ldev_if_state(mdev, D_CONSISTENT)) { 204 fp = mdev->ldev->dc.fencing; 205 put_ldev(mdev); 206 } else { 207 dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n"); 208 return mdev->state.pdsk; 209 } 210 211 if (fp == FP_STONITH) 212 _drbd_request_state(mdev, NS(susp, 1), CS_WAIT_COMPLETE); 213 214 r = drbd_khelper(mdev, "fence-peer"); 215 216 switch ((r>>8) & 0xff) { 217 case 3: /* peer is inconsistent */ 218 ex_to_string = "peer is inconsistent or worse"; 219 nps = D_INCONSISTENT; 220 break; 221 case 4: /* peer got outdated, or was already outdated */ 222 ex_to_string = "peer was fenced"; 223 nps = D_OUTDATED; 224 break; 225 case 5: /* peer was down */ 226 if (mdev->state.disk == D_UP_TO_DATE) { 227 /* we will(have) create(d) a new UUID anyways... */ 228 ex_to_string = "peer is unreachable, assumed to be dead"; 229 nps = D_OUTDATED; 230 } else { 231 ex_to_string = "peer unreachable, doing nothing since disk != UpToDate"; 232 nps = mdev->state.pdsk; 233 } 234 break; 235 case 6: /* Peer is primary, voluntarily outdate myself. 236 * This is useful when an unconnected R_SECONDARY is asked to 237 * become R_PRIMARY, but finds the other peer being active. */ 238 ex_to_string = "peer is active"; 239 dev_warn(DEV, "Peer is primary, outdating myself.\n"); 240 nps = D_UNKNOWN; 241 _drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE); 242 break; 243 case 7: 244 if (fp != FP_STONITH) 245 dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n"); 246 ex_to_string = "peer was stonithed"; 247 nps = D_OUTDATED; 248 break; 249 default: 250 /* The script is broken ... */ 251 nps = D_UNKNOWN; 252 dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff); 253 return nps; 254 } 255 256 dev_info(DEV, "fence-peer helper returned %d (%s)\n", 257 (r>>8) & 0xff, ex_to_string); 258 return nps; 259} 260 261static int _try_outdate_peer_async(void *data) 262{ 263 struct drbd_conf *mdev = (struct drbd_conf *)data; 264 enum drbd_disk_state nps; 265 266 nps = drbd_try_outdate_peer(mdev); 267 drbd_request_state(mdev, NS(pdsk, nps)); 268 269 return 0; 270} 271 272void drbd_try_outdate_peer_async(struct drbd_conf *mdev) 273{ 274 struct task_struct *opa; 275 276 opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev)); 277 if (IS_ERR(opa)) 278 dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n"); 279} 280 281int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) 282{ 283 const int max_tries = 4; 284 int r = 0; 285 int try = 0; 286 int forced = 0; 287 union drbd_state mask, val; 288 enum drbd_disk_state nps; 289 290 if (new_role == R_PRIMARY) 291 request_ping(mdev); /* Detect a dead peer ASAP */ 292 293 mutex_lock(&mdev->state_mutex); 294 295 mask.i = 0; mask.role = R_MASK; 296 val.i = 0; val.role = new_role; 297 298 while (try++ < max_tries) { 299 r = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE); 300 301 /* in case we first succeeded to outdate, 302 * but now suddenly could establish a connection */ 303 if (r == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) { 304 val.pdsk = 0; 305 mask.pdsk = 0; 306 continue; 307 } 308 309 if (r == SS_NO_UP_TO_DATE_DISK && force && 310 (mdev->state.disk < D_UP_TO_DATE && 311 mdev->state.disk >= D_INCONSISTENT)) { 312 mask.disk = D_MASK; 313 val.disk = D_UP_TO_DATE; 314 forced = 1; 315 continue; 316 } 317 318 if (r == SS_NO_UP_TO_DATE_DISK && 319 mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) { 320 D_ASSERT(mdev->state.pdsk == D_UNKNOWN); 321 nps = drbd_try_outdate_peer(mdev); 322 323 if (nps == D_OUTDATED || nps == D_INCONSISTENT) { 324 val.disk = D_UP_TO_DATE; 325 mask.disk = D_MASK; 326 } 327 328 val.pdsk = nps; 329 mask.pdsk = D_MASK; 330 331 continue; 332 } 333 334 if (r == SS_NOTHING_TO_DO) 335 goto fail; 336 if (r == SS_PRIMARY_NOP && mask.pdsk == 0) { 337 nps = drbd_try_outdate_peer(mdev); 338 339 if (force && nps > D_OUTDATED) { 340 dev_warn(DEV, "Forced into split brain situation!\n"); 341 nps = D_OUTDATED; 342 } 343 344 mask.pdsk = D_MASK; 345 val.pdsk = nps; 346 347 continue; 348 } 349 if (r == SS_TWO_PRIMARIES) { 350 /* Maybe the peer is detected as dead very soon... 351 retry at most once more in this case. */ 352 __set_current_state(TASK_INTERRUPTIBLE); 353 schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10); 354 if (try < max_tries) 355 try = max_tries - 1; 356 continue; 357 } 358 if (r < SS_SUCCESS) { 359 r = _drbd_request_state(mdev, mask, val, 360 CS_VERBOSE + CS_WAIT_COMPLETE); 361 if (r < SS_SUCCESS) 362 goto fail; 363 } 364 break; 365 } 366 367 if (r < SS_SUCCESS) 368 goto fail; 369 370 if (forced) 371 dev_warn(DEV, "Forced to consider local data as UpToDate!\n"); 372 373 /* Wait until nothing is on the fly :) */ 374 wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0); 375 376 if (new_role == R_SECONDARY) { 377 set_disk_ro(mdev->vdisk, TRUE); 378 if (get_ldev(mdev)) { 379 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; 380 put_ldev(mdev); 381 } 382 } else { 383 if (get_net_conf(mdev)) { 384 mdev->net_conf->want_lose = 0; 385 put_net_conf(mdev); 386 } 387 set_disk_ro(mdev->vdisk, FALSE); 388 if (get_ldev(mdev)) { 389 if (((mdev->state.conn < C_CONNECTED || 390 mdev->state.pdsk <= D_FAILED) 391 && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced) 392 drbd_uuid_new_current(mdev); 393 394 mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1; 395 put_ldev(mdev); 396 } 397 } 398 399 if ((new_role == R_SECONDARY) && get_ldev(mdev)) { 400 drbd_al_to_on_disk_bm(mdev); 401 put_ldev(mdev); 402 } 403 404 if (mdev->state.conn >= C_WF_REPORT_PARAMS) { 405 /* if this was forced, we should consider sync */ 406 if (forced) 407 drbd_send_uuids(mdev); 408 drbd_send_state(mdev); 409 } 410 411 drbd_md_sync(mdev); 412 413 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); 414 fail: 415 mutex_unlock(&mdev->state_mutex); 416 return r; 417} 418 419 420static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 421 struct drbd_nl_cfg_reply *reply) 422{ 423 struct primary primary_args; 424 425 memset(&primary_args, 0, sizeof(struct primary)); 426 if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) { 427 reply->ret_code = ERR_MANDATORY_TAG; 428 return 0; 429 } 430 431 reply->ret_code = 432 drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force); 433 434 return 0; 435} 436 437static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 438 struct drbd_nl_cfg_reply *reply) 439{ 440 reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0); 441 442 return 0; 443} 444 445/* initializes the md.*_offset members, so we are able to find 446 * the on disk meta data */ 447static void drbd_md_set_sector_offsets(struct drbd_conf *mdev, 448 struct drbd_backing_dev *bdev) 449{ 450 sector_t md_size_sect = 0; 451 switch (bdev->dc.meta_dev_idx) { 452 default: 453 /* v07 style fixed size indexed meta data */ 454 bdev->md.md_size_sect = MD_RESERVED_SECT; 455 bdev->md.md_offset = drbd_md_ss__(mdev, bdev); 456 bdev->md.al_offset = MD_AL_OFFSET; 457 bdev->md.bm_offset = MD_BM_OFFSET; 458 break; 459 case DRBD_MD_INDEX_FLEX_EXT: 460 /* just occupy the full device; unit: sectors */ 461 bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev); 462 bdev->md.md_offset = 0; 463 bdev->md.al_offset = MD_AL_OFFSET; 464 bdev->md.bm_offset = MD_BM_OFFSET; 465 break; 466 case DRBD_MD_INDEX_INTERNAL: 467 case DRBD_MD_INDEX_FLEX_INT: 468 bdev->md.md_offset = drbd_md_ss__(mdev, bdev); 469 /* al size is still fixed */ 470 bdev->md.al_offset = -MD_AL_MAX_SIZE; 471 /* we need (slightly less than) ~ this much bitmap sectors: */ 472 md_size_sect = drbd_get_capacity(bdev->backing_bdev); 473 md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT); 474 md_size_sect = BM_SECT_TO_EXT(md_size_sect); 475 md_size_sect = ALIGN(md_size_sect, 8); 476 477 /* plus the "drbd meta data super block", 478 * and the activity log; */ 479 md_size_sect += MD_BM_OFFSET; 480 481 bdev->md.md_size_sect = md_size_sect; 482 /* bitmap offset is adjusted by 'super' block size */ 483 bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET; 484 break; 485 } 486} 487 488char *ppsize(char *buf, unsigned long long size) 489{ 490 /* Needs 9 bytes at max. */ 491 static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' }; 492 int base = 0; 493 while (size >= 10000) { 494 /* shift + round */ 495 size = (size >> 10) + !!(size & (1<<9)); 496 base++; 497 } 498 sprintf(buf, "%lu %cB", (long)size, units[base]); 499 500 return buf; 501} 502 503/* there is still a theoretical deadlock when called from receiver 504 * on an D_INCONSISTENT R_PRIMARY: 505 * remote READ does inc_ap_bio, receiver would need to receive answer 506 * packet from remote to dec_ap_bio again. 507 * receiver receive_sizes(), comes here, 508 * waits for ap_bio_cnt == 0. -> deadlock. 509 * but this cannot happen, actually, because: 510 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable 511 * (not connected, or bad/no disk on peer): 512 * see drbd_fail_request_early, ap_bio_cnt is zero. 513 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET: 514 * peer may not initiate a resize. 515 */ 516void drbd_suspend_io(struct drbd_conf *mdev) 517{ 518 set_bit(SUSPEND_IO, &mdev->flags); 519 if (mdev->state.susp) 520 return; 521 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt)); 522} 523 524void drbd_resume_io(struct drbd_conf *mdev) 525{ 526 clear_bit(SUSPEND_IO, &mdev->flags); 527 wake_up(&mdev->misc_wait); 528} 529 530/** 531 * drbd_determine_dev_size() - Sets the right device size obeying all constraints 532 * @mdev: DRBD device. 533 * 534 * Returns 0 on success, negative return values indicate errors. 535 * You should call drbd_md_sync() after calling this function. 536 */ 537enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local) 538{ 539 sector_t prev_first_sect, prev_size; /* previous meta location */ 540 sector_t la_size; 541 sector_t size; 542 char ppb[10]; 543 544 int md_moved, la_size_changed; 545 enum determine_dev_size rv = unchanged; 546 547 /* race: 548 * application request passes inc_ap_bio, 549 * but then cannot get an AL-reference. 550 * this function later may wait on ap_bio_cnt == 0. -> deadlock. 551 * 552 * to avoid that: 553 * Suspend IO right here. 554 * still lock the act_log to not trigger ASSERTs there. 555 */ 556 drbd_suspend_io(mdev); 557 558 /* no wait necessary anymore, actually we could assert that */ 559 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log)); 560 561 prev_first_sect = drbd_md_first_sector(mdev->ldev); 562 prev_size = mdev->ldev->md.md_size_sect; 563 la_size = mdev->ldev->md.la_size_sect; 564 565 /* TODO: should only be some assert here, not (re)init... */ 566 drbd_md_set_sector_offsets(mdev, mdev->ldev); 567 568 size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED); 569 570 if (drbd_get_capacity(mdev->this_bdev) != size || 571 drbd_bm_capacity(mdev) != size) { 572 int err; 573 err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC)); 574 if (unlikely(err)) { 575 /* currently there is only one error: ENOMEM! */ 576 size = drbd_bm_capacity(mdev)>>1; 577 if (size == 0) { 578 dev_err(DEV, "OUT OF MEMORY! " 579 "Could not allocate bitmap!\n"); 580 } else { 581 dev_err(DEV, "BM resizing failed. " 582 "Leaving size unchanged at size = %lu KB\n", 583 (unsigned long)size); 584 } 585 rv = dev_size_error; 586 } 587 /* racy, see comments above. */ 588 drbd_set_my_capacity(mdev, size); 589 mdev->ldev->md.la_size_sect = size; 590 dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1), 591 (unsigned long long)size>>1); 592 } 593 if (rv == dev_size_error) 594 goto out; 595 596 la_size_changed = (la_size != mdev->ldev->md.la_size_sect); 597 598 md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev) 599 || prev_size != mdev->ldev->md.md_size_sect; 600 601 if (la_size_changed || md_moved) { 602 drbd_al_shrink(mdev); /* All extents inactive. */ 603 dev_info(DEV, "Writing the whole bitmap, %s\n", 604 la_size_changed && md_moved ? "size changed and md moved" : 605 la_size_changed ? "size changed" : "md moved"); 606 rv = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! */ 607 drbd_md_mark_dirty(mdev); 608 } 609 610 if (size > la_size) 611 rv = grew; 612 if (size < la_size) 613 rv = shrunk; 614out: 615 lc_unlock(mdev->act_log); 616 wake_up(&mdev->al_wait); 617 drbd_resume_io(mdev); 618 619 return rv; 620} 621 622sector_t 623drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space) 624{ 625 sector_t p_size = mdev->p_size; /* partner's disk size. */ 626 sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */ 627 sector_t m_size; /* my size */ 628 sector_t u_size = bdev->dc.disk_size; /* size requested by user. */ 629 sector_t size = 0; 630 631 m_size = drbd_get_max_capacity(bdev); 632 633 if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) { 634 dev_warn(DEV, "Resize while not connected was forced by the user!\n"); 635 p_size = m_size; 636 } 637 638 if (p_size && m_size) { 639 size = min_t(sector_t, p_size, m_size); 640 } else { 641 if (la_size) { 642 size = la_size; 643 if (m_size && m_size < size) 644 size = m_size; 645 if (p_size && p_size < size) 646 size = p_size; 647 } else { 648 if (m_size) 649 size = m_size; 650 if (p_size) 651 size = p_size; 652 } 653 } 654 655 if (size == 0) 656 dev_err(DEV, "Both nodes diskless!\n"); 657 658 if (u_size) { 659 if (u_size > size) 660 dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n", 661 (unsigned long)u_size>>1, (unsigned long)size>>1); 662 else 663 size = u_size; 664 } 665 666 return size; 667} 668 669/** 670 * drbd_check_al_size() - Ensures that the AL is of the right size 671 * @mdev: DRBD device. 672 * 673 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation 674 * failed, and 0 on success. You should call drbd_md_sync() after you called 675 * this function. 676 */ 677static int drbd_check_al_size(struct drbd_conf *mdev) 678{ 679 struct lru_cache *n, *t; 680 struct lc_element *e; 681 unsigned int in_use; 682 int i; 683 684 ERR_IF(mdev->sync_conf.al_extents < 7) 685 mdev->sync_conf.al_extents = 127; 686 687 if (mdev->act_log && 688 mdev->act_log->nr_elements == mdev->sync_conf.al_extents) 689 return 0; 690 691 in_use = 0; 692 t = mdev->act_log; 693 n = lc_create("act_log", drbd_al_ext_cache, 694 mdev->sync_conf.al_extents, sizeof(struct lc_element), 0); 695 696 if (n == NULL) { 697 dev_err(DEV, "Cannot allocate act_log lru!\n"); 698 return -ENOMEM; 699 } 700 spin_lock_irq(&mdev->al_lock); 701 if (t) { 702 for (i = 0; i < t->nr_elements; i++) { 703 e = lc_element_by_index(t, i); 704 if (e->refcnt) 705 dev_err(DEV, "refcnt(%d)==%d\n", 706 e->lc_number, e->refcnt); 707 in_use += e->refcnt; 708 } 709 } 710 if (!in_use) 711 mdev->act_log = n; 712 spin_unlock_irq(&mdev->al_lock); 713 if (in_use) { 714 dev_err(DEV, "Activity log still in use!\n"); 715 lc_destroy(n); 716 return -EBUSY; 717 } else { 718 if (t) 719 lc_destroy(t); 720 } 721 drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elemens */ 722 return 0; 723} 724 725void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __must_hold(local) 726{ 727 struct request_queue * const q = mdev->rq_queue; 728 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; 729 int max_segments = mdev->ldev->dc.max_bio_bvecs; 730 731 max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s); 732 733 blk_queue_max_hw_sectors(q, max_seg_s >> 9); 734 blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); 735 blk_queue_max_segment_size(q, max_seg_s); 736 blk_queue_logical_block_size(q, 512); 737 blk_queue_segment_boundary(q, PAGE_SIZE-1); 738 blk_stack_limits(&q->limits, &b->limits, 0); 739 740 if (b->merge_bvec_fn) 741 dev_warn(DEV, "Backing device's merge_bvec_fn() = %p\n", 742 b->merge_bvec_fn); 743 dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q)); 744 745 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { 746 dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n", 747 q->backing_dev_info.ra_pages, 748 b->backing_dev_info.ra_pages); 749 q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages; 750 } 751} 752 753/* serialize deconfig (worker exiting, doing cleanup) 754 * and reconfig (drbdsetup disk, drbdsetup net) 755 * 756 * wait for a potentially exiting worker, then restart it, 757 * or start a new one. 758 */ 759static void drbd_reconfig_start(struct drbd_conf *mdev) 760{ 761 wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags)); 762 wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags)); 763 drbd_thread_start(&mdev->worker); 764} 765 766/* if still unconfigured, stops worker again. 767 * if configured now, clears CONFIG_PENDING. 768 * wakes potential waiters */ 769static void drbd_reconfig_done(struct drbd_conf *mdev) 770{ 771 spin_lock_irq(&mdev->req_lock); 772 if (mdev->state.disk == D_DISKLESS && 773 mdev->state.conn == C_STANDALONE && 774 mdev->state.role == R_SECONDARY) { 775 set_bit(DEVICE_DYING, &mdev->flags); 776 drbd_thread_stop_nowait(&mdev->worker); 777 } else 778 clear_bit(CONFIG_PENDING, &mdev->flags); 779 spin_unlock_irq(&mdev->req_lock); 780 wake_up(&mdev->state_wait); 781} 782 783/* does always return 0; 784 * interesting return code is in reply->ret_code */ 785static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 786 struct drbd_nl_cfg_reply *reply) 787{ 788 enum drbd_ret_codes retcode; 789 enum determine_dev_size dd; 790 sector_t max_possible_sectors; 791 sector_t min_md_device_sectors; 792 struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */ 793 struct inode *inode, *inode2; 794 struct lru_cache *resync_lru = NULL; 795 union drbd_state ns, os; 796 int rv; 797 int cp_discovered = 0; 798 int logical_block_size; 799 800 drbd_reconfig_start(mdev); 801 802 /* if you want to reconfigure, please tear down first */ 803 if (mdev->state.disk > D_DISKLESS) { 804 retcode = ERR_DISK_CONFIGURED; 805 goto fail; 806 } 807 808 /* allocation not in the IO path, cqueue thread context */ 809 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL); 810 if (!nbc) { 811 retcode = ERR_NOMEM; 812 goto fail; 813 } 814 815 nbc->dc.disk_size = DRBD_DISK_SIZE_SECT_DEF; 816 nbc->dc.on_io_error = DRBD_ON_IO_ERROR_DEF; 817 nbc->dc.fencing = DRBD_FENCING_DEF; 818 nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF; 819 820 if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) { 821 retcode = ERR_MANDATORY_TAG; 822 goto fail; 823 } 824 825 if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) { 826 retcode = ERR_MD_IDX_INVALID; 827 goto fail; 828 } 829 830 if (get_net_conf(mdev)) { 831 int prot = mdev->net_conf->wire_protocol; 832 put_net_conf(mdev); 833 if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) { 834 retcode = ERR_STONITH_AND_PROT_A; 835 goto fail; 836 } 837 } 838 839 nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0); 840 if (IS_ERR(nbc->lo_file)) { 841 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev, 842 PTR_ERR(nbc->lo_file)); 843 nbc->lo_file = NULL; 844 retcode = ERR_OPEN_DISK; 845 goto fail; 846 } 847 848 inode = nbc->lo_file->f_dentry->d_inode; 849 850 if (!S_ISBLK(inode->i_mode)) { 851 retcode = ERR_DISK_NOT_BDEV; 852 goto fail; 853 } 854 855 nbc->md_file = filp_open(nbc->dc.meta_dev, O_RDWR, 0); 856 if (IS_ERR(nbc->md_file)) { 857 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev, 858 PTR_ERR(nbc->md_file)); 859 nbc->md_file = NULL; 860 retcode = ERR_OPEN_MD_DISK; 861 goto fail; 862 } 863 864 inode2 = nbc->md_file->f_dentry->d_inode; 865 866 if (!S_ISBLK(inode2->i_mode)) { 867 retcode = ERR_MD_NOT_BDEV; 868 goto fail; 869 } 870 871 nbc->backing_bdev = inode->i_bdev; 872 if (bd_claim(nbc->backing_bdev, mdev)) { 873 printk(KERN_ERR "drbd: bd_claim(%p,%p); failed [%p;%p;%u]\n", 874 nbc->backing_bdev, mdev, 875 nbc->backing_bdev->bd_holder, 876 nbc->backing_bdev->bd_contains->bd_holder, 877 nbc->backing_bdev->bd_holders); 878 retcode = ERR_BDCLAIM_DISK; 879 goto fail; 880 } 881 882 resync_lru = lc_create("resync", drbd_bm_ext_cache, 883 61, sizeof(struct bm_extent), 884 offsetof(struct bm_extent, lce)); 885 if (!resync_lru) { 886 retcode = ERR_NOMEM; 887 goto release_bdev_fail; 888 } 889 890 /* meta_dev_idx >= 0: external fixed size, 891 * possibly multiple drbd sharing one meta device. 892 * TODO in that case, paranoia check that [md_bdev, meta_dev_idx] is 893 * not yet used by some other drbd minor! 894 * (if you use drbd.conf + drbdadm, 895 * that should check it for you already; but if you don't, or someone 896 * fooled it, we need to double check here) */ 897 nbc->md_bdev = inode2->i_bdev; 898 if (bd_claim(nbc->md_bdev, (nbc->dc.meta_dev_idx < 0) ? (void *)mdev 899 : (void *) drbd_m_holder)) { 900 retcode = ERR_BDCLAIM_MD_DISK; 901 goto release_bdev_fail; 902 } 903 904 if ((nbc->backing_bdev == nbc->md_bdev) != 905 (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL || 906 nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) { 907 retcode = ERR_MD_IDX_INVALID; 908 goto release_bdev2_fail; 909 } 910 911 /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */ 912 drbd_md_set_sector_offsets(mdev, nbc); 913 914 if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) { 915 dev_err(DEV, "max capacity %llu smaller than disk size %llu\n", 916 (unsigned long long) drbd_get_max_capacity(nbc), 917 (unsigned long long) nbc->dc.disk_size); 918 retcode = ERR_DISK_TO_SMALL; 919 goto release_bdev2_fail; 920 } 921 922 if (nbc->dc.meta_dev_idx < 0) { 923 max_possible_sectors = DRBD_MAX_SECTORS_FLEX; 924 /* at least one MB, otherwise it does not make sense */ 925 min_md_device_sectors = (2<<10); 926 } else { 927 max_possible_sectors = DRBD_MAX_SECTORS; 928 min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1); 929 } 930 931 if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) { 932 retcode = ERR_MD_DISK_TO_SMALL; 933 dev_warn(DEV, "refusing attach: md-device too small, " 934 "at least %llu sectors needed for this meta-disk type\n", 935 (unsigned long long) min_md_device_sectors); 936 goto release_bdev2_fail; 937 } 938 939 /* Make sure the new disk is big enough 940 * (we may currently be R_PRIMARY with no local disk...) */ 941 if (drbd_get_max_capacity(nbc) < 942 drbd_get_capacity(mdev->this_bdev)) { 943 retcode = ERR_DISK_TO_SMALL; 944 goto release_bdev2_fail; 945 } 946 947 nbc->known_size = drbd_get_capacity(nbc->backing_bdev); 948 949 if (nbc->known_size > max_possible_sectors) { 950 dev_warn(DEV, "==> truncating very big lower level device " 951 "to currently maximum possible %llu sectors <==\n", 952 (unsigned long long) max_possible_sectors); 953 if (nbc->dc.meta_dev_idx >= 0) 954 dev_warn(DEV, "==>> using internal or flexible " 955 "meta data may help <<==\n"); 956 } 957 958 drbd_suspend_io(mdev); 959 /* also wait for the last barrier ack. */ 960 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt)); 961 /* and for any other previously queued work */ 962 drbd_flush_workqueue(mdev); 963 964 retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE); 965 drbd_resume_io(mdev); 966 if (retcode < SS_SUCCESS) 967 goto release_bdev2_fail; 968 969 if (!get_ldev_if_state(mdev, D_ATTACHING)) 970 goto force_diskless; 971 972 drbd_md_set_sector_offsets(mdev, nbc); 973 974 /* allocate a second IO page if logical_block_size != 512 */ 975 logical_block_size = bdev_logical_block_size(nbc->md_bdev); 976 if (logical_block_size == 0) 977 logical_block_size = MD_SECTOR_SIZE; 978 979 if (logical_block_size != MD_SECTOR_SIZE) { 980 if (!mdev->md_io_tmpp) { 981 struct page *page = alloc_page(GFP_NOIO); 982 if (!page) 983 goto force_diskless_dec; 984 985 dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n", 986 logical_block_size, MD_SECTOR_SIZE); 987 dev_warn(DEV, "Workaround engaged (has performance impact).\n"); 988 989 mdev->md_io_tmpp = page; 990 } 991 } 992 993 if (!mdev->bitmap) { 994 if (drbd_bm_init(mdev)) { 995 retcode = ERR_NOMEM; 996 goto force_diskless_dec; 997 } 998 } 999 1000 retcode = drbd_md_read(mdev, nbc); 1001 if (retcode != NO_ERROR) 1002 goto force_diskless_dec; 1003 1004 if (mdev->state.conn < C_CONNECTED && 1005 mdev->state.role == R_PRIMARY && 1006 (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) { 1007 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n", 1008 (unsigned long long)mdev->ed_uuid); 1009 retcode = ERR_DATA_NOT_CURRENT; 1010 goto force_diskless_dec; 1011 } 1012 1013 /* Since we are diskless, fix the activity log first... */ 1014 if (drbd_check_al_size(mdev)) { 1015 retcode = ERR_NOMEM; 1016 goto force_diskless_dec; 1017 } 1018 1019 /* Prevent shrinking of consistent devices ! */ 1020 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) && 1021 drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) { 1022 dev_warn(DEV, "refusing to truncate a consistent device\n"); 1023 retcode = ERR_DISK_TO_SMALL; 1024 goto force_diskless_dec; 1025 } 1026 1027 if (!drbd_al_read_log(mdev, nbc)) { 1028 retcode = ERR_IO_MD_DISK; 1029 goto force_diskless_dec; 1030 } 1031 1032 /* Reset the "barriers don't work" bits here, then force meta data to 1033 * be written, to ensure we determine if barriers are supported. */ 1034 if (nbc->dc.no_md_flush) 1035 set_bit(MD_NO_BARRIER, &mdev->flags); 1036 else 1037 clear_bit(MD_NO_BARRIER, &mdev->flags); 1038 1039 /* Point of no return reached. 1040 * Devices and memory are no longer released by error cleanup below. 1041 * now mdev takes over responsibility, and the state engine should 1042 * clean it up somewhere. */ 1043 D_ASSERT(mdev->ldev == NULL); 1044 mdev->ldev = nbc; 1045 mdev->resync = resync_lru; 1046 nbc = NULL; 1047 resync_lru = NULL; 1048 1049 mdev->write_ordering = WO_bio_barrier; 1050 drbd_bump_write_ordering(mdev, WO_bio_barrier); 1051 1052 if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY)) 1053 set_bit(CRASHED_PRIMARY, &mdev->flags); 1054 else 1055 clear_bit(CRASHED_PRIMARY, &mdev->flags); 1056 1057 if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) && 1058 !(mdev->state.role == R_PRIMARY && mdev->state.susp && 1059 mdev->sync_conf.on_no_data == OND_SUSPEND_IO)) { 1060 set_bit(CRASHED_PRIMARY, &mdev->flags); 1061 cp_discovered = 1; 1062 } 1063 1064 mdev->send_cnt = 0; 1065 mdev->recv_cnt = 0; 1066 mdev->read_cnt = 0; 1067 mdev->writ_cnt = 0; 1068 1069 drbd_setup_queue_param(mdev, DRBD_MAX_SEGMENT_SIZE); 1070 1071 /* If I am currently not R_PRIMARY, 1072 * but meta data primary indicator is set, 1073 * I just now recover from a hard crash, 1074 * and have been R_PRIMARY before that crash. 1075 * 1076 * Now, if I had no connection before that crash 1077 * (have been degraded R_PRIMARY), chances are that 1078 * I won't find my peer now either. 1079 * 1080 * In that case, and _only_ in that case, 1081 * we use the degr-wfc-timeout instead of the default, 1082 * so we can automatically recover from a crash of a 1083 * degraded but active "cluster" after a certain timeout. 1084 */ 1085 clear_bit(USE_DEGR_WFC_T, &mdev->flags); 1086 if (mdev->state.role != R_PRIMARY && 1087 drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) && 1088 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND)) 1089 set_bit(USE_DEGR_WFC_T, &mdev->flags); 1090 1091 dd = drbd_determin_dev_size(mdev, 0); 1092 if (dd == dev_size_error) { 1093 retcode = ERR_NOMEM_BITMAP; 1094 goto force_diskless_dec; 1095 } else if (dd == grew) 1096 set_bit(RESYNC_AFTER_NEG, &mdev->flags); 1097 1098 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) { 1099 dev_info(DEV, "Assuming that all blocks are out of sync " 1100 "(aka FullSync)\n"); 1101 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) { 1102 retcode = ERR_IO_MD_DISK; 1103 goto force_diskless_dec; 1104 } 1105 } else { 1106 if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) { 1107 retcode = ERR_IO_MD_DISK; 1108 goto force_diskless_dec; 1109 } 1110 } 1111 1112 if (cp_discovered) { 1113 drbd_al_apply_to_bm(mdev); 1114 drbd_al_to_on_disk_bm(mdev); 1115 } 1116 1117 spin_lock_irq(&mdev->req_lock); 1118 os = mdev->state; 1119 ns.i = os.i; 1120 /* If MDF_CONSISTENT is not set go into inconsistent state, 1121 otherwise investigate MDF_WasUpToDate... 1122 If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state, 1123 otherwise into D_CONSISTENT state. 1124 */ 1125 if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) { 1126 if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE)) 1127 ns.disk = D_CONSISTENT; 1128 else 1129 ns.disk = D_OUTDATED; 1130 } else { 1131 ns.disk = D_INCONSISTENT; 1132 } 1133 1134 if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED)) 1135 ns.pdsk = D_OUTDATED; 1136 1137 if ( ns.disk == D_CONSISTENT && 1138 (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE)) 1139 ns.disk = D_UP_TO_DATE; 1140 1141 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND, 1142 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before 1143 this point, because drbd_request_state() modifies these 1144 flags. */ 1145 1146 /* In case we are C_CONNECTED postpone any decision on the new disk 1147 state after the negotiation phase. */ 1148 if (mdev->state.conn == C_CONNECTED) { 1149 mdev->new_state_tmp.i = ns.i; 1150 ns.i = os.i; 1151 ns.disk = D_NEGOTIATING; 1152 1153 /* We expect to receive up-to-date UUIDs soon. 1154 To avoid a race in receive_state, free p_uuid while 1155 holding req_lock. I.e. atomic with the state change */ 1156 kfree(mdev->p_uuid); 1157 mdev->p_uuid = NULL; 1158 } 1159 1160 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL); 1161 ns = mdev->state; 1162 spin_unlock_irq(&mdev->req_lock); 1163 1164 if (rv < SS_SUCCESS) 1165 goto force_diskless_dec; 1166 1167 if (mdev->state.role == R_PRIMARY) 1168 mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1; 1169 else 1170 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; 1171 1172 drbd_md_mark_dirty(mdev); 1173 drbd_md_sync(mdev); 1174 1175 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); 1176 put_ldev(mdev); 1177 reply->ret_code = retcode; 1178 drbd_reconfig_done(mdev); 1179 return 0; 1180 1181 force_diskless_dec: 1182 put_ldev(mdev); 1183 force_diskless: 1184 drbd_force_state(mdev, NS(disk, D_DISKLESS)); 1185 drbd_md_sync(mdev); 1186 release_bdev2_fail: 1187 if (nbc) 1188 bd_release(nbc->md_bdev); 1189 release_bdev_fail: 1190 if (nbc) 1191 bd_release(nbc->backing_bdev); 1192 fail: 1193 if (nbc) { 1194 if (nbc->lo_file) 1195 fput(nbc->lo_file); 1196 if (nbc->md_file) 1197 fput(nbc->md_file); 1198 kfree(nbc); 1199 } 1200 lc_destroy(resync_lru); 1201 1202 reply->ret_code = retcode; 1203 drbd_reconfig_done(mdev); 1204 return 0; 1205} 1206 1207static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1208 struct drbd_nl_cfg_reply *reply) 1209{ 1210 reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS)); 1211 return 0; 1212} 1213 1214static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1215 struct drbd_nl_cfg_reply *reply) 1216{ 1217 int i, ns; 1218 enum drbd_ret_codes retcode; 1219 struct net_conf *new_conf = NULL; 1220 struct crypto_hash *tfm = NULL; 1221 struct crypto_hash *integrity_w_tfm = NULL; 1222 struct crypto_hash *integrity_r_tfm = NULL; 1223 struct hlist_head *new_tl_hash = NULL; 1224 struct hlist_head *new_ee_hash = NULL; 1225 struct drbd_conf *odev; 1226 char hmac_name[CRYPTO_MAX_ALG_NAME]; 1227 void *int_dig_out = NULL; 1228 void *int_dig_in = NULL; 1229 void *int_dig_vv = NULL; 1230 struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr; 1231 1232 drbd_reconfig_start(mdev); 1233 1234 if (mdev->state.conn > C_STANDALONE) { 1235 retcode = ERR_NET_CONFIGURED; 1236 goto fail; 1237 } 1238 1239 /* allocation not in the IO path, cqueue thread context */ 1240 new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL); 1241 if (!new_conf) { 1242 retcode = ERR_NOMEM; 1243 goto fail; 1244 } 1245 1246 new_conf->timeout = DRBD_TIMEOUT_DEF; 1247 new_conf->try_connect_int = DRBD_CONNECT_INT_DEF; 1248 new_conf->ping_int = DRBD_PING_INT_DEF; 1249 new_conf->max_epoch_size = DRBD_MAX_EPOCH_SIZE_DEF; 1250 new_conf->max_buffers = DRBD_MAX_BUFFERS_DEF; 1251 new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF; 1252 new_conf->sndbuf_size = DRBD_SNDBUF_SIZE_DEF; 1253 new_conf->rcvbuf_size = DRBD_RCVBUF_SIZE_DEF; 1254 new_conf->ko_count = DRBD_KO_COUNT_DEF; 1255 new_conf->after_sb_0p = DRBD_AFTER_SB_0P_DEF; 1256 new_conf->after_sb_1p = DRBD_AFTER_SB_1P_DEF; 1257 new_conf->after_sb_2p = DRBD_AFTER_SB_2P_DEF; 1258 new_conf->want_lose = 0; 1259 new_conf->two_primaries = 0; 1260 new_conf->wire_protocol = DRBD_PROT_C; 1261 new_conf->ping_timeo = DRBD_PING_TIMEO_DEF; 1262 new_conf->rr_conflict = DRBD_RR_CONFLICT_DEF; 1263 1264 if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) { 1265 retcode = ERR_MANDATORY_TAG; 1266 goto fail; 1267 } 1268 1269 if (new_conf->two_primaries 1270 && (new_conf->wire_protocol != DRBD_PROT_C)) { 1271 retcode = ERR_NOT_PROTO_C; 1272 goto fail; 1273 } 1274 1275 if (get_ldev(mdev)) { 1276 enum drbd_fencing_p fp = mdev->ldev->dc.fencing; 1277 put_ldev(mdev); 1278 if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) { 1279 retcode = ERR_STONITH_AND_PROT_A; 1280 goto fail; 1281 } 1282 } 1283 1284 if (mdev->state.role == R_PRIMARY && new_conf->want_lose) { 1285 retcode = ERR_DISCARD; 1286 goto fail; 1287 } 1288 1289 retcode = NO_ERROR; 1290 1291 new_my_addr = (struct sockaddr *)&new_conf->my_addr; 1292 new_peer_addr = (struct sockaddr *)&new_conf->peer_addr; 1293 for (i = 0; i < minor_count; i++) { 1294 odev = minor_to_mdev(i); 1295 if (!odev || odev == mdev) 1296 continue; 1297 if (get_net_conf(odev)) { 1298 taken_addr = (struct sockaddr *)&odev->net_conf->my_addr; 1299 if (new_conf->my_addr_len == odev->net_conf->my_addr_len && 1300 !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len)) 1301 retcode = ERR_LOCAL_ADDR; 1302 1303 taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr; 1304 if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len && 1305 !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len)) 1306 retcode = ERR_PEER_ADDR; 1307 1308 put_net_conf(odev); 1309 if (retcode != NO_ERROR) 1310 goto fail; 1311 } 1312 } 1313 1314 if (new_conf->cram_hmac_alg[0] != 0) { 1315 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", 1316 new_conf->cram_hmac_alg); 1317 tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC); 1318 if (IS_ERR(tfm)) { 1319 tfm = NULL; 1320 retcode = ERR_AUTH_ALG; 1321 goto fail; 1322 } 1323 1324 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) { 1325 retcode = ERR_AUTH_ALG_ND; 1326 goto fail; 1327 } 1328 } 1329 1330 if (new_conf->integrity_alg[0]) { 1331 integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC); 1332 if (IS_ERR(integrity_w_tfm)) { 1333 integrity_w_tfm = NULL; 1334 retcode=ERR_INTEGRITY_ALG; 1335 goto fail; 1336 } 1337 1338 if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) { 1339 retcode=ERR_INTEGRITY_ALG_ND; 1340 goto fail; 1341 } 1342 1343 integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC); 1344 if (IS_ERR(integrity_r_tfm)) { 1345 integrity_r_tfm = NULL; 1346 retcode=ERR_INTEGRITY_ALG; 1347 goto fail; 1348 } 1349 } 1350 1351 ns = new_conf->max_epoch_size/8; 1352 if (mdev->tl_hash_s != ns) { 1353 new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL); 1354 if (!new_tl_hash) { 1355 retcode = ERR_NOMEM; 1356 goto fail; 1357 } 1358 } 1359 1360 ns = new_conf->max_buffers/8; 1361 if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) { 1362 new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL); 1363 if (!new_ee_hash) { 1364 retcode = ERR_NOMEM; 1365 goto fail; 1366 } 1367 } 1368 1369 ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0; 1370 1371 if (integrity_w_tfm) { 1372 i = crypto_hash_digestsize(integrity_w_tfm); 1373 int_dig_out = kmalloc(i, GFP_KERNEL); 1374 if (!int_dig_out) { 1375 retcode = ERR_NOMEM; 1376 goto fail; 1377 } 1378 int_dig_in = kmalloc(i, GFP_KERNEL); 1379 if (!int_dig_in) { 1380 retcode = ERR_NOMEM; 1381 goto fail; 1382 } 1383 int_dig_vv = kmalloc(i, GFP_KERNEL); 1384 if (!int_dig_vv) { 1385 retcode = ERR_NOMEM; 1386 goto fail; 1387 } 1388 } 1389 1390 if (!mdev->bitmap) { 1391 if(drbd_bm_init(mdev)) { 1392 retcode = ERR_NOMEM; 1393 goto fail; 1394 } 1395 } 1396 1397 spin_lock_irq(&mdev->req_lock); 1398 if (mdev->net_conf != NULL) { 1399 retcode = ERR_NET_CONFIGURED; 1400 spin_unlock_irq(&mdev->req_lock); 1401 goto fail; 1402 } 1403 mdev->net_conf = new_conf; 1404 1405 mdev->send_cnt = 0; 1406 mdev->recv_cnt = 0; 1407 1408 if (new_tl_hash) { 1409 kfree(mdev->tl_hash); 1410 mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8; 1411 mdev->tl_hash = new_tl_hash; 1412 } 1413 1414 if (new_ee_hash) { 1415 kfree(mdev->ee_hash); 1416 mdev->ee_hash_s = mdev->net_conf->max_buffers/8; 1417 mdev->ee_hash = new_ee_hash; 1418 } 1419 1420 crypto_free_hash(mdev->cram_hmac_tfm); 1421 mdev->cram_hmac_tfm = tfm; 1422 1423 crypto_free_hash(mdev->integrity_w_tfm); 1424 mdev->integrity_w_tfm = integrity_w_tfm; 1425 1426 crypto_free_hash(mdev->integrity_r_tfm); 1427 mdev->integrity_r_tfm = integrity_r_tfm; 1428 1429 kfree(mdev->int_dig_out); 1430 kfree(mdev->int_dig_in); 1431 kfree(mdev->int_dig_vv); 1432 mdev->int_dig_out=int_dig_out; 1433 mdev->int_dig_in=int_dig_in; 1434 mdev->int_dig_vv=int_dig_vv; 1435 spin_unlock_irq(&mdev->req_lock); 1436 1437 retcode = _drbd_request_state(mdev, NS(conn, C_UNCONNECTED), CS_VERBOSE); 1438 1439 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); 1440 reply->ret_code = retcode; 1441 drbd_reconfig_done(mdev); 1442 return 0; 1443 1444fail: 1445 kfree(int_dig_out); 1446 kfree(int_dig_in); 1447 kfree(int_dig_vv); 1448 crypto_free_hash(tfm); 1449 crypto_free_hash(integrity_w_tfm); 1450 crypto_free_hash(integrity_r_tfm); 1451 kfree(new_tl_hash); 1452 kfree(new_ee_hash); 1453 kfree(new_conf); 1454 1455 reply->ret_code = retcode; 1456 drbd_reconfig_done(mdev); 1457 return 0; 1458} 1459 1460static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1461 struct drbd_nl_cfg_reply *reply) 1462{ 1463 int retcode; 1464 1465 retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED); 1466 1467 if (retcode == SS_NOTHING_TO_DO) 1468 goto done; 1469 else if (retcode == SS_ALREADY_STANDALONE) 1470 goto done; 1471 else if (retcode == SS_PRIMARY_NOP) { 1472 /* Our statche checking code wants to see the peer outdated. */ 1473 retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING, 1474 pdsk, D_OUTDATED)); 1475 } else if (retcode == SS_CW_FAILED_BY_PEER) { 1476 /* The peer probably wants to see us outdated. */ 1477 retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING, 1478 disk, D_OUTDATED), 1479 CS_ORDERED); 1480 if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) { 1481 drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 1482 retcode = SS_SUCCESS; 1483 } 1484 } 1485 1486 if (retcode < SS_SUCCESS) 1487 goto fail; 1488 1489 if (wait_event_interruptible(mdev->state_wait, 1490 mdev->state.conn != C_DISCONNECTING)) { 1491 /* Do not test for mdev->state.conn == C_STANDALONE, since 1492 someone else might connect us in the mean time! */ 1493 retcode = ERR_INTR; 1494 goto fail; 1495 } 1496 1497 done: 1498 retcode = NO_ERROR; 1499 fail: 1500 drbd_md_sync(mdev); 1501 reply->ret_code = retcode; 1502 return 0; 1503} 1504 1505void resync_after_online_grow(struct drbd_conf *mdev) 1506{ 1507 int iass; /* I am sync source */ 1508 1509 dev_info(DEV, "Resync of new storage after online grow\n"); 1510 if (mdev->state.role != mdev->state.peer) 1511 iass = (mdev->state.role == R_PRIMARY); 1512 else 1513 iass = test_bit(DISCARD_CONCURRENT, &mdev->flags); 1514 1515 if (iass) 1516 drbd_start_resync(mdev, C_SYNC_SOURCE); 1517 else 1518 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE); 1519} 1520 1521static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1522 struct drbd_nl_cfg_reply *reply) 1523{ 1524 struct resize rs; 1525 int retcode = NO_ERROR; 1526 enum determine_dev_size dd; 1527 enum dds_flags ddsf; 1528 1529 memset(&rs, 0, sizeof(struct resize)); 1530 if (!resize_from_tags(mdev, nlp->tag_list, &rs)) { 1531 retcode = ERR_MANDATORY_TAG; 1532 goto fail; 1533 } 1534 1535 if (mdev->state.conn > C_CONNECTED) { 1536 retcode = ERR_RESIZE_RESYNC; 1537 goto fail; 1538 } 1539 1540 if (mdev->state.role == R_SECONDARY && 1541 mdev->state.peer == R_SECONDARY) { 1542 retcode = ERR_NO_PRIMARY; 1543 goto fail; 1544 } 1545 1546 if (!get_ldev(mdev)) { 1547 retcode = ERR_NO_DISK; 1548 goto fail; 1549 } 1550 1551 if (rs.no_resync && mdev->agreed_pro_version < 93) { 1552 retcode = ERR_NEED_APV_93; 1553 goto fail; 1554 } 1555 1556 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) 1557 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev); 1558 1559 mdev->ldev->dc.disk_size = (sector_t)rs.resize_size; 1560 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0); 1561 dd = drbd_determin_dev_size(mdev, ddsf); 1562 drbd_md_sync(mdev); 1563 put_ldev(mdev); 1564 if (dd == dev_size_error) { 1565 retcode = ERR_NOMEM_BITMAP; 1566 goto fail; 1567 } 1568 1569 if (mdev->state.conn == C_CONNECTED) { 1570 if (dd == grew) 1571 set_bit(RESIZE_PENDING, &mdev->flags); 1572 1573 drbd_send_uuids(mdev); 1574 drbd_send_sizes(mdev, 1, ddsf); 1575 } 1576 1577 fail: 1578 reply->ret_code = retcode; 1579 return 0; 1580} 1581 1582static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1583 struct drbd_nl_cfg_reply *reply) 1584{ 1585 int retcode = NO_ERROR; 1586 int err; 1587 int ovr; /* online verify running */ 1588 int rsr; /* re-sync running */ 1589 struct crypto_hash *verify_tfm = NULL; 1590 struct crypto_hash *csums_tfm = NULL; 1591 struct syncer_conf sc; 1592 cpumask_var_t new_cpu_mask; 1593 1594 if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) { 1595 retcode = ERR_NOMEM; 1596 goto fail; 1597 } 1598 1599 if (nlp->flags & DRBD_NL_SET_DEFAULTS) { 1600 memset(&sc, 0, sizeof(struct syncer_conf)); 1601 sc.rate = DRBD_RATE_DEF; 1602 sc.after = DRBD_AFTER_DEF; 1603 sc.al_extents = DRBD_AL_EXTENTS_DEF; 1604 sc.on_no_data = DRBD_ON_NO_DATA_DEF; 1605 } else 1606 memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf)); 1607 1608 if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) { 1609 retcode = ERR_MANDATORY_TAG; 1610 goto fail; 1611 } 1612 1613 /* re-sync running */ 1614 rsr = ( mdev->state.conn == C_SYNC_SOURCE || 1615 mdev->state.conn == C_SYNC_TARGET || 1616 mdev->state.conn == C_PAUSED_SYNC_S || 1617 mdev->state.conn == C_PAUSED_SYNC_T ); 1618 1619 if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) { 1620 retcode = ERR_CSUMS_RESYNC_RUNNING; 1621 goto fail; 1622 } 1623 1624 if (!rsr && sc.csums_alg[0]) { 1625 csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC); 1626 if (IS_ERR(csums_tfm)) { 1627 csums_tfm = NULL; 1628 retcode = ERR_CSUMS_ALG; 1629 goto fail; 1630 } 1631 1632 if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) { 1633 retcode = ERR_CSUMS_ALG_ND; 1634 goto fail; 1635 } 1636 } 1637 1638 /* online verify running */ 1639 ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T); 1640 1641 if (ovr) { 1642 if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) { 1643 retcode = ERR_VERIFY_RUNNING; 1644 goto fail; 1645 } 1646 } 1647 1648 if (!ovr && sc.verify_alg[0]) { 1649 verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC); 1650 if (IS_ERR(verify_tfm)) { 1651 verify_tfm = NULL; 1652 retcode = ERR_VERIFY_ALG; 1653 goto fail; 1654 } 1655 1656 if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) { 1657 retcode = ERR_VERIFY_ALG_ND; 1658 goto fail; 1659 } 1660 } 1661 1662 /* silently ignore cpu mask on UP kernel */ 1663 if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) { 1664 err = __bitmap_parse(sc.cpu_mask, 32, 0, 1665 cpumask_bits(new_cpu_mask), nr_cpu_ids); 1666 if (err) { 1667 dev_warn(DEV, "__bitmap_parse() failed with %d\n", err); 1668 retcode = ERR_CPU_MASK_PARSE; 1669 goto fail; 1670 } 1671 } 1672 1673 ERR_IF (sc.rate < 1) sc.rate = 1; 1674 ERR_IF (sc.al_extents < 7) sc.al_extents = 127; /* arbitrary minimum */ 1675#define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT) 1676 if (sc.al_extents > AL_MAX) { 1677 dev_err(DEV, "sc.al_extents > %d\n", AL_MAX); 1678 sc.al_extents = AL_MAX; 1679 } 1680#undef AL_MAX 1681 1682 /* most sanity checks done, try to assign the new sync-after 1683 * dependency. need to hold the global lock in there, 1684 * to avoid a race in the dependency loop check. */ 1685 retcode = drbd_alter_sa(mdev, sc.after); 1686 if (retcode != NO_ERROR) 1687 goto fail; 1688 1689 /* ok, assign the rest of it as well. 1690 * lock against receive_SyncParam() */ 1691 spin_lock(&mdev->peer_seq_lock); 1692 mdev->sync_conf = sc; 1693 1694 if (!rsr) { 1695 crypto_free_hash(mdev->csums_tfm); 1696 mdev->csums_tfm = csums_tfm; 1697 csums_tfm = NULL; 1698 } 1699 1700 if (!ovr) { 1701 crypto_free_hash(mdev->verify_tfm); 1702 mdev->verify_tfm = verify_tfm; 1703 verify_tfm = NULL; 1704 } 1705 spin_unlock(&mdev->peer_seq_lock); 1706 1707 if (get_ldev(mdev)) { 1708 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log)); 1709 drbd_al_shrink(mdev); 1710 err = drbd_check_al_size(mdev); 1711 lc_unlock(mdev->act_log); 1712 wake_up(&mdev->al_wait); 1713 1714 put_ldev(mdev); 1715 drbd_md_sync(mdev); 1716 1717 if (err) { 1718 retcode = ERR_NOMEM; 1719 goto fail; 1720 } 1721 } 1722 1723 if (mdev->state.conn >= C_CONNECTED) 1724 drbd_send_sync_param(mdev, &sc); 1725 1726 if (!cpumask_equal(mdev->cpu_mask, new_cpu_mask)) { 1727 cpumask_copy(mdev->cpu_mask, new_cpu_mask); 1728 drbd_calc_cpu_mask(mdev); 1729 mdev->receiver.reset_cpu_mask = 1; 1730 mdev->asender.reset_cpu_mask = 1; 1731 mdev->worker.reset_cpu_mask = 1; 1732 } 1733 1734 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); 1735fail: 1736 free_cpumask_var(new_cpu_mask); 1737 crypto_free_hash(csums_tfm); 1738 crypto_free_hash(verify_tfm); 1739 reply->ret_code = retcode; 1740 return 0; 1741} 1742 1743static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1744 struct drbd_nl_cfg_reply *reply) 1745{ 1746 int retcode; 1747 1748 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED); 1749 1750 if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION) 1751 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T)); 1752 1753 while (retcode == SS_NEED_CONNECTION) { 1754 spin_lock_irq(&mdev->req_lock); 1755 if (mdev->state.conn < C_CONNECTED) 1756 retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL); 1757 spin_unlock_irq(&mdev->req_lock); 1758 1759 if (retcode != SS_NEED_CONNECTION) 1760 break; 1761 1762 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T)); 1763 } 1764 1765 reply->ret_code = retcode; 1766 return 0; 1767} 1768 1769static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1770 struct drbd_nl_cfg_reply *reply) 1771{ 1772 1773 reply->ret_code = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S)); 1774 1775 return 0; 1776} 1777 1778static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1779 struct drbd_nl_cfg_reply *reply) 1780{ 1781 int retcode = NO_ERROR; 1782 1783 if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO) 1784 retcode = ERR_PAUSE_IS_SET; 1785 1786 reply->ret_code = retcode; 1787 return 0; 1788} 1789 1790static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1791 struct drbd_nl_cfg_reply *reply) 1792{ 1793 int retcode = NO_ERROR; 1794 1795 if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) 1796 retcode = ERR_PAUSE_IS_CLEAR; 1797 1798 reply->ret_code = retcode; 1799 return 0; 1800} 1801 1802static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1803 struct drbd_nl_cfg_reply *reply) 1804{ 1805 reply->ret_code = drbd_request_state(mdev, NS(susp, 1)); 1806 1807 return 0; 1808} 1809 1810static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1811 struct drbd_nl_cfg_reply *reply) 1812{ 1813 if (test_bit(NEW_CUR_UUID, &mdev->flags)) { 1814 drbd_uuid_new_current(mdev); 1815 clear_bit(NEW_CUR_UUID, &mdev->flags); 1816 drbd_md_sync(mdev); 1817 } 1818 drbd_suspend_io(mdev); 1819 reply->ret_code = drbd_request_state(mdev, NS(susp, 0)); 1820 if (reply->ret_code == SS_SUCCESS) { 1821 if (mdev->state.conn < C_CONNECTED) 1822 tl_clear(mdev); 1823 if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED) 1824 tl_restart(mdev, fail_frozen_disk_io); 1825 } 1826 drbd_resume_io(mdev); 1827 1828 return 0; 1829} 1830 1831static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1832 struct drbd_nl_cfg_reply *reply) 1833{ 1834 reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED)); 1835 return 0; 1836} 1837 1838static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1839 struct drbd_nl_cfg_reply *reply) 1840{ 1841 unsigned short *tl; 1842 1843 tl = reply->tag_list; 1844 1845 if (get_ldev(mdev)) { 1846 tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl); 1847 put_ldev(mdev); 1848 } 1849 1850 if (get_net_conf(mdev)) { 1851 tl = net_conf_to_tags(mdev, mdev->net_conf, tl); 1852 put_net_conf(mdev); 1853 } 1854 tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl); 1855 1856 put_unaligned(TT_END, tl++); /* Close the tag list */ 1857 1858 return (int)((char *)tl - (char *)reply->tag_list); 1859} 1860 1861static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1862 struct drbd_nl_cfg_reply *reply) 1863{ 1864 unsigned short *tl = reply->tag_list; 1865 union drbd_state s = mdev->state; 1866 unsigned long rs_left; 1867 unsigned int res; 1868 1869 tl = get_state_to_tags(mdev, (struct get_state *)&s, tl); 1870 1871 /* no local ref, no bitmap, no syncer progress. */ 1872 if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) { 1873 if (get_ldev(mdev)) { 1874 drbd_get_syncer_progress(mdev, &rs_left, &res); 1875 tl = tl_add_int(tl, T_sync_progress, &res); 1876 put_ldev(mdev); 1877 } 1878 } 1879 put_unaligned(TT_END, tl++); /* Close the tag list */ 1880 1881 return (int)((char *)tl - (char *)reply->tag_list); 1882} 1883 1884static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1885 struct drbd_nl_cfg_reply *reply) 1886{ 1887 unsigned short *tl; 1888 1889 tl = reply->tag_list; 1890 1891 if (get_ldev(mdev)) { 1892 tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64)); 1893 tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags); 1894 put_ldev(mdev); 1895 } 1896 put_unaligned(TT_END, tl++); /* Close the tag list */ 1897 1898 return (int)((char *)tl - (char *)reply->tag_list); 1899} 1900 1901/** 1902 * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use 1903 * @mdev: DRBD device. 1904 * @nlp: Netlink/connector packet from drbdsetup 1905 * @reply: Reply packet for drbdsetup 1906 */ 1907static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1908 struct drbd_nl_cfg_reply *reply) 1909{ 1910 unsigned short *tl; 1911 char rv; 1912 1913 tl = reply->tag_list; 1914 1915 rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED : 1916 test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT; 1917 1918 tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv)); 1919 put_unaligned(TT_END, tl++); /* Close the tag list */ 1920 1921 return (int)((char *)tl - (char *)reply->tag_list); 1922} 1923 1924static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1925 struct drbd_nl_cfg_reply *reply) 1926{ 1927 /* default to resume from last known position, if possible */ 1928 struct start_ov args = 1929 { .start_sector = mdev->ov_start_sector }; 1930 1931 if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) { 1932 reply->ret_code = ERR_MANDATORY_TAG; 1933 return 0; 1934 } 1935 /* w_make_ov_request expects position to be aligned */ 1936 mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT; 1937 reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S)); 1938 return 0; 1939} 1940 1941 1942static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1943 struct drbd_nl_cfg_reply *reply) 1944{ 1945 int retcode = NO_ERROR; 1946 int skip_initial_sync = 0; 1947 int err; 1948 1949 struct new_c_uuid args; 1950 1951 memset(&args, 0, sizeof(struct new_c_uuid)); 1952 if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) { 1953 reply->ret_code = ERR_MANDATORY_TAG; 1954 return 0; 1955 } 1956 1957 mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */ 1958 1959 if (!get_ldev(mdev)) { 1960 retcode = ERR_NO_DISK; 1961 goto out; 1962 } 1963 1964 /* this is "skip initial sync", assume to be clean */ 1965 if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 && 1966 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) { 1967 dev_info(DEV, "Preparing to skip initial sync\n"); 1968 skip_initial_sync = 1; 1969 } else if (mdev->state.conn != C_STANDALONE) { 1970 retcode = ERR_CONNECTED; 1971 goto out_dec; 1972 } 1973 1974 drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */ 1975 drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */ 1976 1977 if (args.clear_bm) { 1978 err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid"); 1979 if (err) { 1980 dev_err(DEV, "Writing bitmap failed with %d\n",err); 1981 retcode = ERR_IO_MD_DISK; 1982 } 1983 if (skip_initial_sync) { 1984 drbd_send_uuids_skip_initial_sync(mdev); 1985 _drbd_uuid_set(mdev, UI_BITMAP, 0); 1986 spin_lock_irq(&mdev->req_lock); 1987 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), 1988 CS_VERBOSE, NULL); 1989 spin_unlock_irq(&mdev->req_lock); 1990 } 1991 } 1992 1993 drbd_md_sync(mdev); 1994out_dec: 1995 put_ldev(mdev); 1996out: 1997 mutex_unlock(&mdev->state_mutex); 1998 1999 reply->ret_code = retcode; 2000 return 0; 2001} 2002 2003static struct drbd_conf *ensure_mdev(struct drbd_nl_cfg_req *nlp) 2004{ 2005 struct drbd_conf *mdev; 2006 2007 if (nlp->drbd_minor >= minor_count) 2008 return NULL; 2009 2010 mdev = minor_to_mdev(nlp->drbd_minor); 2011 2012 if (!mdev && (nlp->flags & DRBD_NL_CREATE_DEVICE)) { 2013 struct gendisk *disk = NULL; 2014 mdev = drbd_new_device(nlp->drbd_minor); 2015 2016 spin_lock_irq(&drbd_pp_lock); 2017 if (minor_table[nlp->drbd_minor] == NULL) { 2018 minor_table[nlp->drbd_minor] = mdev; 2019 disk = mdev->vdisk; 2020 mdev = NULL; 2021 } /* else: we lost the race */ 2022 spin_unlock_irq(&drbd_pp_lock); 2023 2024 if (disk) /* we won the race above */ 2025 /* in case we ever add a drbd_delete_device(), 2026 * don't forget the del_gendisk! */ 2027 add_disk(disk); 2028 else /* we lost the race above */ 2029 drbd_free_mdev(mdev); 2030 2031 mdev = minor_to_mdev(nlp->drbd_minor); 2032 } 2033 2034 return mdev; 2035} 2036 2037struct cn_handler_struct { 2038 int (*function)(struct drbd_conf *, 2039 struct drbd_nl_cfg_req *, 2040 struct drbd_nl_cfg_reply *); 2041 int reply_body_size; 2042}; 2043 2044static struct cn_handler_struct cnd_table[] = { 2045 [ P_primary ] = { &drbd_nl_primary, 0 }, 2046 [ P_secondary ] = { &drbd_nl_secondary, 0 }, 2047 [ P_disk_conf ] = { &drbd_nl_disk_conf, 0 }, 2048 [ P_detach ] = { &drbd_nl_detach, 0 }, 2049 [ P_net_conf ] = { &drbd_nl_net_conf, 0 }, 2050 [ P_disconnect ] = { &drbd_nl_disconnect, 0 }, 2051 [ P_resize ] = { &drbd_nl_resize, 0 }, 2052 [ P_syncer_conf ] = { &drbd_nl_syncer_conf, 0 }, 2053 [ P_invalidate ] = { &drbd_nl_invalidate, 0 }, 2054 [ P_invalidate_peer ] = { &drbd_nl_invalidate_peer, 0 }, 2055 [ P_pause_sync ] = { &drbd_nl_pause_sync, 0 }, 2056 [ P_resume_sync ] = { &drbd_nl_resume_sync, 0 }, 2057 [ P_suspend_io ] = { &drbd_nl_suspend_io, 0 }, 2058 [ P_resume_io ] = { &drbd_nl_resume_io, 0 }, 2059 [ P_outdate ] = { &drbd_nl_outdate, 0 }, 2060 [ P_get_config ] = { &drbd_nl_get_config, 2061 sizeof(struct syncer_conf_tag_len_struct) + 2062 sizeof(struct disk_conf_tag_len_struct) + 2063 sizeof(struct net_conf_tag_len_struct) }, 2064 [ P_get_state ] = { &drbd_nl_get_state, 2065 sizeof(struct get_state_tag_len_struct) + 2066 sizeof(struct sync_progress_tag_len_struct) }, 2067 [ P_get_uuids ] = { &drbd_nl_get_uuids, 2068 sizeof(struct get_uuids_tag_len_struct) }, 2069 [ P_get_timeout_flag ] = { &drbd_nl_get_timeout_flag, 2070 sizeof(struct get_timeout_flag_tag_len_struct)}, 2071 [ P_start_ov ] = { &drbd_nl_start_ov, 0 }, 2072 [ P_new_c_uuid ] = { &drbd_nl_new_c_uuid, 0 }, 2073}; 2074 2075static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp) 2076{ 2077 struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data; 2078 struct cn_handler_struct *cm; 2079 struct cn_msg *cn_reply; 2080 struct drbd_nl_cfg_reply *reply; 2081 struct drbd_conf *mdev; 2082 int retcode, rr; 2083 int reply_size = sizeof(struct cn_msg) 2084 + sizeof(struct drbd_nl_cfg_reply) 2085 + sizeof(short int); 2086 2087 if (!try_module_get(THIS_MODULE)) { 2088 printk(KERN_ERR "drbd: try_module_get() failed!\n"); 2089 return; 2090 } 2091 2092 if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) { 2093 retcode = ERR_PERM; 2094 goto fail; 2095 } 2096 2097 mdev = ensure_mdev(nlp); 2098 if (!mdev) { 2099 retcode = ERR_MINOR_INVALID; 2100 goto fail; 2101 } 2102 2103 if (nlp->packet_type >= P_nl_after_last_packet) { 2104 retcode = ERR_PACKET_NR; 2105 goto fail; 2106 } 2107 2108 cm = cnd_table + nlp->packet_type; 2109 2110 /* This may happen if packet number is 0: */ 2111 if (cm->function == NULL) { 2112 retcode = ERR_PACKET_NR; 2113 goto fail; 2114 } 2115 2116 reply_size += cm->reply_body_size; 2117 2118 /* allocation not in the IO path, cqueue thread context */ 2119 cn_reply = kmalloc(reply_size, GFP_KERNEL); 2120 if (!cn_reply) { 2121 retcode = ERR_NOMEM; 2122 goto fail; 2123 } 2124 reply = (struct drbd_nl_cfg_reply *) cn_reply->data; 2125 2126 reply->packet_type = 2127 cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet; 2128 reply->minor = nlp->drbd_minor; 2129 reply->ret_code = NO_ERROR; /* Might by modified by cm->function. */ 2130 /* reply->tag_list; might be modified by cm->function. */ 2131 2132 rr = cm->function(mdev, nlp, reply); 2133 2134 cn_reply->id = req->id; 2135 cn_reply->seq = req->seq; 2136 cn_reply->ack = req->ack + 1; 2137 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr; 2138 cn_reply->flags = 0; 2139 2140 rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL); 2141 if (rr && rr != -ESRCH) 2142 printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr); 2143 2144 kfree(cn_reply); 2145 module_put(THIS_MODULE); 2146 return; 2147 fail: 2148 drbd_nl_send_reply(req, retcode); 2149 module_put(THIS_MODULE); 2150} 2151 2152static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */ 2153 2154static unsigned short * 2155__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, 2156 unsigned short len, int nul_terminated) 2157{ 2158 unsigned short l = tag_descriptions[tag_number(tag)].max_len; 2159 len = (len < l) ? len : l; 2160 put_unaligned(tag, tl++); 2161 put_unaligned(len, tl++); 2162 memcpy(tl, data, len); 2163 tl = (unsigned short*)((char*)tl + len); 2164 if (nul_terminated) 2165 *((char*)tl - 1) = 0; 2166 return tl; 2167} 2168 2169static unsigned short * 2170tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len) 2171{ 2172 return __tl_add_blob(tl, tag, data, len, 0); 2173} 2174 2175static unsigned short * 2176tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str) 2177{ 2178 return __tl_add_blob(tl, tag, str, strlen(str)+1, 0); 2179} 2180 2181static unsigned short * 2182tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val) 2183{ 2184 put_unaligned(tag, tl++); 2185 switch(tag_type(tag)) { 2186 case TT_INTEGER: 2187 put_unaligned(sizeof(int), tl++); 2188 put_unaligned(*(int *)val, (int *)tl); 2189 tl = (unsigned short*)((char*)tl+sizeof(int)); 2190 break; 2191 case TT_INT64: 2192 put_unaligned(sizeof(u64), tl++); 2193 put_unaligned(*(u64 *)val, (u64 *)tl); 2194 tl = (unsigned short*)((char*)tl+sizeof(u64)); 2195 break; 2196 default: 2197 /* someone did something stupid. */ 2198 ; 2199 } 2200 return tl; 2201} 2202 2203void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state) 2204{ 2205 char buffer[sizeof(struct cn_msg)+ 2206 sizeof(struct drbd_nl_cfg_reply)+ 2207 sizeof(struct get_state_tag_len_struct)+ 2208 sizeof(short int)]; 2209 struct cn_msg *cn_reply = (struct cn_msg *) buffer; 2210 struct drbd_nl_cfg_reply *reply = 2211 (struct drbd_nl_cfg_reply *)cn_reply->data; 2212 unsigned short *tl = reply->tag_list; 2213 2214 /* dev_warn(DEV, "drbd_bcast_state() got called\n"); */ 2215 2216 tl = get_state_to_tags(mdev, (struct get_state *)&state, tl); 2217 2218 put_unaligned(TT_END, tl++); /* Close the tag list */ 2219 2220 cn_reply->id.idx = CN_IDX_DRBD; 2221 cn_reply->id.val = CN_VAL_DRBD; 2222 2223 cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); 2224 cn_reply->ack = 0; /* not used here. */ 2225 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + 2226 (int)((char *)tl - (char *)reply->tag_list); 2227 cn_reply->flags = 0; 2228 2229 reply->packet_type = P_get_state; 2230 reply->minor = mdev_to_minor(mdev); 2231 reply->ret_code = NO_ERROR; 2232 2233 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2234} 2235 2236void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name) 2237{ 2238 char buffer[sizeof(struct cn_msg)+ 2239 sizeof(struct drbd_nl_cfg_reply)+ 2240 sizeof(struct call_helper_tag_len_struct)+ 2241 sizeof(short int)]; 2242 struct cn_msg *cn_reply = (struct cn_msg *) buffer; 2243 struct drbd_nl_cfg_reply *reply = 2244 (struct drbd_nl_cfg_reply *)cn_reply->data; 2245 unsigned short *tl = reply->tag_list; 2246 2247 /* dev_warn(DEV, "drbd_bcast_state() got called\n"); */ 2248 2249 tl = tl_add_str(tl, T_helper, helper_name); 2250 put_unaligned(TT_END, tl++); /* Close the tag list */ 2251 2252 cn_reply->id.idx = CN_IDX_DRBD; 2253 cn_reply->id.val = CN_VAL_DRBD; 2254 2255 cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); 2256 cn_reply->ack = 0; /* not used here. */ 2257 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + 2258 (int)((char *)tl - (char *)reply->tag_list); 2259 cn_reply->flags = 0; 2260 2261 reply->packet_type = P_call_helper; 2262 reply->minor = mdev_to_minor(mdev); 2263 reply->ret_code = NO_ERROR; 2264 2265 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2266} 2267 2268void drbd_bcast_ee(struct drbd_conf *mdev, 2269 const char *reason, const int dgs, 2270 const char* seen_hash, const char* calc_hash, 2271 const struct drbd_epoch_entry* e) 2272{ 2273 struct cn_msg *cn_reply; 2274 struct drbd_nl_cfg_reply *reply; 2275 unsigned short *tl; 2276 struct page *page; 2277 unsigned len; 2278 2279 if (!e) 2280 return; 2281 if (!reason || !reason[0]) 2282 return; 2283 2284 /* apparently we have to memcpy twice, first to prepare the data for the 2285 * struct cn_msg, then within cn_netlink_send from the cn_msg to the 2286 * netlink skb. */ 2287 /* receiver thread context, which is not in the writeout path (of this node), 2288 * but may be in the writeout path of the _other_ node. 2289 * GFP_NOIO to avoid potential "distributed deadlock". */ 2290 cn_reply = kmalloc( 2291 sizeof(struct cn_msg)+ 2292 sizeof(struct drbd_nl_cfg_reply)+ 2293 sizeof(struct dump_ee_tag_len_struct)+ 2294 sizeof(short int), 2295 GFP_NOIO); 2296 2297 if (!cn_reply) { 2298 dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n", 2299 (unsigned long long)e->sector, e->size); 2300 return; 2301 } 2302 2303 reply = (struct drbd_nl_cfg_reply*)cn_reply->data; 2304 tl = reply->tag_list; 2305 2306 tl = tl_add_str(tl, T_dump_ee_reason, reason); 2307 tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs); 2308 tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs); 2309 tl = tl_add_int(tl, T_ee_sector, &e->sector); 2310 tl = tl_add_int(tl, T_ee_block_id, &e->block_id); 2311 2312 put_unaligned(T_ee_data, tl++); 2313 put_unaligned(e->size, tl++); 2314 2315 len = e->size; 2316 page = e->pages; 2317 page_chain_for_each(page) { 2318 void *d = kmap_atomic(page, KM_USER0); 2319 unsigned l = min_t(unsigned, len, PAGE_SIZE); 2320 memcpy(tl, d, l); 2321 kunmap_atomic(d, KM_USER0); 2322 tl = (unsigned short*)((char*)tl + l); 2323 len -= l; 2324 } 2325 put_unaligned(TT_END, tl++); /* Close the tag list */ 2326 2327 cn_reply->id.idx = CN_IDX_DRBD; 2328 cn_reply->id.val = CN_VAL_DRBD; 2329 2330 cn_reply->seq = atomic_add_return(1,&drbd_nl_seq); 2331 cn_reply->ack = 0; // not used here. 2332 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + 2333 (int)((char*)tl - (char*)reply->tag_list); 2334 cn_reply->flags = 0; 2335 2336 reply->packet_type = P_dump_ee; 2337 reply->minor = mdev_to_minor(mdev); 2338 reply->ret_code = NO_ERROR; 2339 2340 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2341 kfree(cn_reply); 2342} 2343 2344void drbd_bcast_sync_progress(struct drbd_conf *mdev) 2345{ 2346 char buffer[sizeof(struct cn_msg)+ 2347 sizeof(struct drbd_nl_cfg_reply)+ 2348 sizeof(struct sync_progress_tag_len_struct)+ 2349 sizeof(short int)]; 2350 struct cn_msg *cn_reply = (struct cn_msg *) buffer; 2351 struct drbd_nl_cfg_reply *reply = 2352 (struct drbd_nl_cfg_reply *)cn_reply->data; 2353 unsigned short *tl = reply->tag_list; 2354 unsigned long rs_left; 2355 unsigned int res; 2356 2357 /* no local ref, no bitmap, no syncer progress, no broadcast. */ 2358 if (!get_ldev(mdev)) 2359 return; 2360 drbd_get_syncer_progress(mdev, &rs_left, &res); 2361 put_ldev(mdev); 2362 2363 tl = tl_add_int(tl, T_sync_progress, &res); 2364 put_unaligned(TT_END, tl++); /* Close the tag list */ 2365 2366 cn_reply->id.idx = CN_IDX_DRBD; 2367 cn_reply->id.val = CN_VAL_DRBD; 2368 2369 cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); 2370 cn_reply->ack = 0; /* not used here. */ 2371 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + 2372 (int)((char *)tl - (char *)reply->tag_list); 2373 cn_reply->flags = 0; 2374 2375 reply->packet_type = P_sync_progress; 2376 reply->minor = mdev_to_minor(mdev); 2377 reply->ret_code = NO_ERROR; 2378 2379 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2380} 2381 2382int __init drbd_nl_init(void) 2383{ 2384 static struct cb_id cn_id_drbd; 2385 int err, try=10; 2386 2387 cn_id_drbd.val = CN_VAL_DRBD; 2388 do { 2389 cn_id_drbd.idx = cn_idx; 2390 err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback); 2391 if (!err) 2392 break; 2393 cn_idx = (cn_idx + CN_IDX_STEP); 2394 } while (try--); 2395 2396 if (err) { 2397 printk(KERN_ERR "drbd: cn_drbd failed to register\n"); 2398 return err; 2399 } 2400 2401 return 0; 2402} 2403 2404void drbd_nl_cleanup(void) 2405{ 2406 static struct cb_id cn_id_drbd; 2407 2408 cn_id_drbd.idx = cn_idx; 2409 cn_id_drbd.val = CN_VAL_DRBD; 2410 2411 cn_del_callback(&cn_id_drbd); 2412} 2413 2414void drbd_nl_send_reply(struct cn_msg *req, int ret_code) 2415{ 2416 char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)]; 2417 struct cn_msg *cn_reply = (struct cn_msg *) buffer; 2418 struct drbd_nl_cfg_reply *reply = 2419 (struct drbd_nl_cfg_reply *)cn_reply->data; 2420 int rr; 2421 2422 cn_reply->id = req->id; 2423 2424 cn_reply->seq = req->seq; 2425 cn_reply->ack = req->ack + 1; 2426 cn_reply->len = sizeof(struct drbd_nl_cfg_reply); 2427 cn_reply->flags = 0; 2428 2429 reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor; 2430 reply->ret_code = ret_code; 2431 2432 rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2433 if (rr && rr != -ESRCH) 2434 printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr); 2435} 2436 2437