drbd_nl.c revision 086fa5ff0854c676ec333760f4c0154b3b242616
/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/connector.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_tag_magic.h>
#include <linux/drbd_limits.h>

static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int);
static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *);
static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *);

/* see get_sb_bdev and bd_claim */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Generate the tag_list to struct functions */
#define NL_PACKET(name, number, fields) \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) __attribute__ ((unused)); \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) \
{ \
	int tag; \
	int dlen; \
	\
	while ((tag = get_unaligned(tags++)) != TT_END) { \
		dlen = get_unaligned(tags++); \
		switch (tag_number(tag)) { \
		fields \
		default: \
			if (tag & T_MANDATORY) { \
				dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \
				return 0; \
			} \
		} \
		tags = (unsigned short *)((char *)tags + dlen); \
	} \
	return 1; \
}
#define NL_INTEGER(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \
		arg->member = get_unaligned((int *)(tags)); \
		break;
#define NL_INT64(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \
		arg->member = get_unaligned((u64 *)(tags)); \
		break;
#define NL_BIT(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \
		arg->member = *(char *)(tags) ? 1 : 0; \
		break;
#define NL_STRING(pn, pr, member, len) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \
		if (dlen > len) { \
			dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \
				#member, dlen, (unsigned int)len); \
			return 0; \
		} \
		arg->member ## _len = dlen; \
		memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
		break;
#include "linux/drbd_nl.h"

/* Generate the struct to tag_list functions */
#define NL_PACKET(name, number, fields) \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) __attribute__ ((unused)); \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) \
{ \
	fields \
	return tags; \
}

#define NL_INTEGER(pn, pr, member) \
	put_unaligned(pn | pr | TT_INTEGER, tags++); \
	put_unaligned(sizeof(int), tags++); \
	put_unaligned(arg->member, (int *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(int));
#define NL_INT64(pn, pr, member) \
	put_unaligned(pn | pr | TT_INT64, tags++); \
	put_unaligned(sizeof(u64), tags++); \
	put_unaligned(arg->member, (u64 *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(u64));
#define NL_BIT(pn, pr, member) \
	put_unaligned(pn | pr | TT_BIT, tags++); \
	put_unaligned(sizeof(char), tags++); \
	*(char *)tags = arg->member; \
	tags = (unsigned short *)((char *)tags+sizeof(char));
#define NL_STRING(pn, pr, member, len) \
	put_unaligned(pn | pr | TT_STRING, tags++); \
	put_unaligned(arg->member ## _len, tags++); \
	memcpy(tags, arg->member, arg->member ## _len); \
	tags = (unsigned short *)((char *)tags + arg->member ## _len);
#include "linux/drbd_nl.h"
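/*
 * Both inclusions of drbd_nl.h above expand, for every packet described in
 * that header (e.g. something like
 *	NL_PACKET(primary, 1, NL_BIT(1, T_MAY_IGNORE, overwrite_peer))
 * -- illustrative only, see linux/drbd_nl.h for the real definitions),
 * into a <name>_from_tags() parser and a <name>_to_tags() generator.
 * The tag list they agree on is a flat, possibly unaligned byte stream of
 *	u16 tag   (tag number | type bits | optional T_MANDATORY)
 *	u16 dlen  (payload length in bytes)
 *	dlen bytes of payload
 * repeated until a tag equal to TT_END terminates the list.
 */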
void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name);
void drbd_nl_send_reply(struct cn_msg *, int);

int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			NULL, /* Will be set to address family */
			NULL, /* Will be set to address */
			NULL };

	char mb[12], af[20], ad[60], *afs;
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));

	if (get_net_conf(mdev)) {
		switch (((struct sockaddr *)mdev->net_conf->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)mdev->net_conf->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
		}
		snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
		envp[3] = af;
		envp[4] = ad;
		put_net_conf(mdev);
	}

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);

	drbd_bcast_ev_helper(mdev, cmd);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
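/*
 * Note on the return value above: call_usermodehelper() is invoked with
 * UMH_WAIT_PROC (the literal 1), so a non-negative ret is a wait()-style
 * status word; the helper's exit code is extracted as (ret >> 8) & 0xff,
 * exactly as the log messages above and drbd_try_outdate_peer() below do.
 */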
enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
{
	char *ex_to_string;
	int r;
	enum drbd_disk_state nps;
	enum drbd_fencing_p fp;

	D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

	if (get_ldev_if_state(mdev, D_CONSISTENT)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	} else {
		dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
		return mdev->state.pdsk;
	}

	if (fp == FP_STONITH)
		_drbd_request_state(mdev, NS(susp, 1), CS_WAIT_COMPLETE);

	r = drbd_khelper(mdev, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		nps = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		nps = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (mdev->state.disk == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			nps = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
			nps = mdev->state.pdsk;
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		dev_warn(DEV, "Peer is primary, outdating myself.\n");
		nps = D_UNKNOWN;
		_drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE);
		break;
	case 7:
		if (fp != FP_STONITH)
			dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		nps = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		nps = D_UNKNOWN;
		dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return nps;
	}

	dev_info(DEV, "fence-peer helper returned %d (%s)\n",
			(r>>8) & 0xff, ex_to_string);
	return nps;
}
int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	int r = 0;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;
	enum drbd_disk_state nps;

	if (new_role == R_PRIMARY)
		request_ping(mdev); /* Detect a dead peer ASAP */

	mutex_lock(&mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		r = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (r == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (r == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk == D_INCONSISTENT ||
		     mdev->state.disk == D_OUTDATED)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (r == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
			nps = drbd_try_outdate_peer(mdev);

			if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}

			val.pdsk = nps;
			mask.pdsk = D_MASK;

			continue;
		}

		if (r == SS_NOTHING_TO_DO)
			goto fail;
		if (r == SS_PRIMARY_NOP && mask.pdsk == 0) {
			nps = drbd_try_outdate_peer(mdev);

			if (force && nps > D_OUTDATED) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				nps = D_OUTDATED;
			}

			mask.pdsk = D_MASK;
			val.pdsk  = nps;

			continue;
		}
		if (r == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (r < SS_SUCCESS) {
			r = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (r < SS_SUCCESS)
				goto fail;
		}
		break;
	}

	if (r < SS_SUCCESS)
		goto fail;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, TRUE);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		if (get_net_conf(mdev)) {
			mdev->net_conf->want_lose = 0;
			put_net_conf(mdev);
		}
		set_disk_ro(mdev->vdisk, FALSE);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
			put_ldev(mdev);
		}
	}

	if ((new_role == R_SECONDARY) && get_ldev(mdev)) {
		drbd_al_to_on_disk_bm(mdev);
		put_ldev(mdev);
	}

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
 fail:
	mutex_unlock(&mdev->state_mutex);
	return r;
}


static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	struct primary primary_args;

	memset(&primary_args, 0, sizeof(struct primary));
	if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	reply->ret_code =
		drbd_set_role(mdev, R_PRIMARY, primary_args.overwrite_peer);

	return 0;
}

static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0);

	return 0;
}

/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	switch (bdev->dc.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_MAX_SIZE;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset   = -md_size_sect + MD_AL_OFFSET;
		break;
	}
}
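/*
 * Resulting layouts, sketched (al_offset and bm_offset are in 512-byte
 * sectors, relative to md_offset; sizes follow from the MD_* constants
 * in drbd_int.h):
 *
 * internal or flexible-internal meta data, at the end of the device:
 *	| ... usable data ... | bitmap | activity log | super block |
 *	                      ^ bm_offset < 0         ^ md_offset
 * external meta data (indexed or flexible-external):
 *	| super block | activity log | bitmap |
 *	^ md_offset   ^ al_offset > 0 ^ bm_offset > 0
 */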
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max. */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%lu %cB", (long)size, units[base]);

	return buf;
}
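/*
 * Example: the input is a KiB count, so ppsize(buf, 16777216) yields
 * "16 GB"; each step divides by 1024 and rounds to nearest, with bit 9
 * of the discarded remainder deciding the direction.
 */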
/* there is still a theoretical deadlock when called from receiver
 * on an D_INCONSISTENT R_PRIMARY:
 *  remote READ does inc_ap_bio, receiver would need to receive answer
 *  packet from remote to dec_ap_bio again.
 *  receiver receive_sizes(), comes here,
 *  waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 *  (not connected, or bad/no disk on peer):
 *  see drbd_fail_request_early, ap_bio_cnt is zero.
 *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 *  peer may not initiate a resize.
 */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}

/**
 * drbd_determin_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns the new size classification: unchanged, shrunk or grew on
 * success, dev_size_error on failure.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, int force) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	size = drbd_new_dev_size(mdev, mdev->ldev, force);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size);
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
					"Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
					"Leaving size unchanged at size = %lu KB\n",
					(unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
		     (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		rv = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! */
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}

sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
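/*
 * Summary of the precedence above: with both node sizes known, the
 * minimum of the two wins; otherwise the last agreed size is used,
 * clipped to whichever size is known; with no last agreed size, the one
 * known size is taken.  A user-requested disk_size then replaces the
 * result, but only if it does not exceed it.
 */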
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	ERR_IF(mdev->sync_conf.al_extents < 7)
		mdev->sync_conf.al_extents = 127;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache,
		mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}

void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __must_hold(local)
{
	struct request_queue * const q = mdev->rq_queue;
	struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
	int max_segments = mdev->ldev->dc.max_bio_bvecs;

	if (b->merge_bvec_fn && !mdev->ldev->dc.use_bmbv)
		max_seg_s = PAGE_SIZE;

	max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);

	blk_queue_max_hw_sectors(q, max_seg_s >> 9);
	blk_queue_max_phys_segments(q, max_segments ? max_segments : MAX_PHYS_SEGMENTS);
	blk_queue_max_hw_segments(q, max_segments ? max_segments : MAX_HW_SEGMENTS);
	blk_queue_max_segment_size(q, max_seg_s);
	blk_queue_logical_block_size(q, 512);
	blk_queue_segment_boundary(q, PAGE_SIZE-1);
	blk_stack_limits(&q->limits, &b->limits, 0);

	if (b->merge_bvec_fn)
		dev_warn(DEV, "Backing device's merge_bvec_fn() = %p\n",
		     b->merge_bvec_fn);
	dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q));

	if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
		dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
		     q->backing_dev_info.ra_pages,
		     b->backing_dev_info.ra_pages);
		q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
	}
}

/* serialize deconfig (worker exiting, doing cleanup)
 * and reconfig (drbdsetup disk, drbdsetup net)
 *
 * wait for a potentially exiting worker, then restart it,
 * or start a new one.
 */
static void drbd_reconfig_start(struct drbd_conf *mdev)
{
	wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
	wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
	drbd_thread_start(&mdev->worker);
}

/* if still unconfigured, stops worker again.
 * if configured now, clears CONFIG_PENDING.
 * wakes potential waiters */
static void drbd_reconfig_done(struct drbd_conf *mdev)
{
	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.disk == D_DISKLESS &&
	    mdev->state.conn == C_STANDALONE &&
	    mdev->state.role == R_SECONDARY) {
		set_bit(DEVICE_DYING, &mdev->flags);
		drbd_thread_stop_nowait(&mdev->worker);
	} else
		clear_bit(CONFIG_PENDING, &mdev->flags);
	spin_unlock_irq(&mdev->req_lock);
	wake_up(&mdev->state_wait);
}
/* does always return 0;
 * interesting return code is in reply->ret_code */
static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	enum drbd_ret_codes retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct inode *inode, *inode2;
	struct lru_cache *resync_lru = NULL;
	union drbd_state ns, os;
	int rv;
	int cp_discovered = 0;
	int logical_block_size;

	drbd_reconfig_start(mdev);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	nbc->dc.disk_size     = DRBD_DISK_SIZE_SECT_DEF;
	nbc->dc.on_io_error   = DRBD_ON_IO_ERROR_DEF;
	nbc->dc.fencing       = DRBD_FENCING_DEF;
	nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;

	if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0);
	if (IS_ERR(nbc->lo_file)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
		    PTR_ERR(nbc->lo_file));
		nbc->lo_file = NULL;
		retcode = ERR_OPEN_DISK;
		goto fail;
	}

	inode = nbc->lo_file->f_dentry->d_inode;

	if (!S_ISBLK(inode->i_mode)) {
		retcode = ERR_DISK_NOT_BDEV;
		goto fail;
	}

	nbc->md_file = filp_open(nbc->dc.meta_dev, O_RDWR, 0);
	if (IS_ERR(nbc->md_file)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
		    PTR_ERR(nbc->md_file));
		nbc->md_file = NULL;
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}

	inode2 = nbc->md_file->f_dentry->d_inode;

	if (!S_ISBLK(inode2->i_mode)) {
		retcode = ERR_MD_NOT_BDEV;
		goto fail;
	}

	nbc->backing_bdev = inode->i_bdev;
	if (bd_claim(nbc->backing_bdev, mdev)) {
		printk(KERN_ERR "drbd: bd_claim(%p,%p); failed [%p;%p;%u]\n",
		       nbc->backing_bdev, mdev,
		       nbc->backing_bdev->bd_holder,
		       nbc->backing_bdev->bd_contains->bd_holder,
		       nbc->backing_bdev->bd_holders);
		retcode = ERR_BDCLAIM_DISK;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			       61, sizeof(struct bm_extent),
			       offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto release_bdev_fail;
	}

	/* meta_dev_idx >= 0: external fixed size,
	 * possibly multiple drbd sharing one meta device.
	 * TODO in that case, paranoia check that [md_bdev, meta_dev_idx] is
	 * not yet used by some other drbd minor!
	 * (if you use drbd.conf + drbdadm,
	 * that should check it for you already; but if you don't, or someone
	 * fooled it, we need to double check here) */
	nbc->md_bdev = inode2->i_bdev;
	if (bd_claim(nbc->md_bdev, (nbc->dc.meta_dev_idx < 0) ? (void *)mdev
				: (void *) drbd_m_holder)) {
		retcode = ERR_BDCLAIM_MD_DISK;
		goto release_bdev_fail;
	}

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto release_bdev2_fail;
	}

	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) nbc->dc.disk_size);
		retcode = ERR_DISK_TO_SMALL;
		goto release_bdev2_fail;
	}

	if (nbc->dc.meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
			 "at least %llu sectors needed for this meta-disk type\n",
			 (unsigned long long) min_md_device_sectors);
		goto release_bdev2_fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TO_SMALL;
		goto release_bdev2_fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			"to currently maximum possible %llu sectors <==\n",
			(unsigned long long) max_possible_sectors);
		if (nbc->dc.meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				"meta data may help <<==\n");
	}

	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	drbd_resume_io(mdev);
	if (retcode < SS_SUCCESS)
		goto release_bdev2_fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TO_SMALL;
		goto force_diskless_dec;
	}

	if (!drbd_al_read_log(mdev, nbc)) {
		retcode = ERR_IO_MD_DISK;
		goto force_diskless_dec;
	}

	/* allocate a second IO page if logical_block_size != 512 */
	logical_block_size = bdev_logical_block_size(nbc->md_bdev);
	if (logical_block_size == 0)
		logical_block_size = MD_SECTOR_SIZE;

	if (logical_block_size != MD_SECTOR_SIZE) {
		if (!mdev->md_io_tmpp) {
			struct page *page = alloc_page(GFP_NOIO);
			if (!page)
				goto force_diskless_dec;

			dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n",
			     logical_block_size, MD_SECTOR_SIZE);
			dev_warn(DEV, "Workaround engaged (has performance impact).\n");

			mdev->md_io_tmpp = page;
		}
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (nbc->dc.no_md_flush)
		set_bit(MD_NO_BARRIER, &mdev->flags);
	else
		clear_bit(MD_NO_BARRIER, &mdev->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere. */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	nbc = NULL;
	resync_lru = NULL;

	mdev->write_ordering = WO_bio_barrier;
	drbd_bump_write_ordering(mdev, WO_bio_barrier);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND)) {
		set_bit(CRASHED_PRIMARY, &mdev->flags);
		cp_discovered = 1;
	}

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;
	mdev->read_cnt = 0;
	mdev->writ_cnt = 0;

	drbd_setup_queue_param(mdev, DRBD_MAX_SEGMENT_SIZE);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	if (mdev->state.role != R_PRIMARY &&
	     drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &mdev->flags);

	dd = drbd_determin_dev_size(mdev, 0);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == grew)
		set_bit(RESYNC_AFTER_NEG, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
		     "(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (cp_discovered) {
		drbd_al_apply_to_bm(mdev);
		drbd_al_to_on_disk_bm(mdev);
	}

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;
	ns.i = os.i;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WasUpToDate...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	if ( ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

 force_diskless_dec:
	put_ldev(mdev);
 force_diskless:
	drbd_force_state(mdev, NS(disk, D_DISKLESS));
	drbd_md_sync(mdev);
 release_bdev2_fail:
	if (nbc)
		bd_release(nbc->md_bdev);
 release_bdev_fail:
	if (nbc)
		bd_release(nbc->backing_bdev);
 fail:
	if (nbc) {
		if (nbc->lo_file)
			fput(nbc->lo_file);
		if (nbc->md_file)
			fput(nbc->md_file);
		kfree(nbc);
	}
	lc_destroy(resync_lru);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}

static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS));
	return 0;
}

static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			    struct drbd_nl_cfg_reply *reply)
{
	int i, ns;
	enum drbd_ret_codes retcode;
	struct net_conf *new_conf = NULL;
	struct crypto_hash *tfm = NULL;
	struct crypto_hash *integrity_w_tfm = NULL;
	struct crypto_hash *integrity_r_tfm = NULL;
	struct hlist_head *new_tl_hash = NULL;
	struct hlist_head *new_ee_hash = NULL;
	struct drbd_conf *odev;
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	void *int_dig_out = NULL;
	void *int_dig_in = NULL;
	void *int_dig_vv = NULL;
	struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;

	drbd_reconfig_start(mdev);

	if (mdev->state.conn > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	new_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	memset(new_conf, 0, sizeof(struct net_conf));
	new_conf->timeout	   = DRBD_TIMEOUT_DEF;
	new_conf->try_connect_int  = DRBD_CONNECT_INT_DEF;
	new_conf->ping_int	   = DRBD_PING_INT_DEF;
	new_conf->max_epoch_size   = DRBD_MAX_EPOCH_SIZE_DEF;
	new_conf->max_buffers	   = DRBD_MAX_BUFFERS_DEF;
	new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
	new_conf->sndbuf_size	   = DRBD_SNDBUF_SIZE_DEF;
	new_conf->rcvbuf_size	   = DRBD_RCVBUF_SIZE_DEF;
	new_conf->ko_count	   = DRBD_KO_COUNT_DEF;
	new_conf->after_sb_0p	   = DRBD_AFTER_SB_0P_DEF;
	new_conf->after_sb_1p	   = DRBD_AFTER_SB_1P_DEF;
	new_conf->after_sb_2p	   = DRBD_AFTER_SB_2P_DEF;
	new_conf->want_lose	   = 0;
	new_conf->two_primaries    = 0;
	new_conf->wire_protocol    = DRBD_PROT_C;
	new_conf->ping_timeo	   = DRBD_PING_TIMEO_DEF;
	new_conf->rr_conflict	   = DRBD_RR_CONFLICT_DEF;

	if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}
	if (new_conf->two_primaries
	    && (new_conf->wire_protocol != DRBD_PROT_C)) {
		retcode = ERR_NOT_PROTO_C;
		goto fail;
	}

	if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
		retcode = ERR_DISCARD;
		goto fail;
	}

	retcode = NO_ERROR;

	new_my_addr = (struct sockaddr *)&new_conf->my_addr;
	new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev || odev == mdev)
			continue;
		if (get_net_conf(odev)) {
			taken_addr = (struct sockaddr *)&odev->net_conf->my_addr;
			if (new_conf->my_addr_len == odev->net_conf->my_addr_len &&
			    !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
				retcode = ERR_LOCAL_ADDR;

			taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr;
			if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len &&
			    !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
				retcode = ERR_PEER_ADDR;

			put_net_conf(odev);
			if (retcode != NO_ERROR)
				goto fail;
		}
	}

	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			new_conf->cram_hmac_alg);
		tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm)) {
			tfm = NULL;
			retcode = ERR_AUTH_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
			retcode = ERR_AUTH_ALG_ND;
			goto fail;
		}
	}

	if (new_conf->integrity_alg[0]) {
		integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_w_tfm)) {
			integrity_w_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
			retcode = ERR_INTEGRITY_ALG_ND;
			goto fail;
		}

		integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_r_tfm)) {
			integrity_r_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}
	}

	ns = new_conf->max_epoch_size/8;
	if (mdev->tl_hash_s != ns) {
		new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_tl_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	ns = new_conf->max_buffers/8;
	if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) {
		new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_ee_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	if (integrity_w_tfm) {
		i = crypto_hash_digestsize(integrity_w_tfm);
		int_dig_out = kmalloc(i, GFP_KERNEL);
		if (!int_dig_out) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_in = kmalloc(i, GFP_KERNEL);
		if (!int_dig_in) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_vv = kmalloc(i, GFP_KERNEL);
		if (!int_dig_vv) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	spin_lock_irq(&mdev->req_lock);
	if (mdev->net_conf != NULL) {
		retcode = ERR_NET_CONFIGURED;
		spin_unlock_irq(&mdev->req_lock);
		goto fail;
	}
	mdev->net_conf = new_conf;

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;

	if (new_tl_hash) {
		kfree(mdev->tl_hash);
		mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8;
		mdev->tl_hash = new_tl_hash;
	}

	if (new_ee_hash) {
		kfree(mdev->ee_hash);
		mdev->ee_hash_s = mdev->net_conf->max_buffers/8;
		mdev->ee_hash = new_ee_hash;
	}

	crypto_free_hash(mdev->cram_hmac_tfm);
	mdev->cram_hmac_tfm = tfm;

	crypto_free_hash(mdev->integrity_w_tfm);
	mdev->integrity_w_tfm = integrity_w_tfm;

	crypto_free_hash(mdev->integrity_r_tfm);
	mdev->integrity_r_tfm = integrity_r_tfm;

	kfree(mdev->int_dig_out);
	kfree(mdev->int_dig_in);
	kfree(mdev->int_dig_vv);
	mdev->int_dig_out = int_dig_out;
	mdev->int_dig_in = int_dig_in;
	mdev->int_dig_vv = int_dig_vv;
	spin_unlock_irq(&mdev->req_lock);

	retcode = _drbd_request_state(mdev, NS(conn, C_UNCONNECTED), CS_VERBOSE);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

fail:
	kfree(int_dig_out);
	kfree(int_dig_in);
	kfree(int_dig_vv);
	crypto_free_hash(tfm);
	crypto_free_hash(integrity_w_tfm);
	crypto_free_hash(integrity_r_tfm);
	kfree(new_tl_hash);
	kfree(new_ee_hash);
	kfree(new_conf);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}

static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED);

	if (retcode == SS_NOTHING_TO_DO)
		goto done;
	else if (retcode == SS_ALREADY_STANDALONE)
		goto done;
	else if (retcode == SS_PRIMARY_NOP) {
		/* Our state checking code wants to see the peer outdated. */
		retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
						      pdsk, D_OUTDATED));
	} else if (retcode == SS_CW_FAILED_BY_PEER) {
		/* The peer probably wants to see us outdated. */
		retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
							disk, D_OUTDATED),
					      CS_ORDERED);
		if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			retcode = SS_SUCCESS;
		}
	}

	if (retcode < SS_SUCCESS)
		goto fail;

	if (wait_event_interruptible(mdev->state_wait,
				     mdev->state.conn != C_DISCONNECTING)) {
		/* Do not test for mdev->state.conn == C_STANDALONE, since
		   someone else might connect us in the meantime! */
		retcode = ERR_INTR;
		goto fail;
	}

 done:
	retcode = NO_ERROR;
 fail:
	drbd_md_sync(mdev);
	reply->ret_code = retcode;
	return 0;
}

void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}

static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	struct resize rs;
	int retcode = NO_ERROR;
	int ldsc = 0; /* local disk size changed */
	enum determine_dev_size dd;

	memset(&rs, 0, sizeof(struct resize));
	if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
		ldsc = 1;
	}

	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
	dd = drbd_determin_dev_size(mdev, rs.resize_force);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED && (dd != unchanged || ldsc)) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1);
	}

 fail:
	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	struct syncer_conf sc;
	cpumask_var_t new_cpu_mask;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	if (nlp->flags & DRBD_NL_SET_DEFAULTS) {
		memset(&sc, 0, sizeof(struct syncer_conf));
		sc.rate       = DRBD_RATE_DEF;
		sc.after      = DRBD_AFTER_DEF;
		sc.al_extents = DRBD_AL_EXTENTS_DEF;
	} else
		memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));

	if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	/* re-sync running */
	rsr = (	mdev->state.conn == C_SYNC_SOURCE ||
		mdev->state.conn == C_SYNC_TARGET ||
		mdev->state.conn == C_PAUSED_SYNC_S ||
		mdev->state.conn == C_PAUSED_SYNC_T );

	if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	if (!rsr && sc.csums_alg[0]) {
		csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(csums_tfm)) {
			csums_tfm = NULL;
			retcode = ERR_CSUMS_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
			retcode = ERR_CSUMS_ALG_ND;
			goto fail;
		}
	}

	/* online verify running */
	ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);

	if (ovr) {
		if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
			retcode = ERR_VERIFY_RUNNING;
			goto fail;
		}
	}

	if (!ovr && sc.verify_alg[0]) {
		verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(verify_tfm)) {
			verify_tfm = NULL;
			retcode = ERR_VERIFY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
			retcode = ERR_VERIFY_ALG_ND;
			goto fail;
		}
	}

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
		err = __bitmap_parse(sc.cpu_mask, 32, 0,
				cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
			retcode = ERR_CPU_MASK_PARSE;
			goto fail;
		}
	}

	ERR_IF (sc.rate < 1) sc.rate = 1;
	ERR_IF (sc.al_extents < 7) sc.al_extents = 127; /* arbitrary minimum */
#define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT)
	if (sc.al_extents > AL_MAX) {
		dev_err(DEV, "sc.al_extents > %d\n", AL_MAX);
		sc.al_extents = AL_MAX;
	}
#undef AL_MAX

	/* most sanity checks done, try to assign the new sync-after
	 * dependency.  need to hold the global lock in there,
	 * to avoid a race in the dependency loop check. */
	retcode = drbd_alter_sa(mdev, sc.after);
	if (retcode != NO_ERROR)
		goto fail;

	/* ok, assign the rest of it as well.
	 * lock against receive_SyncParam() */
	spin_lock(&mdev->peer_seq_lock);
	mdev->sync_conf = sc;

	if (!rsr) {
		crypto_free_hash(mdev->csums_tfm);
		mdev->csums_tfm = csums_tfm;
		csums_tfm = NULL;
	}

	if (!ovr) {
		crypto_free_hash(mdev->verify_tfm);
		mdev->verify_tfm = verify_tfm;
		verify_tfm = NULL;
	}
	spin_unlock(&mdev->peer_seq_lock);

	if (get_ldev(mdev)) {
		wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
		drbd_al_shrink(mdev);
		err = drbd_check_al_size(mdev);
		lc_unlock(mdev->act_log);
		wake_up(&mdev->al_wait);

		put_ldev(mdev);
		drbd_md_sync(mdev);

		if (err) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev, &sc);

	if (!cpumask_equal(mdev->cpu_mask, new_cpu_mask)) {
		cpumask_copy(mdev->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(mdev);
		mdev->receiver.reset_cpu_mask = 1;
		mdev->asender.reset_cpu_mask = 1;
		mdev->worker.reset_cpu_mask = 1;
	}

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
fail:
	free_cpumask_var(new_cpu_mask);
	crypto_free_hash(csums_tfm);
	crypto_free_hash(verify_tfm);
	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
				   struct drbd_nl_cfg_reply *reply)
{

	reply->ret_code = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));

	return 0;
}

static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;

	if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;

	if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_CLEAR;

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(susp, 1));

	return 0;
}

static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(susp, 0));
	return 0;
}
static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED));
	return 0;
}

static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;

	tl = reply->tag_list;

	if (get_ldev(mdev)) {
		tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl);
		put_ldev(mdev);
	}

	if (get_net_conf(mdev)) {
		tl = net_conf_to_tags(mdev, mdev->net_conf, tl);
		put_net_conf(mdev);
	}
	tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl);

	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}
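/*
 * Like the other get_* handlers below, the return value is the number of
 * bytes written to reply->tag_list, including the closing TT_END;
 * drbd_connector_callback() adds it to the connector reply length.
 */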
static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl = reply->tag_list;
	union drbd_state s = mdev->state;
	unsigned long rs_left;
	unsigned int res;

	tl = get_state_to_tags(mdev, (struct get_state *)&s, tl);

	/* no local ref, no bitmap, no syncer progress. */
	if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) {
		if (get_ldev(mdev)) {
			drbd_get_syncer_progress(mdev, &rs_left, &res);
			tl = tl_add_int(tl, T_sync_progress, &res);
			put_ldev(mdev);
		}
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;

	tl = reply->tag_list;

	if (get_ldev(mdev)) {
		tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64));
		tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags);
		put_ldev(mdev);
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

/**
 * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use
 * @mdev:	DRBD device.
 * @nlp:	Netlink/connector packet from drbdsetup
 * @reply:	Reply packet for drbdsetup
 */
static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
				    struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;
	char rv;

	tl = reply->tag_list;

	rv = mdev->state.pdsk == D_OUTDATED        ? UT_PEER_OUTDATED :
	  test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;

	tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			    struct drbd_nl_cfg_reply *reply)
{
	/* default to resume from last known position, if possible */
	struct start_ov args =
		{ .start_sector = mdev->ov_start_sector };

	if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}
	/* w_make_ov_request expects position to be aligned */
	mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT;
	reply->ret_code = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
	return 0;
}


static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int skip_initial_sync = 0;
	int err;

	struct new_c_uuid args;

	memset(&args, 0, sizeof(struct new_c_uuid));
	if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid");
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			spin_lock_irq(&mdev->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(&mdev->state_mutex);

	reply->ret_code = retcode;
	return 0;
}

static struct drbd_conf *ensure_mdev(struct drbd_nl_cfg_req *nlp)
{
	struct drbd_conf *mdev;

	if (nlp->drbd_minor >= minor_count)
		return NULL;

	mdev = minor_to_mdev(nlp->drbd_minor);

	if (!mdev && (nlp->flags & DRBD_NL_CREATE_DEVICE)) {
		struct gendisk *disk = NULL;
		mdev = drbd_new_device(nlp->drbd_minor);

		spin_lock_irq(&drbd_pp_lock);
		if (minor_table[nlp->drbd_minor] == NULL) {
			minor_table[nlp->drbd_minor] = mdev;
			disk = mdev->vdisk;
			mdev = NULL;
		} /* else: we lost the race */
		spin_unlock_irq(&drbd_pp_lock);

		if (disk) /* we won the race above */
			/* in case we ever add a drbd_delete_device(),
			 * don't forget the del_gendisk! */
			add_disk(disk);
		else /* we lost the race above */
			drbd_free_mdev(mdev);

		mdev = minor_to_mdev(nlp->drbd_minor);
	}

	return mdev;
}
static struct drbd_conf *ensure_mdev(struct drbd_nl_cfg_req *nlp)
{
	struct drbd_conf *mdev;

	if (nlp->drbd_minor >= minor_count)
		return NULL;

	mdev = minor_to_mdev(nlp->drbd_minor);

	if (!mdev && (nlp->flags & DRBD_NL_CREATE_DEVICE)) {
		struct gendisk *disk = NULL;
		mdev = drbd_new_device(nlp->drbd_minor);
		if (!mdev) /* allocation failed: nothing to insert */
			return NULL;

		spin_lock_irq(&drbd_pp_lock);
		if (minor_table[nlp->drbd_minor] == NULL) {
			minor_table[nlp->drbd_minor] = mdev;
			disk = mdev->vdisk;
			mdev = NULL;
		} /* else: we lost the race */
		spin_unlock_irq(&drbd_pp_lock);

		if (disk) /* we won the race above */
			/* in case we ever add a drbd_delete_device(),
			 * don't forget the del_gendisk! */
			add_disk(disk);
		else /* we lost the race above */
			drbd_free_mdev(mdev);

		mdev = minor_to_mdev(nlp->drbd_minor);
	}

	return mdev;
}

struct cn_handler_struct {
	int (*function)(struct drbd_conf *,
			struct drbd_nl_cfg_req *,
			struct drbd_nl_cfg_reply *);
	int reply_body_size;
};

static struct cn_handler_struct cnd_table[] = {
	[ P_primary ]		= { &drbd_nl_primary,		0 },
	[ P_secondary ]		= { &drbd_nl_secondary,		0 },
	[ P_disk_conf ]		= { &drbd_nl_disk_conf,		0 },
	[ P_detach ]		= { &drbd_nl_detach,		0 },
	[ P_net_conf ]		= { &drbd_nl_net_conf,		0 },
	[ P_disconnect ]	= { &drbd_nl_disconnect,	0 },
	[ P_resize ]		= { &drbd_nl_resize,		0 },
	[ P_syncer_conf ]	= { &drbd_nl_syncer_conf,	0 },
	[ P_invalidate ]	= { &drbd_nl_invalidate,	0 },
	[ P_invalidate_peer ]	= { &drbd_nl_invalidate_peer,	0 },
	[ P_pause_sync ]	= { &drbd_nl_pause_sync,	0 },
	[ P_resume_sync ]	= { &drbd_nl_resume_sync,	0 },
	[ P_suspend_io ]	= { &drbd_nl_suspend_io,	0 },
	[ P_resume_io ]		= { &drbd_nl_resume_io,		0 },
	[ P_outdate ]		= { &drbd_nl_outdate,		0 },
	[ P_get_config ]	= { &drbd_nl_get_config,
				    sizeof(struct syncer_conf_tag_len_struct) +
				    sizeof(struct disk_conf_tag_len_struct) +
				    sizeof(struct net_conf_tag_len_struct) },
	[ P_get_state ]		= { &drbd_nl_get_state,
				    sizeof(struct get_state_tag_len_struct) +
				    sizeof(struct sync_progress_tag_len_struct) },
	[ P_get_uuids ]		= { &drbd_nl_get_uuids,
				    sizeof(struct get_uuids_tag_len_struct) },
	[ P_get_timeout_flag ]	= { &drbd_nl_get_timeout_flag,
				    sizeof(struct get_timeout_flag_tag_len_struct) },
	[ P_start_ov ]		= { &drbd_nl_start_ov,		0 },
	[ P_new_c_uuid ]	= { &drbd_nl_new_c_uuid,	0 },
};

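/**
 * drbd_connector_callback() - Dispatch one request from drbdsetup
 * @req:	Request received on the connector socket
 * @nsp:	Netlink socket parameters, used for the CAP_SYS_ADMIN check
 *
 * Looks up the handler for nlp->packet_type in cnd_table, allocates a
 * reply buffer large enough for that handler's tag list, invokes the
 * handler, and sends the reply back via cn_netlink_send().
 */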
static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp)
{
	struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data;
	struct cn_handler_struct *cm;
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	struct drbd_conf *mdev;
	int retcode, rr;
	int reply_size = sizeof(struct cn_msg)
		+ sizeof(struct drbd_nl_cfg_reply)
		+ sizeof(short int);

	if (!try_module_get(THIS_MODULE)) {
		printk(KERN_ERR "drbd: try_module_get() failed!\n");
		return;
	}

	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) {
		retcode = ERR_PERM;
		goto fail;
	}

	mdev = ensure_mdev(nlp);
	if (!mdev) {
		retcode = ERR_MINOR_INVALID;
		goto fail;
	}

	if (nlp->packet_type >= P_nl_after_last_packet) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	cm = cnd_table + nlp->packet_type;

	/* This may happen if packet number is 0: */
	if (cm->function == NULL) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	reply_size += cm->reply_body_size;

	/* allocation not in the IO path, cqueue thread context */
	cn_reply = kmalloc(reply_size, GFP_KERNEL);
	if (!cn_reply) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	reply = (struct drbd_nl_cfg_reply *) cn_reply->data;

	reply->packet_type =
		cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet;
	reply->minor = nlp->drbd_minor;
	reply->ret_code = NO_ERROR; /* Might be modified by cm->function. */
	/* reply->tag_list: might be modified by cm->function. */

	rr = cm->function(mdev, nlp, reply);

	cn_reply->id = req->id;
	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr;
	cn_reply->flags = 0;

	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
	if (rr && rr != -ESRCH)
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);

	kfree(cn_reply);
	module_put(THIS_MODULE);
	return;
fail:
	drbd_nl_send_reply(req, retcode);
	module_put(THIS_MODULE);
}

static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */

static unsigned short *
__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
	      unsigned short len, int nul_terminated)
{
	unsigned short l = tag_descriptions[tag_number(tag)].max_len;
	len = (len < l) ? len : l;
	put_unaligned(tag, tl++);
	put_unaligned(len, tl++);
	memcpy(tl, data, len);
	tl = (unsigned short *)((char *)tl + len);
	if (nul_terminated)
		*((char *)tl - 1) = 0;
	return tl;
}

static unsigned short *
tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len)
{
	return __tl_add_blob(tl, tag, data, len, 0);
}

static unsigned short *
tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str)
{
	return __tl_add_blob(tl, tag, str, strlen(str)+1, 0);
}

static unsigned short *
tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val)
{
	put_unaligned(tag, tl++);
	switch (tag_type(tag)) {
	case TT_INTEGER:
		put_unaligned(sizeof(int), tl++);
		put_unaligned(*(int *)val, (int *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(int));
		break;
	case TT_INT64:
		put_unaligned(sizeof(u64), tl++);
		put_unaligned(*(u64 *)val, (u64 *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(u64));
		break;
	default:
		/* someone did something stupid. */
		;
	}
	return tl;
}

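/* The drbd_bcast_*() functions below broadcast events to userspace:
 * each builds a complete connector message, consisting of the cn_msg
 * header, a drbd_nl_cfg_reply, and a TT_END terminated tag list, either
 * on the stack or (for the potentially large ee dump) in a kmalloc'ed
 * buffer, and sends it out on CN_IDX_DRBD/CN_VAL_DRBD. */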
void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct get_state_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_state() got called\n"); */

	tl = get_state_to_tags(mdev, (struct get_state *)&state, tl);

	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_get_state;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct call_helper_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_ev_helper() got called\n"); */

	tl = tl_add_str(tl, T_helper, helper_name);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_call_helper;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

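/**
 * drbd_bcast_ee() - Broadcast the content of an epoch entry to userspace
 * @mdev:	DRBD device.
 * @reason:	Short description of why the entry is dumped; nothing is
 *		sent if this is NULL or empty.
 * @dgs:	Size of the digests, in bytes.
 * @seen_hash:	Digest that came with the data.
 * @calc_hash:	Digest calculated locally over the data.
 * @e:		The epoch entry to dump.
 */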
void drbd_bcast_ee(struct drbd_conf *mdev,
		const char *reason, const int dgs,
		const char *seen_hash, const char *calc_hash,
		const struct drbd_epoch_entry *e)
{
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	struct bio_vec *bvec;
	unsigned short *tl;
	int i;

	if (!e)
		return;
	if (!reason || !reason[0])
		return;

	/* apparently we have to memcpy twice, first to prepare the data for the
	 * struct cn_msg, then within cn_netlink_send from the cn_msg to the
	 * netlink skb. */
	/* receiver thread context, which is not in the writeout path (of this node),
	 * but may be in the writeout path of the _other_ node.
	 * GFP_NOIO to avoid potential "distributed deadlock". */
	cn_reply = kmalloc(
		sizeof(struct cn_msg)+
		sizeof(struct drbd_nl_cfg_reply)+
		sizeof(struct dump_ee_tag_len_struct)+
		sizeof(short int),
		GFP_NOIO);

	if (!cn_reply) {
		dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n",
				(unsigned long long)e->sector, e->size);
		return;
	}

	reply = (struct drbd_nl_cfg_reply *)cn_reply->data;
	tl = reply->tag_list;

	tl = tl_add_str(tl, T_dump_ee_reason, reason);
	tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs);
	tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs);
	tl = tl_add_int(tl, T_ee_sector, &e->sector);
	tl = tl_add_int(tl, T_ee_block_id, &e->block_id);

	put_unaligned(T_ee_data, tl++);
	put_unaligned(e->size, tl++);

	__bio_for_each_segment(bvec, e->private_bio, i, 0) {
		void *d = kmap(bvec->bv_page);
		memcpy(tl, d + bvec->bv_offset, bvec->bv_len);
		kunmap(bvec->bv_page);
		tl = (unsigned short *)((char *)tl + bvec->bv_len);
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_dump_ee;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	kfree(cn_reply);
}

void drbd_bcast_sync_progress(struct drbd_conf *mdev)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct sync_progress_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;
	unsigned long rs_left;
	unsigned int res;

	/* no local ref, no bitmap, no syncer progress, no broadcast. */
	if (!get_ldev(mdev))
		return;
	drbd_get_syncer_progress(mdev, &rs_left, &res);
	put_ldev(mdev);

	tl = tl_add_int(tl, T_sync_progress, &res);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_sync_progress;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

int __init drbd_nl_init(void)
{
	static struct cb_id cn_id_drbd;
	int err, try = 10;

	cn_id_drbd.val = CN_VAL_DRBD;
	do {
		cn_id_drbd.idx = cn_idx;
		err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback);
		if (!err)
			break;
		cn_idx += CN_IDX_STEP;
	} while (try--);

	if (err) {
		printk(KERN_ERR "drbd: cn_drbd failed to register\n");
		return err;
	}

	return 0;
}

void drbd_nl_cleanup(void)
{
	static struct cb_id cn_id_drbd;

	cn_id_drbd.idx = cn_idx;
	cn_id_drbd.val = CN_VAL_DRBD;

	cn_del_callback(&cn_id_drbd);
}

void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
{
	char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	int rr;

	cn_reply->id = req->id;

	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply);
	cn_reply->flags = 0;

	reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
	reply->ret_code = ret_code;

	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	if (rr && rr != -ESRCH)
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
}