drbd_nl.c revision 894c6a946199cf91e52bc1864c3dc6529cceb3db
1/* 2 drbd_nl.c 3 4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg. 5 6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH. 7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>. 8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. 9 10 drbd is free software; you can redistribute it and/or modify 11 it under the terms of the GNU General Public License as published by 12 the Free Software Foundation; either version 2, or (at your option) 13 any later version. 14 15 drbd is distributed in the hope that it will be useful, 16 but WITHOUT ANY WARRANTY; without even the implied warranty of 17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 GNU General Public License for more details. 19 20 You should have received a copy of the GNU General Public License 21 along with drbd; see the file COPYING. If not, write to 22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 23 24 */ 25 26#include <linux/module.h> 27#include <linux/drbd.h> 28#include <linux/in.h> 29#include <linux/fs.h> 30#include <linux/file.h> 31#include <linux/slab.h> 32#include <linux/connector.h> 33#include <linux/blkpg.h> 34#include <linux/cpumask.h> 35#include "drbd_int.h" 36#include "drbd_req.h" 37#include "drbd_wrappers.h" 38#include <asm/unaligned.h> 39#include <linux/drbd_tag_magic.h> 40#include <linux/drbd_limits.h> 41 42static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int); 43static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *); 44static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *); 45 46/* see get_sb_bdev and bd_claim */ 47static char *drbd_m_holder = "Hands off! this is DRBD's meta data device."; 48 49/* Generate the tag_list to struct functions */ 50#define NL_PACKET(name, number, fields) \ 51static int name ## _from_tags(struct drbd_conf *mdev, \ 52 unsigned short *tags, struct name *arg) __attribute__ ((unused)); \ 53static int name ## _from_tags(struct drbd_conf *mdev, \ 54 unsigned short *tags, struct name *arg) \ 55{ \ 56 int tag; \ 57 int dlen; \ 58 \ 59 while ((tag = get_unaligned(tags++)) != TT_END) { \ 60 dlen = get_unaligned(tags++); \ 61 switch (tag_number(tag)) { \ 62 fields \ 63 default: \ 64 if (tag & T_MANDATORY) { \ 65 dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \ 66 return 0; \ 67 } \ 68 } \ 69 tags = (unsigned short *)((char *)tags + dlen); \ 70 } \ 71 return 1; \ 72} 73#define NL_INTEGER(pn, pr, member) \ 74 case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \ 75 arg->member = get_unaligned((int *)(tags)); \ 76 break; 77#define NL_INT64(pn, pr, member) \ 78 case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \ 79 arg->member = get_unaligned((u64 *)(tags)); \ 80 break; 81#define NL_BIT(pn, pr, member) \ 82 case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \ 83 arg->member = *(char *)(tags) ? 1 : 0; \ 84 break; 85#define NL_STRING(pn, pr, member, len) \ 86 case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \ 87 if (dlen > len) { \ 88 dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \ 89 #member, dlen, (unsigned int)len); \ 90 return 0; \ 91 } \ 92 arg->member ## _len = dlen; \ 93 memcpy(arg->member, tags, min_t(size_t, dlen, len)); \ 94 break; 95#include "linux/drbd_nl.h" 96 97/* Generate the struct to tag_list functions */ 98#define NL_PACKET(name, number, fields) \ 99static unsigned short* \ 100name ## _to_tags(struct drbd_conf *mdev, \ 101 struct name *arg, unsigned short *tags) __attribute__ ((unused)); \ 102static unsigned short* \ 103name ## _to_tags(struct drbd_conf *mdev, \ 104 struct name *arg, unsigned short *tags) \ 105{ \ 106 fields \ 107 return tags; \ 108} 109 110#define NL_INTEGER(pn, pr, member) \ 111 put_unaligned(pn | pr | TT_INTEGER, tags++); \ 112 put_unaligned(sizeof(int), tags++); \ 113 put_unaligned(arg->member, (int *)tags); \ 114 tags = (unsigned short *)((char *)tags+sizeof(int)); 115#define NL_INT64(pn, pr, member) \ 116 put_unaligned(pn | pr | TT_INT64, tags++); \ 117 put_unaligned(sizeof(u64), tags++); \ 118 put_unaligned(arg->member, (u64 *)tags); \ 119 tags = (unsigned short *)((char *)tags+sizeof(u64)); 120#define NL_BIT(pn, pr, member) \ 121 put_unaligned(pn | pr | TT_BIT, tags++); \ 122 put_unaligned(sizeof(char), tags++); \ 123 *(char *)tags = arg->member; \ 124 tags = (unsigned short *)((char *)tags+sizeof(char)); 125#define NL_STRING(pn, pr, member, len) \ 126 put_unaligned(pn | pr | TT_STRING, tags++); \ 127 put_unaligned(arg->member ## _len, tags++); \ 128 memcpy(tags, arg->member, arg->member ## _len); \ 129 tags = (unsigned short *)((char *)tags + arg->member ## _len); 130#include "linux/drbd_nl.h" 131 132void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name); 133void drbd_nl_send_reply(struct cn_msg *, int); 134 135int drbd_khelper(struct drbd_conf *mdev, char *cmd) 136{ 137 char *envp[] = { "HOME=/", 138 "TERM=linux", 139 "PATH=/sbin:/usr/sbin:/bin:/usr/bin", 140 NULL, /* Will be set to address family */ 141 NULL, /* Will be set to address */ 142 NULL }; 143 144 char mb[12], af[20], ad[60], *afs; 145 char *argv[] = {usermode_helper, cmd, mb, NULL }; 146 int ret; 147 148 snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev)); 149 150 if (get_net_conf(mdev)) { 151 switch (((struct sockaddr *)mdev->net_conf->peer_addr)->sa_family) { 152 case AF_INET6: 153 afs = "ipv6"; 154 snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6", 155 &((struct sockaddr_in6 *)mdev->net_conf->peer_addr)->sin6_addr); 156 break; 157 case AF_INET: 158 afs = "ipv4"; 159 snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4", 160 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr); 161 break; 162 default: 163 afs = "ssocks"; 164 snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4", 165 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr); 166 } 167 snprintf(af, 20, "DRBD_PEER_AF=%s", afs); 168 envp[3]=af; 169 envp[4]=ad; 170 put_net_conf(mdev); 171 } 172 173 dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb); 174 175 drbd_bcast_ev_helper(mdev, cmd); 176 ret = call_usermodehelper(usermode_helper, argv, envp, 1); 177 if (ret) 178 dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n", 179 usermode_helper, cmd, mb, 180 (ret >> 8) & 0xff, ret); 181 else 182 dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n", 183 usermode_helper, cmd, mb, 184 (ret >> 8) & 0xff, ret); 185 186 if (ret < 0) /* Ignore any ERRNOs we got. */ 187 ret = 0; 188 189 return ret; 190} 191 192enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev) 193{ 194 char *ex_to_string; 195 int r; 196 enum drbd_disk_state nps; 197 enum drbd_fencing_p fp; 198 199 D_ASSERT(mdev->state.pdsk == D_UNKNOWN); 200 201 if (get_ldev_if_state(mdev, D_CONSISTENT)) { 202 fp = mdev->ldev->dc.fencing; 203 put_ldev(mdev); 204 } else { 205 dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n"); 206 return mdev->state.pdsk; 207 } 208 209 if (fp == FP_STONITH) 210 _drbd_request_state(mdev, NS(susp, 1), CS_WAIT_COMPLETE); 211 212 r = drbd_khelper(mdev, "fence-peer"); 213 214 switch ((r>>8) & 0xff) { 215 case 3: /* peer is inconsistent */ 216 ex_to_string = "peer is inconsistent or worse"; 217 nps = D_INCONSISTENT; 218 break; 219 case 4: /* peer got outdated, or was already outdated */ 220 ex_to_string = "peer was fenced"; 221 nps = D_OUTDATED; 222 break; 223 case 5: /* peer was down */ 224 if (mdev->state.disk == D_UP_TO_DATE) { 225 /* we will(have) create(d) a new UUID anyways... */ 226 ex_to_string = "peer is unreachable, assumed to be dead"; 227 nps = D_OUTDATED; 228 } else { 229 ex_to_string = "peer unreachable, doing nothing since disk != UpToDate"; 230 nps = mdev->state.pdsk; 231 } 232 break; 233 case 6: /* Peer is primary, voluntarily outdate myself. 234 * This is useful when an unconnected R_SECONDARY is asked to 235 * become R_PRIMARY, but finds the other peer being active. */ 236 ex_to_string = "peer is active"; 237 dev_warn(DEV, "Peer is primary, outdating myself.\n"); 238 nps = D_UNKNOWN; 239 _drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE); 240 break; 241 case 7: 242 if (fp != FP_STONITH) 243 dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n"); 244 ex_to_string = "peer was stonithed"; 245 nps = D_OUTDATED; 246 break; 247 default: 248 /* The script is broken ... */ 249 nps = D_UNKNOWN; 250 dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff); 251 return nps; 252 } 253 254 dev_info(DEV, "fence-peer helper returned %d (%s)\n", 255 (r>>8) & 0xff, ex_to_string); 256 return nps; 257} 258 259 260int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) 261{ 262 const int max_tries = 4; 263 int r = 0; 264 int try = 0; 265 int forced = 0; 266 union drbd_state mask, val; 267 enum drbd_disk_state nps; 268 269 if (new_role == R_PRIMARY) 270 request_ping(mdev); /* Detect a dead peer ASAP */ 271 272 mutex_lock(&mdev->state_mutex); 273 274 mask.i = 0; mask.role = R_MASK; 275 val.i = 0; val.role = new_role; 276 277 while (try++ < max_tries) { 278 r = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE); 279 280 /* in case we first succeeded to outdate, 281 * but now suddenly could establish a connection */ 282 if (r == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) { 283 val.pdsk = 0; 284 mask.pdsk = 0; 285 continue; 286 } 287 288 if (r == SS_NO_UP_TO_DATE_DISK && force && 289 (mdev->state.disk < D_UP_TO_DATE && 290 mdev->state.disk >= D_INCONSISTENT)) { 291 mask.disk = D_MASK; 292 val.disk = D_UP_TO_DATE; 293 forced = 1; 294 continue; 295 } 296 297 if (r == SS_NO_UP_TO_DATE_DISK && 298 mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) { 299 D_ASSERT(mdev->state.pdsk == D_UNKNOWN); 300 nps = drbd_try_outdate_peer(mdev); 301 302 if (nps == D_OUTDATED || nps == D_INCONSISTENT) { 303 val.disk = D_UP_TO_DATE; 304 mask.disk = D_MASK; 305 } 306 307 val.pdsk = nps; 308 mask.pdsk = D_MASK; 309 310 continue; 311 } 312 313 if (r == SS_NOTHING_TO_DO) 314 goto fail; 315 if (r == SS_PRIMARY_NOP && mask.pdsk == 0) { 316 nps = drbd_try_outdate_peer(mdev); 317 318 if (force && nps > D_OUTDATED) { 319 dev_warn(DEV, "Forced into split brain situation!\n"); 320 nps = D_OUTDATED; 321 } 322 323 mask.pdsk = D_MASK; 324 val.pdsk = nps; 325 326 continue; 327 } 328 if (r == SS_TWO_PRIMARIES) { 329 /* Maybe the peer is detected as dead very soon... 330 retry at most once more in this case. */ 331 __set_current_state(TASK_INTERRUPTIBLE); 332 schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10); 333 if (try < max_tries) 334 try = max_tries - 1; 335 continue; 336 } 337 if (r < SS_SUCCESS) { 338 r = _drbd_request_state(mdev, mask, val, 339 CS_VERBOSE + CS_WAIT_COMPLETE); 340 if (r < SS_SUCCESS) 341 goto fail; 342 } 343 break; 344 } 345 346 if (r < SS_SUCCESS) 347 goto fail; 348 349 if (forced) 350 dev_warn(DEV, "Forced to consider local data as UpToDate!\n"); 351 352 /* Wait until nothing is on the fly :) */ 353 wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0); 354 355 if (new_role == R_SECONDARY) { 356 set_disk_ro(mdev->vdisk, TRUE); 357 if (get_ldev(mdev)) { 358 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; 359 put_ldev(mdev); 360 } 361 } else { 362 if (get_net_conf(mdev)) { 363 mdev->net_conf->want_lose = 0; 364 put_net_conf(mdev); 365 } 366 set_disk_ro(mdev->vdisk, FALSE); 367 if (get_ldev(mdev)) { 368 if (((mdev->state.conn < C_CONNECTED || 369 mdev->state.pdsk <= D_FAILED) 370 && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced) 371 drbd_uuid_new_current(mdev); 372 373 mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1; 374 put_ldev(mdev); 375 } 376 } 377 378 if ((new_role == R_SECONDARY) && get_ldev(mdev)) { 379 drbd_al_to_on_disk_bm(mdev); 380 put_ldev(mdev); 381 } 382 383 if (mdev->state.conn >= C_WF_REPORT_PARAMS) { 384 /* if this was forced, we should consider sync */ 385 if (forced) 386 drbd_send_uuids(mdev); 387 drbd_send_state(mdev); 388 } 389 390 drbd_md_sync(mdev); 391 392 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); 393 fail: 394 mutex_unlock(&mdev->state_mutex); 395 return r; 396} 397 398 399static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 400 struct drbd_nl_cfg_reply *reply) 401{ 402 struct primary primary_args; 403 404 memset(&primary_args, 0, sizeof(struct primary)); 405 if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) { 406 reply->ret_code = ERR_MANDATORY_TAG; 407 return 0; 408 } 409 410 reply->ret_code = 411 drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force); 412 413 return 0; 414} 415 416static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 417 struct drbd_nl_cfg_reply *reply) 418{ 419 reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0); 420 421 return 0; 422} 423 424/* initializes the md.*_offset members, so we are able to find 425 * the on disk meta data */ 426static void drbd_md_set_sector_offsets(struct drbd_conf *mdev, 427 struct drbd_backing_dev *bdev) 428{ 429 sector_t md_size_sect = 0; 430 switch (bdev->dc.meta_dev_idx) { 431 default: 432 /* v07 style fixed size indexed meta data */ 433 bdev->md.md_size_sect = MD_RESERVED_SECT; 434 bdev->md.md_offset = drbd_md_ss__(mdev, bdev); 435 bdev->md.al_offset = MD_AL_OFFSET; 436 bdev->md.bm_offset = MD_BM_OFFSET; 437 break; 438 case DRBD_MD_INDEX_FLEX_EXT: 439 /* just occupy the full device; unit: sectors */ 440 bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev); 441 bdev->md.md_offset = 0; 442 bdev->md.al_offset = MD_AL_OFFSET; 443 bdev->md.bm_offset = MD_BM_OFFSET; 444 break; 445 case DRBD_MD_INDEX_INTERNAL: 446 case DRBD_MD_INDEX_FLEX_INT: 447 bdev->md.md_offset = drbd_md_ss__(mdev, bdev); 448 /* al size is still fixed */ 449 bdev->md.al_offset = -MD_AL_MAX_SIZE; 450 /* we need (slightly less than) ~ this much bitmap sectors: */ 451 md_size_sect = drbd_get_capacity(bdev->backing_bdev); 452 md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT); 453 md_size_sect = BM_SECT_TO_EXT(md_size_sect); 454 md_size_sect = ALIGN(md_size_sect, 8); 455 456 /* plus the "drbd meta data super block", 457 * and the activity log; */ 458 md_size_sect += MD_BM_OFFSET; 459 460 bdev->md.md_size_sect = md_size_sect; 461 /* bitmap offset is adjusted by 'super' block size */ 462 bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET; 463 break; 464 } 465} 466 467char *ppsize(char *buf, unsigned long long size) 468{ 469 /* Needs 9 bytes at max. */ 470 static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' }; 471 int base = 0; 472 while (size >= 10000) { 473 /* shift + round */ 474 size = (size >> 10) + !!(size & (1<<9)); 475 base++; 476 } 477 sprintf(buf, "%lu %cB", (long)size, units[base]); 478 479 return buf; 480} 481 482/* there is still a theoretical deadlock when called from receiver 483 * on an D_INCONSISTENT R_PRIMARY: 484 * remote READ does inc_ap_bio, receiver would need to receive answer 485 * packet from remote to dec_ap_bio again. 486 * receiver receive_sizes(), comes here, 487 * waits for ap_bio_cnt == 0. -> deadlock. 488 * but this cannot happen, actually, because: 489 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable 490 * (not connected, or bad/no disk on peer): 491 * see drbd_fail_request_early, ap_bio_cnt is zero. 492 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET: 493 * peer may not initiate a resize. 494 */ 495void drbd_suspend_io(struct drbd_conf *mdev) 496{ 497 set_bit(SUSPEND_IO, &mdev->flags); 498 if (mdev->state.susp) 499 return; 500 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt)); 501} 502 503void drbd_resume_io(struct drbd_conf *mdev) 504{ 505 clear_bit(SUSPEND_IO, &mdev->flags); 506 wake_up(&mdev->misc_wait); 507} 508 509/** 510 * drbd_determine_dev_size() - Sets the right device size obeying all constraints 511 * @mdev: DRBD device. 512 * 513 * Returns 0 on success, negative return values indicate errors. 514 * You should call drbd_md_sync() after calling this function. 515 */ 516enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local) 517{ 518 sector_t prev_first_sect, prev_size; /* previous meta location */ 519 sector_t la_size; 520 sector_t size; 521 char ppb[10]; 522 523 int md_moved, la_size_changed; 524 enum determine_dev_size rv = unchanged; 525 526 /* race: 527 * application request passes inc_ap_bio, 528 * but then cannot get an AL-reference. 529 * this function later may wait on ap_bio_cnt == 0. -> deadlock. 530 * 531 * to avoid that: 532 * Suspend IO right here. 533 * still lock the act_log to not trigger ASSERTs there. 534 */ 535 drbd_suspend_io(mdev); 536 537 /* no wait necessary anymore, actually we could assert that */ 538 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log)); 539 540 prev_first_sect = drbd_md_first_sector(mdev->ldev); 541 prev_size = mdev->ldev->md.md_size_sect; 542 la_size = mdev->ldev->md.la_size_sect; 543 544 /* TODO: should only be some assert here, not (re)init... */ 545 drbd_md_set_sector_offsets(mdev, mdev->ldev); 546 547 size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED); 548 549 if (drbd_get_capacity(mdev->this_bdev) != size || 550 drbd_bm_capacity(mdev) != size) { 551 int err; 552 err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC)); 553 if (unlikely(err)) { 554 /* currently there is only one error: ENOMEM! */ 555 size = drbd_bm_capacity(mdev)>>1; 556 if (size == 0) { 557 dev_err(DEV, "OUT OF MEMORY! " 558 "Could not allocate bitmap!\n"); 559 } else { 560 dev_err(DEV, "BM resizing failed. " 561 "Leaving size unchanged at size = %lu KB\n", 562 (unsigned long)size); 563 } 564 rv = dev_size_error; 565 } 566 /* racy, see comments above. */ 567 drbd_set_my_capacity(mdev, size); 568 mdev->ldev->md.la_size_sect = size; 569 dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1), 570 (unsigned long long)size>>1); 571 } 572 if (rv == dev_size_error) 573 goto out; 574 575 la_size_changed = (la_size != mdev->ldev->md.la_size_sect); 576 577 md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev) 578 || prev_size != mdev->ldev->md.md_size_sect; 579 580 if (la_size_changed || md_moved) { 581 drbd_al_shrink(mdev); /* All extents inactive. */ 582 dev_info(DEV, "Writing the whole bitmap, %s\n", 583 la_size_changed && md_moved ? "size changed and md moved" : 584 la_size_changed ? "size changed" : "md moved"); 585 rv = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! */ 586 drbd_md_mark_dirty(mdev); 587 } 588 589 if (size > la_size) 590 rv = grew; 591 if (size < la_size) 592 rv = shrunk; 593out: 594 lc_unlock(mdev->act_log); 595 wake_up(&mdev->al_wait); 596 drbd_resume_io(mdev); 597 598 return rv; 599} 600 601sector_t 602drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space) 603{ 604 sector_t p_size = mdev->p_size; /* partner's disk size. */ 605 sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */ 606 sector_t m_size; /* my size */ 607 sector_t u_size = bdev->dc.disk_size; /* size requested by user. */ 608 sector_t size = 0; 609 610 m_size = drbd_get_max_capacity(bdev); 611 612 if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) { 613 dev_warn(DEV, "Resize while not connected was forced by the user!\n"); 614 p_size = m_size; 615 } 616 617 if (p_size && m_size) { 618 size = min_t(sector_t, p_size, m_size); 619 } else { 620 if (la_size) { 621 size = la_size; 622 if (m_size && m_size < size) 623 size = m_size; 624 if (p_size && p_size < size) 625 size = p_size; 626 } else { 627 if (m_size) 628 size = m_size; 629 if (p_size) 630 size = p_size; 631 } 632 } 633 634 if (size == 0) 635 dev_err(DEV, "Both nodes diskless!\n"); 636 637 if (u_size) { 638 if (u_size > size) 639 dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n", 640 (unsigned long)u_size>>1, (unsigned long)size>>1); 641 else 642 size = u_size; 643 } 644 645 return size; 646} 647 648/** 649 * drbd_check_al_size() - Ensures that the AL is of the right size 650 * @mdev: DRBD device. 651 * 652 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation 653 * failed, and 0 on success. You should call drbd_md_sync() after you called 654 * this function. 655 */ 656static int drbd_check_al_size(struct drbd_conf *mdev) 657{ 658 struct lru_cache *n, *t; 659 struct lc_element *e; 660 unsigned int in_use; 661 int i; 662 663 ERR_IF(mdev->sync_conf.al_extents < 7) 664 mdev->sync_conf.al_extents = 127; 665 666 if (mdev->act_log && 667 mdev->act_log->nr_elements == mdev->sync_conf.al_extents) 668 return 0; 669 670 in_use = 0; 671 t = mdev->act_log; 672 n = lc_create("act_log", drbd_al_ext_cache, 673 mdev->sync_conf.al_extents, sizeof(struct lc_element), 0); 674 675 if (n == NULL) { 676 dev_err(DEV, "Cannot allocate act_log lru!\n"); 677 return -ENOMEM; 678 } 679 spin_lock_irq(&mdev->al_lock); 680 if (t) { 681 for (i = 0; i < t->nr_elements; i++) { 682 e = lc_element_by_index(t, i); 683 if (e->refcnt) 684 dev_err(DEV, "refcnt(%d)==%d\n", 685 e->lc_number, e->refcnt); 686 in_use += e->refcnt; 687 } 688 } 689 if (!in_use) 690 mdev->act_log = n; 691 spin_unlock_irq(&mdev->al_lock); 692 if (in_use) { 693 dev_err(DEV, "Activity log still in use!\n"); 694 lc_destroy(n); 695 return -EBUSY; 696 } else { 697 if (t) 698 lc_destroy(t); 699 } 700 drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elemens */ 701 return 0; 702} 703 704void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __must_hold(local) 705{ 706 struct request_queue * const q = mdev->rq_queue; 707 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; 708 int max_segments = mdev->ldev->dc.max_bio_bvecs; 709 710 max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s); 711 712 blk_queue_max_hw_sectors(q, max_seg_s >> 9); 713 blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); 714 blk_queue_max_segment_size(q, max_seg_s); 715 blk_queue_logical_block_size(q, 512); 716 blk_queue_segment_boundary(q, PAGE_SIZE-1); 717 blk_stack_limits(&q->limits, &b->limits, 0); 718 719 if (b->merge_bvec_fn) 720 dev_warn(DEV, "Backing device's merge_bvec_fn() = %p\n", 721 b->merge_bvec_fn); 722 dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q)); 723 724 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { 725 dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n", 726 q->backing_dev_info.ra_pages, 727 b->backing_dev_info.ra_pages); 728 q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages; 729 } 730} 731 732/* serialize deconfig (worker exiting, doing cleanup) 733 * and reconfig (drbdsetup disk, drbdsetup net) 734 * 735 * wait for a potentially exiting worker, then restart it, 736 * or start a new one. 737 */ 738static void drbd_reconfig_start(struct drbd_conf *mdev) 739{ 740 wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags)); 741 wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags)); 742 drbd_thread_start(&mdev->worker); 743} 744 745/* if still unconfigured, stops worker again. 746 * if configured now, clears CONFIG_PENDING. 747 * wakes potential waiters */ 748static void drbd_reconfig_done(struct drbd_conf *mdev) 749{ 750 spin_lock_irq(&mdev->req_lock); 751 if (mdev->state.disk == D_DISKLESS && 752 mdev->state.conn == C_STANDALONE && 753 mdev->state.role == R_SECONDARY) { 754 set_bit(DEVICE_DYING, &mdev->flags); 755 drbd_thread_stop_nowait(&mdev->worker); 756 } else 757 clear_bit(CONFIG_PENDING, &mdev->flags); 758 spin_unlock_irq(&mdev->req_lock); 759 wake_up(&mdev->state_wait); 760} 761 762/* does always return 0; 763 * interesting return code is in reply->ret_code */ 764static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 765 struct drbd_nl_cfg_reply *reply) 766{ 767 enum drbd_ret_codes retcode; 768 enum determine_dev_size dd; 769 sector_t max_possible_sectors; 770 sector_t min_md_device_sectors; 771 struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */ 772 struct inode *inode, *inode2; 773 struct lru_cache *resync_lru = NULL; 774 union drbd_state ns, os; 775 int rv; 776 int cp_discovered = 0; 777 int logical_block_size; 778 779 drbd_reconfig_start(mdev); 780 781 /* if you want to reconfigure, please tear down first */ 782 if (mdev->state.disk > D_DISKLESS) { 783 retcode = ERR_DISK_CONFIGURED; 784 goto fail; 785 } 786 787 /* allocation not in the IO path, cqueue thread context */ 788 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL); 789 if (!nbc) { 790 retcode = ERR_NOMEM; 791 goto fail; 792 } 793 794 nbc->dc.disk_size = DRBD_DISK_SIZE_SECT_DEF; 795 nbc->dc.on_io_error = DRBD_ON_IO_ERROR_DEF; 796 nbc->dc.fencing = DRBD_FENCING_DEF; 797 nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF; 798 799 if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) { 800 retcode = ERR_MANDATORY_TAG; 801 goto fail; 802 } 803 804 if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) { 805 retcode = ERR_MD_IDX_INVALID; 806 goto fail; 807 } 808 809 if (get_net_conf(mdev)) { 810 int prot = mdev->net_conf->wire_protocol; 811 put_net_conf(mdev); 812 if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) { 813 retcode = ERR_STONITH_AND_PROT_A; 814 goto fail; 815 } 816 } 817 818 nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0); 819 if (IS_ERR(nbc->lo_file)) { 820 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev, 821 PTR_ERR(nbc->lo_file)); 822 nbc->lo_file = NULL; 823 retcode = ERR_OPEN_DISK; 824 goto fail; 825 } 826 827 inode = nbc->lo_file->f_dentry->d_inode; 828 829 if (!S_ISBLK(inode->i_mode)) { 830 retcode = ERR_DISK_NOT_BDEV; 831 goto fail; 832 } 833 834 nbc->md_file = filp_open(nbc->dc.meta_dev, O_RDWR, 0); 835 if (IS_ERR(nbc->md_file)) { 836 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev, 837 PTR_ERR(nbc->md_file)); 838 nbc->md_file = NULL; 839 retcode = ERR_OPEN_MD_DISK; 840 goto fail; 841 } 842 843 inode2 = nbc->md_file->f_dentry->d_inode; 844 845 if (!S_ISBLK(inode2->i_mode)) { 846 retcode = ERR_MD_NOT_BDEV; 847 goto fail; 848 } 849 850 nbc->backing_bdev = inode->i_bdev; 851 if (bd_claim(nbc->backing_bdev, mdev)) { 852 printk(KERN_ERR "drbd: bd_claim(%p,%p); failed [%p;%p;%u]\n", 853 nbc->backing_bdev, mdev, 854 nbc->backing_bdev->bd_holder, 855 nbc->backing_bdev->bd_contains->bd_holder, 856 nbc->backing_bdev->bd_holders); 857 retcode = ERR_BDCLAIM_DISK; 858 goto fail; 859 } 860 861 resync_lru = lc_create("resync", drbd_bm_ext_cache, 862 61, sizeof(struct bm_extent), 863 offsetof(struct bm_extent, lce)); 864 if (!resync_lru) { 865 retcode = ERR_NOMEM; 866 goto release_bdev_fail; 867 } 868 869 /* meta_dev_idx >= 0: external fixed size, 870 * possibly multiple drbd sharing one meta device. 871 * TODO in that case, paranoia check that [md_bdev, meta_dev_idx] is 872 * not yet used by some other drbd minor! 873 * (if you use drbd.conf + drbdadm, 874 * that should check it for you already; but if you don't, or someone 875 * fooled it, we need to double check here) */ 876 nbc->md_bdev = inode2->i_bdev; 877 if (bd_claim(nbc->md_bdev, (nbc->dc.meta_dev_idx < 0) ? (void *)mdev 878 : (void *) drbd_m_holder)) { 879 retcode = ERR_BDCLAIM_MD_DISK; 880 goto release_bdev_fail; 881 } 882 883 if ((nbc->backing_bdev == nbc->md_bdev) != 884 (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL || 885 nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) { 886 retcode = ERR_MD_IDX_INVALID; 887 goto release_bdev2_fail; 888 } 889 890 /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */ 891 drbd_md_set_sector_offsets(mdev, nbc); 892 893 if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) { 894 dev_err(DEV, "max capacity %llu smaller than disk size %llu\n", 895 (unsigned long long) drbd_get_max_capacity(nbc), 896 (unsigned long long) nbc->dc.disk_size); 897 retcode = ERR_DISK_TO_SMALL; 898 goto release_bdev2_fail; 899 } 900 901 if (nbc->dc.meta_dev_idx < 0) { 902 max_possible_sectors = DRBD_MAX_SECTORS_FLEX; 903 /* at least one MB, otherwise it does not make sense */ 904 min_md_device_sectors = (2<<10); 905 } else { 906 max_possible_sectors = DRBD_MAX_SECTORS; 907 min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1); 908 } 909 910 if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) { 911 retcode = ERR_MD_DISK_TO_SMALL; 912 dev_warn(DEV, "refusing attach: md-device too small, " 913 "at least %llu sectors needed for this meta-disk type\n", 914 (unsigned long long) min_md_device_sectors); 915 goto release_bdev2_fail; 916 } 917 918 /* Make sure the new disk is big enough 919 * (we may currently be R_PRIMARY with no local disk...) */ 920 if (drbd_get_max_capacity(nbc) < 921 drbd_get_capacity(mdev->this_bdev)) { 922 retcode = ERR_DISK_TO_SMALL; 923 goto release_bdev2_fail; 924 } 925 926 nbc->known_size = drbd_get_capacity(nbc->backing_bdev); 927 928 if (nbc->known_size > max_possible_sectors) { 929 dev_warn(DEV, "==> truncating very big lower level device " 930 "to currently maximum possible %llu sectors <==\n", 931 (unsigned long long) max_possible_sectors); 932 if (nbc->dc.meta_dev_idx >= 0) 933 dev_warn(DEV, "==>> using internal or flexible " 934 "meta data may help <<==\n"); 935 } 936 937 drbd_suspend_io(mdev); 938 /* also wait for the last barrier ack. */ 939 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt)); 940 /* and for any other previously queued work */ 941 drbd_flush_workqueue(mdev); 942 943 retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE); 944 drbd_resume_io(mdev); 945 if (retcode < SS_SUCCESS) 946 goto release_bdev2_fail; 947 948 if (!get_ldev_if_state(mdev, D_ATTACHING)) 949 goto force_diskless; 950 951 drbd_md_set_sector_offsets(mdev, nbc); 952 953 /* allocate a second IO page if logical_block_size != 512 */ 954 logical_block_size = bdev_logical_block_size(nbc->md_bdev); 955 if (logical_block_size == 0) 956 logical_block_size = MD_SECTOR_SIZE; 957 958 if (logical_block_size != MD_SECTOR_SIZE) { 959 if (!mdev->md_io_tmpp) { 960 struct page *page = alloc_page(GFP_NOIO); 961 if (!page) 962 goto force_diskless_dec; 963 964 dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n", 965 logical_block_size, MD_SECTOR_SIZE); 966 dev_warn(DEV, "Workaround engaged (has performance impact).\n"); 967 968 mdev->md_io_tmpp = page; 969 } 970 } 971 972 if (!mdev->bitmap) { 973 if (drbd_bm_init(mdev)) { 974 retcode = ERR_NOMEM; 975 goto force_diskless_dec; 976 } 977 } 978 979 retcode = drbd_md_read(mdev, nbc); 980 if (retcode != NO_ERROR) 981 goto force_diskless_dec; 982 983 if (mdev->state.conn < C_CONNECTED && 984 mdev->state.role == R_PRIMARY && 985 (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) { 986 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n", 987 (unsigned long long)mdev->ed_uuid); 988 retcode = ERR_DATA_NOT_CURRENT; 989 goto force_diskless_dec; 990 } 991 992 /* Since we are diskless, fix the activity log first... */ 993 if (drbd_check_al_size(mdev)) { 994 retcode = ERR_NOMEM; 995 goto force_diskless_dec; 996 } 997 998 /* Prevent shrinking of consistent devices ! */ 999 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) && 1000 drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) { 1001 dev_warn(DEV, "refusing to truncate a consistent device\n"); 1002 retcode = ERR_DISK_TO_SMALL; 1003 goto force_diskless_dec; 1004 } 1005 1006 if (!drbd_al_read_log(mdev, nbc)) { 1007 retcode = ERR_IO_MD_DISK; 1008 goto force_diskless_dec; 1009 } 1010 1011 /* Reset the "barriers don't work" bits here, then force meta data to 1012 * be written, to ensure we determine if barriers are supported. */ 1013 if (nbc->dc.no_md_flush) 1014 set_bit(MD_NO_BARRIER, &mdev->flags); 1015 else 1016 clear_bit(MD_NO_BARRIER, &mdev->flags); 1017 1018 /* Point of no return reached. 1019 * Devices and memory are no longer released by error cleanup below. 1020 * now mdev takes over responsibility, and the state engine should 1021 * clean it up somewhere. */ 1022 D_ASSERT(mdev->ldev == NULL); 1023 mdev->ldev = nbc; 1024 mdev->resync = resync_lru; 1025 nbc = NULL; 1026 resync_lru = NULL; 1027 1028 mdev->write_ordering = WO_bio_barrier; 1029 drbd_bump_write_ordering(mdev, WO_bio_barrier); 1030 1031 if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY)) 1032 set_bit(CRASHED_PRIMARY, &mdev->flags); 1033 else 1034 clear_bit(CRASHED_PRIMARY, &mdev->flags); 1035 1036 if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) && 1037 !(mdev->state.role == R_PRIMARY && mdev->state.susp && 1038 mdev->sync_conf.on_no_data == OND_SUSPEND_IO)) { 1039 set_bit(CRASHED_PRIMARY, &mdev->flags); 1040 cp_discovered = 1; 1041 } 1042 1043 mdev->send_cnt = 0; 1044 mdev->recv_cnt = 0; 1045 mdev->read_cnt = 0; 1046 mdev->writ_cnt = 0; 1047 1048 drbd_setup_queue_param(mdev, DRBD_MAX_SEGMENT_SIZE); 1049 1050 /* If I am currently not R_PRIMARY, 1051 * but meta data primary indicator is set, 1052 * I just now recover from a hard crash, 1053 * and have been R_PRIMARY before that crash. 1054 * 1055 * Now, if I had no connection before that crash 1056 * (have been degraded R_PRIMARY), chances are that 1057 * I won't find my peer now either. 1058 * 1059 * In that case, and _only_ in that case, 1060 * we use the degr-wfc-timeout instead of the default, 1061 * so we can automatically recover from a crash of a 1062 * degraded but active "cluster" after a certain timeout. 1063 */ 1064 clear_bit(USE_DEGR_WFC_T, &mdev->flags); 1065 if (mdev->state.role != R_PRIMARY && 1066 drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) && 1067 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND)) 1068 set_bit(USE_DEGR_WFC_T, &mdev->flags); 1069 1070 dd = drbd_determin_dev_size(mdev, 0); 1071 if (dd == dev_size_error) { 1072 retcode = ERR_NOMEM_BITMAP; 1073 goto force_diskless_dec; 1074 } else if (dd == grew) 1075 set_bit(RESYNC_AFTER_NEG, &mdev->flags); 1076 1077 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) { 1078 dev_info(DEV, "Assuming that all blocks are out of sync " 1079 "(aka FullSync)\n"); 1080 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) { 1081 retcode = ERR_IO_MD_DISK; 1082 goto force_diskless_dec; 1083 } 1084 } else { 1085 if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) { 1086 retcode = ERR_IO_MD_DISK; 1087 goto force_diskless_dec; 1088 } 1089 } 1090 1091 if (cp_discovered) { 1092 drbd_al_apply_to_bm(mdev); 1093 drbd_al_to_on_disk_bm(mdev); 1094 } 1095 1096 spin_lock_irq(&mdev->req_lock); 1097 os = mdev->state; 1098 ns.i = os.i; 1099 /* If MDF_CONSISTENT is not set go into inconsistent state, 1100 otherwise investigate MDF_WasUpToDate... 1101 If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state, 1102 otherwise into D_CONSISTENT state. 1103 */ 1104 if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) { 1105 if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE)) 1106 ns.disk = D_CONSISTENT; 1107 else 1108 ns.disk = D_OUTDATED; 1109 } else { 1110 ns.disk = D_INCONSISTENT; 1111 } 1112 1113 if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED)) 1114 ns.pdsk = D_OUTDATED; 1115 1116 if ( ns.disk == D_CONSISTENT && 1117 (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE)) 1118 ns.disk = D_UP_TO_DATE; 1119 1120 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND, 1121 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before 1122 this point, because drbd_request_state() modifies these 1123 flags. */ 1124 1125 /* In case we are C_CONNECTED postpone any decision on the new disk 1126 state after the negotiation phase. */ 1127 if (mdev->state.conn == C_CONNECTED) { 1128 mdev->new_state_tmp.i = ns.i; 1129 ns.i = os.i; 1130 ns.disk = D_NEGOTIATING; 1131 1132 /* We expect to receive up-to-date UUIDs soon. 1133 To avoid a race in receive_state, free p_uuid while 1134 holding req_lock. I.e. atomic with the state change */ 1135 kfree(mdev->p_uuid); 1136 mdev->p_uuid = NULL; 1137 } 1138 1139 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL); 1140 ns = mdev->state; 1141 spin_unlock_irq(&mdev->req_lock); 1142 1143 if (rv < SS_SUCCESS) 1144 goto force_diskless_dec; 1145 1146 if (mdev->state.role == R_PRIMARY) 1147 mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1; 1148 else 1149 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; 1150 1151 drbd_md_mark_dirty(mdev); 1152 drbd_md_sync(mdev); 1153 1154 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); 1155 put_ldev(mdev); 1156 reply->ret_code = retcode; 1157 drbd_reconfig_done(mdev); 1158 return 0; 1159 1160 force_diskless_dec: 1161 put_ldev(mdev); 1162 force_diskless: 1163 drbd_force_state(mdev, NS(disk, D_DISKLESS)); 1164 drbd_md_sync(mdev); 1165 release_bdev2_fail: 1166 if (nbc) 1167 bd_release(nbc->md_bdev); 1168 release_bdev_fail: 1169 if (nbc) 1170 bd_release(nbc->backing_bdev); 1171 fail: 1172 if (nbc) { 1173 if (nbc->lo_file) 1174 fput(nbc->lo_file); 1175 if (nbc->md_file) 1176 fput(nbc->md_file); 1177 kfree(nbc); 1178 } 1179 lc_destroy(resync_lru); 1180 1181 reply->ret_code = retcode; 1182 drbd_reconfig_done(mdev); 1183 return 0; 1184} 1185 1186static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1187 struct drbd_nl_cfg_reply *reply) 1188{ 1189 reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS)); 1190 return 0; 1191} 1192 1193static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1194 struct drbd_nl_cfg_reply *reply) 1195{ 1196 int i, ns; 1197 enum drbd_ret_codes retcode; 1198 struct net_conf *new_conf = NULL; 1199 struct crypto_hash *tfm = NULL; 1200 struct crypto_hash *integrity_w_tfm = NULL; 1201 struct crypto_hash *integrity_r_tfm = NULL; 1202 struct hlist_head *new_tl_hash = NULL; 1203 struct hlist_head *new_ee_hash = NULL; 1204 struct drbd_conf *odev; 1205 char hmac_name[CRYPTO_MAX_ALG_NAME]; 1206 void *int_dig_out = NULL; 1207 void *int_dig_in = NULL; 1208 void *int_dig_vv = NULL; 1209 struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr; 1210 1211 drbd_reconfig_start(mdev); 1212 1213 if (mdev->state.conn > C_STANDALONE) { 1214 retcode = ERR_NET_CONFIGURED; 1215 goto fail; 1216 } 1217 1218 /* allocation not in the IO path, cqueue thread context */ 1219 new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL); 1220 if (!new_conf) { 1221 retcode = ERR_NOMEM; 1222 goto fail; 1223 } 1224 1225 new_conf->timeout = DRBD_TIMEOUT_DEF; 1226 new_conf->try_connect_int = DRBD_CONNECT_INT_DEF; 1227 new_conf->ping_int = DRBD_PING_INT_DEF; 1228 new_conf->max_epoch_size = DRBD_MAX_EPOCH_SIZE_DEF; 1229 new_conf->max_buffers = DRBD_MAX_BUFFERS_DEF; 1230 new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF; 1231 new_conf->sndbuf_size = DRBD_SNDBUF_SIZE_DEF; 1232 new_conf->rcvbuf_size = DRBD_RCVBUF_SIZE_DEF; 1233 new_conf->ko_count = DRBD_KO_COUNT_DEF; 1234 new_conf->after_sb_0p = DRBD_AFTER_SB_0P_DEF; 1235 new_conf->after_sb_1p = DRBD_AFTER_SB_1P_DEF; 1236 new_conf->after_sb_2p = DRBD_AFTER_SB_2P_DEF; 1237 new_conf->want_lose = 0; 1238 new_conf->two_primaries = 0; 1239 new_conf->wire_protocol = DRBD_PROT_C; 1240 new_conf->ping_timeo = DRBD_PING_TIMEO_DEF; 1241 new_conf->rr_conflict = DRBD_RR_CONFLICT_DEF; 1242 1243 if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) { 1244 retcode = ERR_MANDATORY_TAG; 1245 goto fail; 1246 } 1247 1248 if (new_conf->two_primaries 1249 && (new_conf->wire_protocol != DRBD_PROT_C)) { 1250 retcode = ERR_NOT_PROTO_C; 1251 goto fail; 1252 } 1253 1254 if (get_ldev(mdev)) { 1255 enum drbd_fencing_p fp = mdev->ldev->dc.fencing; 1256 put_ldev(mdev); 1257 if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) { 1258 retcode = ERR_STONITH_AND_PROT_A; 1259 goto fail; 1260 } 1261 } 1262 1263 if (mdev->state.role == R_PRIMARY && new_conf->want_lose) { 1264 retcode = ERR_DISCARD; 1265 goto fail; 1266 } 1267 1268 retcode = NO_ERROR; 1269 1270 new_my_addr = (struct sockaddr *)&new_conf->my_addr; 1271 new_peer_addr = (struct sockaddr *)&new_conf->peer_addr; 1272 for (i = 0; i < minor_count; i++) { 1273 odev = minor_to_mdev(i); 1274 if (!odev || odev == mdev) 1275 continue; 1276 if (get_net_conf(odev)) { 1277 taken_addr = (struct sockaddr *)&odev->net_conf->my_addr; 1278 if (new_conf->my_addr_len == odev->net_conf->my_addr_len && 1279 !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len)) 1280 retcode = ERR_LOCAL_ADDR; 1281 1282 taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr; 1283 if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len && 1284 !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len)) 1285 retcode = ERR_PEER_ADDR; 1286 1287 put_net_conf(odev); 1288 if (retcode != NO_ERROR) 1289 goto fail; 1290 } 1291 } 1292 1293 if (new_conf->cram_hmac_alg[0] != 0) { 1294 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", 1295 new_conf->cram_hmac_alg); 1296 tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC); 1297 if (IS_ERR(tfm)) { 1298 tfm = NULL; 1299 retcode = ERR_AUTH_ALG; 1300 goto fail; 1301 } 1302 1303 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) { 1304 retcode = ERR_AUTH_ALG_ND; 1305 goto fail; 1306 } 1307 } 1308 1309 if (new_conf->integrity_alg[0]) { 1310 integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC); 1311 if (IS_ERR(integrity_w_tfm)) { 1312 integrity_w_tfm = NULL; 1313 retcode=ERR_INTEGRITY_ALG; 1314 goto fail; 1315 } 1316 1317 if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) { 1318 retcode=ERR_INTEGRITY_ALG_ND; 1319 goto fail; 1320 } 1321 1322 integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC); 1323 if (IS_ERR(integrity_r_tfm)) { 1324 integrity_r_tfm = NULL; 1325 retcode=ERR_INTEGRITY_ALG; 1326 goto fail; 1327 } 1328 } 1329 1330 ns = new_conf->max_epoch_size/8; 1331 if (mdev->tl_hash_s != ns) { 1332 new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL); 1333 if (!new_tl_hash) { 1334 retcode = ERR_NOMEM; 1335 goto fail; 1336 } 1337 } 1338 1339 ns = new_conf->max_buffers/8; 1340 if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) { 1341 new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL); 1342 if (!new_ee_hash) { 1343 retcode = ERR_NOMEM; 1344 goto fail; 1345 } 1346 } 1347 1348 ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0; 1349 1350 if (integrity_w_tfm) { 1351 i = crypto_hash_digestsize(integrity_w_tfm); 1352 int_dig_out = kmalloc(i, GFP_KERNEL); 1353 if (!int_dig_out) { 1354 retcode = ERR_NOMEM; 1355 goto fail; 1356 } 1357 int_dig_in = kmalloc(i, GFP_KERNEL); 1358 if (!int_dig_in) { 1359 retcode = ERR_NOMEM; 1360 goto fail; 1361 } 1362 int_dig_vv = kmalloc(i, GFP_KERNEL); 1363 if (!int_dig_vv) { 1364 retcode = ERR_NOMEM; 1365 goto fail; 1366 } 1367 } 1368 1369 if (!mdev->bitmap) { 1370 if(drbd_bm_init(mdev)) { 1371 retcode = ERR_NOMEM; 1372 goto fail; 1373 } 1374 } 1375 1376 spin_lock_irq(&mdev->req_lock); 1377 if (mdev->net_conf != NULL) { 1378 retcode = ERR_NET_CONFIGURED; 1379 spin_unlock_irq(&mdev->req_lock); 1380 goto fail; 1381 } 1382 mdev->net_conf = new_conf; 1383 1384 mdev->send_cnt = 0; 1385 mdev->recv_cnt = 0; 1386 1387 if (new_tl_hash) { 1388 kfree(mdev->tl_hash); 1389 mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8; 1390 mdev->tl_hash = new_tl_hash; 1391 } 1392 1393 if (new_ee_hash) { 1394 kfree(mdev->ee_hash); 1395 mdev->ee_hash_s = mdev->net_conf->max_buffers/8; 1396 mdev->ee_hash = new_ee_hash; 1397 } 1398 1399 crypto_free_hash(mdev->cram_hmac_tfm); 1400 mdev->cram_hmac_tfm = tfm; 1401 1402 crypto_free_hash(mdev->integrity_w_tfm); 1403 mdev->integrity_w_tfm = integrity_w_tfm; 1404 1405 crypto_free_hash(mdev->integrity_r_tfm); 1406 mdev->integrity_r_tfm = integrity_r_tfm; 1407 1408 kfree(mdev->int_dig_out); 1409 kfree(mdev->int_dig_in); 1410 kfree(mdev->int_dig_vv); 1411 mdev->int_dig_out=int_dig_out; 1412 mdev->int_dig_in=int_dig_in; 1413 mdev->int_dig_vv=int_dig_vv; 1414 spin_unlock_irq(&mdev->req_lock); 1415 1416 retcode = _drbd_request_state(mdev, NS(conn, C_UNCONNECTED), CS_VERBOSE); 1417 1418 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); 1419 reply->ret_code = retcode; 1420 drbd_reconfig_done(mdev); 1421 return 0; 1422 1423fail: 1424 kfree(int_dig_out); 1425 kfree(int_dig_in); 1426 kfree(int_dig_vv); 1427 crypto_free_hash(tfm); 1428 crypto_free_hash(integrity_w_tfm); 1429 crypto_free_hash(integrity_r_tfm); 1430 kfree(new_tl_hash); 1431 kfree(new_ee_hash); 1432 kfree(new_conf); 1433 1434 reply->ret_code = retcode; 1435 drbd_reconfig_done(mdev); 1436 return 0; 1437} 1438 1439static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1440 struct drbd_nl_cfg_reply *reply) 1441{ 1442 int retcode; 1443 1444 retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED); 1445 1446 if (retcode == SS_NOTHING_TO_DO) 1447 goto done; 1448 else if (retcode == SS_ALREADY_STANDALONE) 1449 goto done; 1450 else if (retcode == SS_PRIMARY_NOP) { 1451 /* Our statche checking code wants to see the peer outdated. */ 1452 retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING, 1453 pdsk, D_OUTDATED)); 1454 } else if (retcode == SS_CW_FAILED_BY_PEER) { 1455 /* The peer probably wants to see us outdated. */ 1456 retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING, 1457 disk, D_OUTDATED), 1458 CS_ORDERED); 1459 if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) { 1460 drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 1461 retcode = SS_SUCCESS; 1462 } 1463 } 1464 1465 if (retcode < SS_SUCCESS) 1466 goto fail; 1467 1468 if (wait_event_interruptible(mdev->state_wait, 1469 mdev->state.conn != C_DISCONNECTING)) { 1470 /* Do not test for mdev->state.conn == C_STANDALONE, since 1471 someone else might connect us in the mean time! */ 1472 retcode = ERR_INTR; 1473 goto fail; 1474 } 1475 1476 done: 1477 retcode = NO_ERROR; 1478 fail: 1479 drbd_md_sync(mdev); 1480 reply->ret_code = retcode; 1481 return 0; 1482} 1483 1484void resync_after_online_grow(struct drbd_conf *mdev) 1485{ 1486 int iass; /* I am sync source */ 1487 1488 dev_info(DEV, "Resync of new storage after online grow\n"); 1489 if (mdev->state.role != mdev->state.peer) 1490 iass = (mdev->state.role == R_PRIMARY); 1491 else 1492 iass = test_bit(DISCARD_CONCURRENT, &mdev->flags); 1493 1494 if (iass) 1495 drbd_start_resync(mdev, C_SYNC_SOURCE); 1496 else 1497 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE); 1498} 1499 1500static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1501 struct drbd_nl_cfg_reply *reply) 1502{ 1503 struct resize rs; 1504 int retcode = NO_ERROR; 1505 enum determine_dev_size dd; 1506 enum dds_flags ddsf; 1507 1508 memset(&rs, 0, sizeof(struct resize)); 1509 if (!resize_from_tags(mdev, nlp->tag_list, &rs)) { 1510 retcode = ERR_MANDATORY_TAG; 1511 goto fail; 1512 } 1513 1514 if (mdev->state.conn > C_CONNECTED) { 1515 retcode = ERR_RESIZE_RESYNC; 1516 goto fail; 1517 } 1518 1519 if (mdev->state.role == R_SECONDARY && 1520 mdev->state.peer == R_SECONDARY) { 1521 retcode = ERR_NO_PRIMARY; 1522 goto fail; 1523 } 1524 1525 if (!get_ldev(mdev)) { 1526 retcode = ERR_NO_DISK; 1527 goto fail; 1528 } 1529 1530 if (rs.no_resync && mdev->agreed_pro_version < 93) { 1531 retcode = ERR_NEED_APV_93; 1532 goto fail; 1533 } 1534 1535 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) 1536 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev); 1537 1538 mdev->ldev->dc.disk_size = (sector_t)rs.resize_size; 1539 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0); 1540 dd = drbd_determin_dev_size(mdev, ddsf); 1541 drbd_md_sync(mdev); 1542 put_ldev(mdev); 1543 if (dd == dev_size_error) { 1544 retcode = ERR_NOMEM_BITMAP; 1545 goto fail; 1546 } 1547 1548 if (mdev->state.conn == C_CONNECTED) { 1549 if (dd == grew) 1550 set_bit(RESIZE_PENDING, &mdev->flags); 1551 1552 drbd_send_uuids(mdev); 1553 drbd_send_sizes(mdev, 1, ddsf); 1554 } 1555 1556 fail: 1557 reply->ret_code = retcode; 1558 return 0; 1559} 1560 1561static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1562 struct drbd_nl_cfg_reply *reply) 1563{ 1564 int retcode = NO_ERROR; 1565 int err; 1566 int ovr; /* online verify running */ 1567 int rsr; /* re-sync running */ 1568 struct crypto_hash *verify_tfm = NULL; 1569 struct crypto_hash *csums_tfm = NULL; 1570 struct syncer_conf sc; 1571 cpumask_var_t new_cpu_mask; 1572 1573 if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) { 1574 retcode = ERR_NOMEM; 1575 goto fail; 1576 } 1577 1578 if (nlp->flags & DRBD_NL_SET_DEFAULTS) { 1579 memset(&sc, 0, sizeof(struct syncer_conf)); 1580 sc.rate = DRBD_RATE_DEF; 1581 sc.after = DRBD_AFTER_DEF; 1582 sc.al_extents = DRBD_AL_EXTENTS_DEF; 1583 sc.on_no_data = DRBD_ON_NO_DATA_DEF; 1584 } else 1585 memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf)); 1586 1587 if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) { 1588 retcode = ERR_MANDATORY_TAG; 1589 goto fail; 1590 } 1591 1592 /* re-sync running */ 1593 rsr = ( mdev->state.conn == C_SYNC_SOURCE || 1594 mdev->state.conn == C_SYNC_TARGET || 1595 mdev->state.conn == C_PAUSED_SYNC_S || 1596 mdev->state.conn == C_PAUSED_SYNC_T ); 1597 1598 if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) { 1599 retcode = ERR_CSUMS_RESYNC_RUNNING; 1600 goto fail; 1601 } 1602 1603 if (!rsr && sc.csums_alg[0]) { 1604 csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC); 1605 if (IS_ERR(csums_tfm)) { 1606 csums_tfm = NULL; 1607 retcode = ERR_CSUMS_ALG; 1608 goto fail; 1609 } 1610 1611 if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) { 1612 retcode = ERR_CSUMS_ALG_ND; 1613 goto fail; 1614 } 1615 } 1616 1617 /* online verify running */ 1618 ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T); 1619 1620 if (ovr) { 1621 if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) { 1622 retcode = ERR_VERIFY_RUNNING; 1623 goto fail; 1624 } 1625 } 1626 1627 if (!ovr && sc.verify_alg[0]) { 1628 verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC); 1629 if (IS_ERR(verify_tfm)) { 1630 verify_tfm = NULL; 1631 retcode = ERR_VERIFY_ALG; 1632 goto fail; 1633 } 1634 1635 if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) { 1636 retcode = ERR_VERIFY_ALG_ND; 1637 goto fail; 1638 } 1639 } 1640 1641 /* silently ignore cpu mask on UP kernel */ 1642 if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) { 1643 err = __bitmap_parse(sc.cpu_mask, 32, 0, 1644 cpumask_bits(new_cpu_mask), nr_cpu_ids); 1645 if (err) { 1646 dev_warn(DEV, "__bitmap_parse() failed with %d\n", err); 1647 retcode = ERR_CPU_MASK_PARSE; 1648 goto fail; 1649 } 1650 } 1651 1652 ERR_IF (sc.rate < 1) sc.rate = 1; 1653 ERR_IF (sc.al_extents < 7) sc.al_extents = 127; /* arbitrary minimum */ 1654#define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT) 1655 if (sc.al_extents > AL_MAX) { 1656 dev_err(DEV, "sc.al_extents > %d\n", AL_MAX); 1657 sc.al_extents = AL_MAX; 1658 } 1659#undef AL_MAX 1660 1661 /* most sanity checks done, try to assign the new sync-after 1662 * dependency. need to hold the global lock in there, 1663 * to avoid a race in the dependency loop check. */ 1664 retcode = drbd_alter_sa(mdev, sc.after); 1665 if (retcode != NO_ERROR) 1666 goto fail; 1667 1668 /* ok, assign the rest of it as well. 1669 * lock against receive_SyncParam() */ 1670 spin_lock(&mdev->peer_seq_lock); 1671 mdev->sync_conf = sc; 1672 1673 if (!rsr) { 1674 crypto_free_hash(mdev->csums_tfm); 1675 mdev->csums_tfm = csums_tfm; 1676 csums_tfm = NULL; 1677 } 1678 1679 if (!ovr) { 1680 crypto_free_hash(mdev->verify_tfm); 1681 mdev->verify_tfm = verify_tfm; 1682 verify_tfm = NULL; 1683 } 1684 spin_unlock(&mdev->peer_seq_lock); 1685 1686 if (get_ldev(mdev)) { 1687 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log)); 1688 drbd_al_shrink(mdev); 1689 err = drbd_check_al_size(mdev); 1690 lc_unlock(mdev->act_log); 1691 wake_up(&mdev->al_wait); 1692 1693 put_ldev(mdev); 1694 drbd_md_sync(mdev); 1695 1696 if (err) { 1697 retcode = ERR_NOMEM; 1698 goto fail; 1699 } 1700 } 1701 1702 if (mdev->state.conn >= C_CONNECTED) 1703 drbd_send_sync_param(mdev, &sc); 1704 1705 if (!cpumask_equal(mdev->cpu_mask, new_cpu_mask)) { 1706 cpumask_copy(mdev->cpu_mask, new_cpu_mask); 1707 drbd_calc_cpu_mask(mdev); 1708 mdev->receiver.reset_cpu_mask = 1; 1709 mdev->asender.reset_cpu_mask = 1; 1710 mdev->worker.reset_cpu_mask = 1; 1711 } 1712 1713 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); 1714fail: 1715 free_cpumask_var(new_cpu_mask); 1716 crypto_free_hash(csums_tfm); 1717 crypto_free_hash(verify_tfm); 1718 reply->ret_code = retcode; 1719 return 0; 1720} 1721 1722static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1723 struct drbd_nl_cfg_reply *reply) 1724{ 1725 int retcode; 1726 1727 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED); 1728 1729 if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION) 1730 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T)); 1731 1732 while (retcode == SS_NEED_CONNECTION) { 1733 spin_lock_irq(&mdev->req_lock); 1734 if (mdev->state.conn < C_CONNECTED) 1735 retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL); 1736 spin_unlock_irq(&mdev->req_lock); 1737 1738 if (retcode != SS_NEED_CONNECTION) 1739 break; 1740 1741 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T)); 1742 } 1743 1744 reply->ret_code = retcode; 1745 return 0; 1746} 1747 1748static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1749 struct drbd_nl_cfg_reply *reply) 1750{ 1751 1752 reply->ret_code = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S)); 1753 1754 return 0; 1755} 1756 1757static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1758 struct drbd_nl_cfg_reply *reply) 1759{ 1760 int retcode = NO_ERROR; 1761 1762 if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO) 1763 retcode = ERR_PAUSE_IS_SET; 1764 1765 reply->ret_code = retcode; 1766 return 0; 1767} 1768 1769static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1770 struct drbd_nl_cfg_reply *reply) 1771{ 1772 int retcode = NO_ERROR; 1773 1774 if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) 1775 retcode = ERR_PAUSE_IS_CLEAR; 1776 1777 reply->ret_code = retcode; 1778 return 0; 1779} 1780 1781static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1782 struct drbd_nl_cfg_reply *reply) 1783{ 1784 reply->ret_code = drbd_request_state(mdev, NS(susp, 1)); 1785 1786 return 0; 1787} 1788 1789static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1790 struct drbd_nl_cfg_reply *reply) 1791{ 1792 drbd_suspend_io(mdev); 1793 reply->ret_code = drbd_request_state(mdev, NS(susp, 0)); 1794 if (reply->ret_code == SS_SUCCESS) { 1795 if (mdev->state.conn < C_CONNECTED) 1796 tl_clear(mdev); 1797 if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED) 1798 tl_restart(mdev, fail_frozen_disk_io); 1799 } 1800 drbd_resume_io(mdev); 1801 1802 return 0; 1803} 1804 1805static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1806 struct drbd_nl_cfg_reply *reply) 1807{ 1808 reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED)); 1809 return 0; 1810} 1811 1812static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1813 struct drbd_nl_cfg_reply *reply) 1814{ 1815 unsigned short *tl; 1816 1817 tl = reply->tag_list; 1818 1819 if (get_ldev(mdev)) { 1820 tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl); 1821 put_ldev(mdev); 1822 } 1823 1824 if (get_net_conf(mdev)) { 1825 tl = net_conf_to_tags(mdev, mdev->net_conf, tl); 1826 put_net_conf(mdev); 1827 } 1828 tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl); 1829 1830 put_unaligned(TT_END, tl++); /* Close the tag list */ 1831 1832 return (int)((char *)tl - (char *)reply->tag_list); 1833} 1834 1835static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1836 struct drbd_nl_cfg_reply *reply) 1837{ 1838 unsigned short *tl = reply->tag_list; 1839 union drbd_state s = mdev->state; 1840 unsigned long rs_left; 1841 unsigned int res; 1842 1843 tl = get_state_to_tags(mdev, (struct get_state *)&s, tl); 1844 1845 /* no local ref, no bitmap, no syncer progress. */ 1846 if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) { 1847 if (get_ldev(mdev)) { 1848 drbd_get_syncer_progress(mdev, &rs_left, &res); 1849 tl = tl_add_int(tl, T_sync_progress, &res); 1850 put_ldev(mdev); 1851 } 1852 } 1853 put_unaligned(TT_END, tl++); /* Close the tag list */ 1854 1855 return (int)((char *)tl - (char *)reply->tag_list); 1856} 1857 1858static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1859 struct drbd_nl_cfg_reply *reply) 1860{ 1861 unsigned short *tl; 1862 1863 tl = reply->tag_list; 1864 1865 if (get_ldev(mdev)) { 1866 tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64)); 1867 tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags); 1868 put_ldev(mdev); 1869 } 1870 put_unaligned(TT_END, tl++); /* Close the tag list */ 1871 1872 return (int)((char *)tl - (char *)reply->tag_list); 1873} 1874 1875/** 1876 * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use 1877 * @mdev: DRBD device. 1878 * @nlp: Netlink/connector packet from drbdsetup 1879 * @reply: Reply packet for drbdsetup 1880 */ 1881static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1882 struct drbd_nl_cfg_reply *reply) 1883{ 1884 unsigned short *tl; 1885 char rv; 1886 1887 tl = reply->tag_list; 1888 1889 rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED : 1890 test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT; 1891 1892 tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv)); 1893 put_unaligned(TT_END, tl++); /* Close the tag list */ 1894 1895 return (int)((char *)tl - (char *)reply->tag_list); 1896} 1897 1898static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1899 struct drbd_nl_cfg_reply *reply) 1900{ 1901 /* default to resume from last known position, if possible */ 1902 struct start_ov args = 1903 { .start_sector = mdev->ov_start_sector }; 1904 1905 if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) { 1906 reply->ret_code = ERR_MANDATORY_TAG; 1907 return 0; 1908 } 1909 /* w_make_ov_request expects position to be aligned */ 1910 mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT; 1911 reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S)); 1912 return 0; 1913} 1914 1915 1916static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1917 struct drbd_nl_cfg_reply *reply) 1918{ 1919 int retcode = NO_ERROR; 1920 int skip_initial_sync = 0; 1921 int err; 1922 1923 struct new_c_uuid args; 1924 1925 memset(&args, 0, sizeof(struct new_c_uuid)); 1926 if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) { 1927 reply->ret_code = ERR_MANDATORY_TAG; 1928 return 0; 1929 } 1930 1931 mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */ 1932 1933 if (!get_ldev(mdev)) { 1934 retcode = ERR_NO_DISK; 1935 goto out; 1936 } 1937 1938 /* this is "skip initial sync", assume to be clean */ 1939 if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 && 1940 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) { 1941 dev_info(DEV, "Preparing to skip initial sync\n"); 1942 skip_initial_sync = 1; 1943 } else if (mdev->state.conn != C_STANDALONE) { 1944 retcode = ERR_CONNECTED; 1945 goto out_dec; 1946 } 1947 1948 drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */ 1949 drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */ 1950 1951 if (args.clear_bm) { 1952 err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid"); 1953 if (err) { 1954 dev_err(DEV, "Writing bitmap failed with %d\n",err); 1955 retcode = ERR_IO_MD_DISK; 1956 } 1957 if (skip_initial_sync) { 1958 drbd_send_uuids_skip_initial_sync(mdev); 1959 _drbd_uuid_set(mdev, UI_BITMAP, 0); 1960 spin_lock_irq(&mdev->req_lock); 1961 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), 1962 CS_VERBOSE, NULL); 1963 spin_unlock_irq(&mdev->req_lock); 1964 } 1965 } 1966 1967 drbd_md_sync(mdev); 1968out_dec: 1969 put_ldev(mdev); 1970out: 1971 mutex_unlock(&mdev->state_mutex); 1972 1973 reply->ret_code = retcode; 1974 return 0; 1975} 1976 1977static struct drbd_conf *ensure_mdev(struct drbd_nl_cfg_req *nlp) 1978{ 1979 struct drbd_conf *mdev; 1980 1981 if (nlp->drbd_minor >= minor_count) 1982 return NULL; 1983 1984 mdev = minor_to_mdev(nlp->drbd_minor); 1985 1986 if (!mdev && (nlp->flags & DRBD_NL_CREATE_DEVICE)) { 1987 struct gendisk *disk = NULL; 1988 mdev = drbd_new_device(nlp->drbd_minor); 1989 1990 spin_lock_irq(&drbd_pp_lock); 1991 if (minor_table[nlp->drbd_minor] == NULL) { 1992 minor_table[nlp->drbd_minor] = mdev; 1993 disk = mdev->vdisk; 1994 mdev = NULL; 1995 } /* else: we lost the race */ 1996 spin_unlock_irq(&drbd_pp_lock); 1997 1998 if (disk) /* we won the race above */ 1999 /* in case we ever add a drbd_delete_device(), 2000 * don't forget the del_gendisk! */ 2001 add_disk(disk); 2002 else /* we lost the race above */ 2003 drbd_free_mdev(mdev); 2004 2005 mdev = minor_to_mdev(nlp->drbd_minor); 2006 } 2007 2008 return mdev; 2009} 2010 2011struct cn_handler_struct { 2012 int (*function)(struct drbd_conf *, 2013 struct drbd_nl_cfg_req *, 2014 struct drbd_nl_cfg_reply *); 2015 int reply_body_size; 2016}; 2017 2018static struct cn_handler_struct cnd_table[] = { 2019 [ P_primary ] = { &drbd_nl_primary, 0 }, 2020 [ P_secondary ] = { &drbd_nl_secondary, 0 }, 2021 [ P_disk_conf ] = { &drbd_nl_disk_conf, 0 }, 2022 [ P_detach ] = { &drbd_nl_detach, 0 }, 2023 [ P_net_conf ] = { &drbd_nl_net_conf, 0 }, 2024 [ P_disconnect ] = { &drbd_nl_disconnect, 0 }, 2025 [ P_resize ] = { &drbd_nl_resize, 0 }, 2026 [ P_syncer_conf ] = { &drbd_nl_syncer_conf, 0 }, 2027 [ P_invalidate ] = { &drbd_nl_invalidate, 0 }, 2028 [ P_invalidate_peer ] = { &drbd_nl_invalidate_peer, 0 }, 2029 [ P_pause_sync ] = { &drbd_nl_pause_sync, 0 }, 2030 [ P_resume_sync ] = { &drbd_nl_resume_sync, 0 }, 2031 [ P_suspend_io ] = { &drbd_nl_suspend_io, 0 }, 2032 [ P_resume_io ] = { &drbd_nl_resume_io, 0 }, 2033 [ P_outdate ] = { &drbd_nl_outdate, 0 }, 2034 [ P_get_config ] = { &drbd_nl_get_config, 2035 sizeof(struct syncer_conf_tag_len_struct) + 2036 sizeof(struct disk_conf_tag_len_struct) + 2037 sizeof(struct net_conf_tag_len_struct) }, 2038 [ P_get_state ] = { &drbd_nl_get_state, 2039 sizeof(struct get_state_tag_len_struct) + 2040 sizeof(struct sync_progress_tag_len_struct) }, 2041 [ P_get_uuids ] = { &drbd_nl_get_uuids, 2042 sizeof(struct get_uuids_tag_len_struct) }, 2043 [ P_get_timeout_flag ] = { &drbd_nl_get_timeout_flag, 2044 sizeof(struct get_timeout_flag_tag_len_struct)}, 2045 [ P_start_ov ] = { &drbd_nl_start_ov, 0 }, 2046 [ P_new_c_uuid ] = { &drbd_nl_new_c_uuid, 0 }, 2047}; 2048 2049static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp) 2050{ 2051 struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data; 2052 struct cn_handler_struct *cm; 2053 struct cn_msg *cn_reply; 2054 struct drbd_nl_cfg_reply *reply; 2055 struct drbd_conf *mdev; 2056 int retcode, rr; 2057 int reply_size = sizeof(struct cn_msg) 2058 + sizeof(struct drbd_nl_cfg_reply) 2059 + sizeof(short int); 2060 2061 if (!try_module_get(THIS_MODULE)) { 2062 printk(KERN_ERR "drbd: try_module_get() failed!\n"); 2063 return; 2064 } 2065 2066 if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) { 2067 retcode = ERR_PERM; 2068 goto fail; 2069 } 2070 2071 mdev = ensure_mdev(nlp); 2072 if (!mdev) { 2073 retcode = ERR_MINOR_INVALID; 2074 goto fail; 2075 } 2076 2077 if (nlp->packet_type >= P_nl_after_last_packet) { 2078 retcode = ERR_PACKET_NR; 2079 goto fail; 2080 } 2081 2082 cm = cnd_table + nlp->packet_type; 2083 2084 /* This may happen if packet number is 0: */ 2085 if (cm->function == NULL) { 2086 retcode = ERR_PACKET_NR; 2087 goto fail; 2088 } 2089 2090 reply_size += cm->reply_body_size; 2091 2092 /* allocation not in the IO path, cqueue thread context */ 2093 cn_reply = kmalloc(reply_size, GFP_KERNEL); 2094 if (!cn_reply) { 2095 retcode = ERR_NOMEM; 2096 goto fail; 2097 } 2098 reply = (struct drbd_nl_cfg_reply *) cn_reply->data; 2099 2100 reply->packet_type = 2101 cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet; 2102 reply->minor = nlp->drbd_minor; 2103 reply->ret_code = NO_ERROR; /* Might by modified by cm->function. */ 2104 /* reply->tag_list; might be modified by cm->function. */ 2105 2106 rr = cm->function(mdev, nlp, reply); 2107 2108 cn_reply->id = req->id; 2109 cn_reply->seq = req->seq; 2110 cn_reply->ack = req->ack + 1; 2111 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr; 2112 cn_reply->flags = 0; 2113 2114 rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL); 2115 if (rr && rr != -ESRCH) 2116 printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr); 2117 2118 kfree(cn_reply); 2119 module_put(THIS_MODULE); 2120 return; 2121 fail: 2122 drbd_nl_send_reply(req, retcode); 2123 module_put(THIS_MODULE); 2124} 2125 2126static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */ 2127 2128static unsigned short * 2129__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, 2130 unsigned short len, int nul_terminated) 2131{ 2132 unsigned short l = tag_descriptions[tag_number(tag)].max_len; 2133 len = (len < l) ? len : l; 2134 put_unaligned(tag, tl++); 2135 put_unaligned(len, tl++); 2136 memcpy(tl, data, len); 2137 tl = (unsigned short*)((char*)tl + len); 2138 if (nul_terminated) 2139 *((char*)tl - 1) = 0; 2140 return tl; 2141} 2142 2143static unsigned short * 2144tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len) 2145{ 2146 return __tl_add_blob(tl, tag, data, len, 0); 2147} 2148 2149static unsigned short * 2150tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str) 2151{ 2152 return __tl_add_blob(tl, tag, str, strlen(str)+1, 0); 2153} 2154 2155static unsigned short * 2156tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val) 2157{ 2158 put_unaligned(tag, tl++); 2159 switch(tag_type(tag)) { 2160 case TT_INTEGER: 2161 put_unaligned(sizeof(int), tl++); 2162 put_unaligned(*(int *)val, (int *)tl); 2163 tl = (unsigned short*)((char*)tl+sizeof(int)); 2164 break; 2165 case TT_INT64: 2166 put_unaligned(sizeof(u64), tl++); 2167 put_unaligned(*(u64 *)val, (u64 *)tl); 2168 tl = (unsigned short*)((char*)tl+sizeof(u64)); 2169 break; 2170 default: 2171 /* someone did something stupid. */ 2172 ; 2173 } 2174 return tl; 2175} 2176 2177void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state) 2178{ 2179 char buffer[sizeof(struct cn_msg)+ 2180 sizeof(struct drbd_nl_cfg_reply)+ 2181 sizeof(struct get_state_tag_len_struct)+ 2182 sizeof(short int)]; 2183 struct cn_msg *cn_reply = (struct cn_msg *) buffer; 2184 struct drbd_nl_cfg_reply *reply = 2185 (struct drbd_nl_cfg_reply *)cn_reply->data; 2186 unsigned short *tl = reply->tag_list; 2187 2188 /* dev_warn(DEV, "drbd_bcast_state() got called\n"); */ 2189 2190 tl = get_state_to_tags(mdev, (struct get_state *)&state, tl); 2191 2192 put_unaligned(TT_END, tl++); /* Close the tag list */ 2193 2194 cn_reply->id.idx = CN_IDX_DRBD; 2195 cn_reply->id.val = CN_VAL_DRBD; 2196 2197 cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); 2198 cn_reply->ack = 0; /* not used here. */ 2199 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + 2200 (int)((char *)tl - (char *)reply->tag_list); 2201 cn_reply->flags = 0; 2202 2203 reply->packet_type = P_get_state; 2204 reply->minor = mdev_to_minor(mdev); 2205 reply->ret_code = NO_ERROR; 2206 2207 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2208} 2209 2210void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name) 2211{ 2212 char buffer[sizeof(struct cn_msg)+ 2213 sizeof(struct drbd_nl_cfg_reply)+ 2214 sizeof(struct call_helper_tag_len_struct)+ 2215 sizeof(short int)]; 2216 struct cn_msg *cn_reply = (struct cn_msg *) buffer; 2217 struct drbd_nl_cfg_reply *reply = 2218 (struct drbd_nl_cfg_reply *)cn_reply->data; 2219 unsigned short *tl = reply->tag_list; 2220 2221 /* dev_warn(DEV, "drbd_bcast_state() got called\n"); */ 2222 2223 tl = tl_add_str(tl, T_helper, helper_name); 2224 put_unaligned(TT_END, tl++); /* Close the tag list */ 2225 2226 cn_reply->id.idx = CN_IDX_DRBD; 2227 cn_reply->id.val = CN_VAL_DRBD; 2228 2229 cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); 2230 cn_reply->ack = 0; /* not used here. */ 2231 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + 2232 (int)((char *)tl - (char *)reply->tag_list); 2233 cn_reply->flags = 0; 2234 2235 reply->packet_type = P_call_helper; 2236 reply->minor = mdev_to_minor(mdev); 2237 reply->ret_code = NO_ERROR; 2238 2239 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2240} 2241 2242void drbd_bcast_ee(struct drbd_conf *mdev, 2243 const char *reason, const int dgs, 2244 const char* seen_hash, const char* calc_hash, 2245 const struct drbd_epoch_entry* e) 2246{ 2247 struct cn_msg *cn_reply; 2248 struct drbd_nl_cfg_reply *reply; 2249 unsigned short *tl; 2250 struct page *page; 2251 unsigned len; 2252 2253 if (!e) 2254 return; 2255 if (!reason || !reason[0]) 2256 return; 2257 2258 /* apparently we have to memcpy twice, first to prepare the data for the 2259 * struct cn_msg, then within cn_netlink_send from the cn_msg to the 2260 * netlink skb. */ 2261 /* receiver thread context, which is not in the writeout path (of this node), 2262 * but may be in the writeout path of the _other_ node. 2263 * GFP_NOIO to avoid potential "distributed deadlock". */ 2264 cn_reply = kmalloc( 2265 sizeof(struct cn_msg)+ 2266 sizeof(struct drbd_nl_cfg_reply)+ 2267 sizeof(struct dump_ee_tag_len_struct)+ 2268 sizeof(short int), 2269 GFP_NOIO); 2270 2271 if (!cn_reply) { 2272 dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n", 2273 (unsigned long long)e->sector, e->size); 2274 return; 2275 } 2276 2277 reply = (struct drbd_nl_cfg_reply*)cn_reply->data; 2278 tl = reply->tag_list; 2279 2280 tl = tl_add_str(tl, T_dump_ee_reason, reason); 2281 tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs); 2282 tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs); 2283 tl = tl_add_int(tl, T_ee_sector, &e->sector); 2284 tl = tl_add_int(tl, T_ee_block_id, &e->block_id); 2285 2286 put_unaligned(T_ee_data, tl++); 2287 put_unaligned(e->size, tl++); 2288 2289 len = e->size; 2290 page = e->pages; 2291 page_chain_for_each(page) { 2292 void *d = kmap_atomic(page, KM_USER0); 2293 unsigned l = min_t(unsigned, len, PAGE_SIZE); 2294 memcpy(tl, d, l); 2295 kunmap_atomic(d, KM_USER0); 2296 tl = (unsigned short*)((char*)tl + l); 2297 len -= l; 2298 } 2299 put_unaligned(TT_END, tl++); /* Close the tag list */ 2300 2301 cn_reply->id.idx = CN_IDX_DRBD; 2302 cn_reply->id.val = CN_VAL_DRBD; 2303 2304 cn_reply->seq = atomic_add_return(1,&drbd_nl_seq); 2305 cn_reply->ack = 0; // not used here. 2306 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + 2307 (int)((char*)tl - (char*)reply->tag_list); 2308 cn_reply->flags = 0; 2309 2310 reply->packet_type = P_dump_ee; 2311 reply->minor = mdev_to_minor(mdev); 2312 reply->ret_code = NO_ERROR; 2313 2314 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2315 kfree(cn_reply); 2316} 2317 2318void drbd_bcast_sync_progress(struct drbd_conf *mdev) 2319{ 2320 char buffer[sizeof(struct cn_msg)+ 2321 sizeof(struct drbd_nl_cfg_reply)+ 2322 sizeof(struct sync_progress_tag_len_struct)+ 2323 sizeof(short int)]; 2324 struct cn_msg *cn_reply = (struct cn_msg *) buffer; 2325 struct drbd_nl_cfg_reply *reply = 2326 (struct drbd_nl_cfg_reply *)cn_reply->data; 2327 unsigned short *tl = reply->tag_list; 2328 unsigned long rs_left; 2329 unsigned int res; 2330 2331 /* no local ref, no bitmap, no syncer progress, no broadcast. */ 2332 if (!get_ldev(mdev)) 2333 return; 2334 drbd_get_syncer_progress(mdev, &rs_left, &res); 2335 put_ldev(mdev); 2336 2337 tl = tl_add_int(tl, T_sync_progress, &res); 2338 put_unaligned(TT_END, tl++); /* Close the tag list */ 2339 2340 cn_reply->id.idx = CN_IDX_DRBD; 2341 cn_reply->id.val = CN_VAL_DRBD; 2342 2343 cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); 2344 cn_reply->ack = 0; /* not used here. */ 2345 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + 2346 (int)((char *)tl - (char *)reply->tag_list); 2347 cn_reply->flags = 0; 2348 2349 reply->packet_type = P_sync_progress; 2350 reply->minor = mdev_to_minor(mdev); 2351 reply->ret_code = NO_ERROR; 2352 2353 cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2354} 2355 2356int __init drbd_nl_init(void) 2357{ 2358 static struct cb_id cn_id_drbd; 2359 int err, try=10; 2360 2361 cn_id_drbd.val = CN_VAL_DRBD; 2362 do { 2363 cn_id_drbd.idx = cn_idx; 2364 err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback); 2365 if (!err) 2366 break; 2367 cn_idx = (cn_idx + CN_IDX_STEP); 2368 } while (try--); 2369 2370 if (err) { 2371 printk(KERN_ERR "drbd: cn_drbd failed to register\n"); 2372 return err; 2373 } 2374 2375 return 0; 2376} 2377 2378void drbd_nl_cleanup(void) 2379{ 2380 static struct cb_id cn_id_drbd; 2381 2382 cn_id_drbd.idx = cn_idx; 2383 cn_id_drbd.val = CN_VAL_DRBD; 2384 2385 cn_del_callback(&cn_id_drbd); 2386} 2387 2388void drbd_nl_send_reply(struct cn_msg *req, int ret_code) 2389{ 2390 char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)]; 2391 struct cn_msg *cn_reply = (struct cn_msg *) buffer; 2392 struct drbd_nl_cfg_reply *reply = 2393 (struct drbd_nl_cfg_reply *)cn_reply->data; 2394 int rr; 2395 2396 cn_reply->id = req->id; 2397 2398 cn_reply->seq = req->seq; 2399 cn_reply->ack = req->ack + 1; 2400 cn_reply->len = sizeof(struct drbd_nl_cfg_reply); 2401 cn_reply->flags = 0; 2402 2403 reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor; 2404 reply->ret_code = ret_code; 2405 2406 rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); 2407 if (rr && rr != -ESRCH) 2408 printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr); 2409} 2410 2411