drbd_nl.c revision 3e3a7766c2e6995ac98e7855017abc3544d54e08
/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/connector.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_tag_magic.h>
#include <linux/drbd_limits.h>
#include <linux/compiler.h>
#include <linux/kthread.h>

static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int);
static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *);
static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *);

/* see get_sb_bdev and bd_claim */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Generate the tag_list to struct functions */
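/* A tag_list is a flat stream of unaligned 16-bit words: (tag, dlen,
 * then dlen bytes of payload), terminated by TT_END.  The tag word
 * encodes the tag number, the T_MANDATORY flag and the payload type
 * (TT_INTEGER, TT_INT64, TT_BIT or TT_STRING). */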
#define NL_PACKET(name, number, fields) \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) __attribute__ ((unused)); \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) \
{ \
	int tag; \
	int dlen; \
	\
	while ((tag = get_unaligned(tags++)) != TT_END) {	\
		dlen = get_unaligned(tags++);			\
		switch (tag_number(tag)) { \
		fields \
		default: \
			if (tag & T_MANDATORY) { \
				dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \
				return 0; \
			} \
		} \
		tags = (unsigned short *)((char *)tags + dlen); \
	} \
	return 1; \
}
#define NL_INTEGER(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \
		arg->member = get_unaligned((int *)(tags));	\
		break;
#define NL_INT64(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \
		arg->member = get_unaligned((u64 *)(tags));	\
		break;
#define NL_BIT(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \
		arg->member = *(char *)(tags) ? 1 : 0; \
		break;
#define NL_STRING(pn, pr, member, len) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \
		if (dlen > len) { \
			dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \
				#member, dlen, (unsigned int)len); \
			return 0; \
		} \
		arg->member ## _len = dlen; \
		memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
		break;
#include "linux/drbd_nl.h"
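/* linux/drbd_nl.h is included twice: with the macros above it expands
 * each NL_PACKET() definition into a <name>_from_tags() parser; with
 * the redefinitions below, into the matching <name>_to_tags()
 * serializer. */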

/* Generate the struct to tag_list functions */
#define NL_PACKET(name, number, fields) \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) __attribute__ ((unused)); \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) \
{ \
	fields \
	return tags; \
}

#define NL_INTEGER(pn, pr, member) \
	put_unaligned(pn | pr | TT_INTEGER, tags++);	\
	put_unaligned(sizeof(int), tags++);		\
	put_unaligned(arg->member, (int *)tags);	\
	tags = (unsigned short *)((char *)tags+sizeof(int));
#define NL_INT64(pn, pr, member) \
	put_unaligned(pn | pr | TT_INT64, tags++);	\
	put_unaligned(sizeof(u64), tags++);		\
	put_unaligned(arg->member, (u64 *)tags);	\
	tags = (unsigned short *)((char *)tags+sizeof(u64));
#define NL_BIT(pn, pr, member) \
	put_unaligned(pn | pr | TT_BIT, tags++);	\
	put_unaligned(sizeof(char), tags++);		\
	*(char *)tags = arg->member; \
	tags = (unsigned short *)((char *)tags+sizeof(char));
#define NL_STRING(pn, pr, member, len) \
	put_unaligned(pn | pr | TT_STRING, tags++);	\
	put_unaligned(arg->member ## _len, tags++);	\
	memcpy(tags, arg->member, arg->member ## _len); \
	tags = (unsigned short *)((char *)tags + arg->member ## _len);
#include "linux/drbd_nl.h"

void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name);
void drbd_nl_send_reply(struct cn_msg *, int);

int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			NULL, /* Will be set to address family */
			NULL, /* Will be set to address */
			NULL };

	char mb[12], af[20], ad[60], *afs;
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));

	if (get_net_conf(mdev)) {
		switch (((struct sockaddr *)mdev->net_conf->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)mdev->net_conf->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
		}
		snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
		envp[3] = af;
		envp[4] = ad;
		put_net_conf(mdev);
	}

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);

	drbd_bcast_ev_helper(mdev, cmd);
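	/* call_usermodehelper() is invoked with wait == 1 (UMH_WAIT_PROC),
	 * so ret is a wait()-style status word; (ret >> 8) & 0xff below
	 * extracts the helper's exit code. */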
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
{
	char *ex_to_string;
	int r;
	enum drbd_disk_state nps;
	enum drbd_fencing_p fp;

	D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

	if (get_ldev_if_state(mdev, D_CONSISTENT)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	} else {
		dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
		nps = mdev->state.pdsk;
		goto out;
	}

	r = drbd_khelper(mdev, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		nps = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		nps = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (mdev->state.disk == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			nps = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
			nps = mdev->state.pdsk;
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		dev_warn(DEV, "Peer is primary, outdating myself.\n");
		nps = D_UNKNOWN;
		_drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE);
		break;
	case 7:
		if (fp != FP_STONITH)
			dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		nps = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		nps = D_UNKNOWN;
		dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return nps;
	}

	dev_info(DEV, "fence-peer helper returned %d (%s)\n",
			(r>>8) & 0xff, ex_to_string);

out:
	if (mdev->state.susp_fen && nps >= D_UNKNOWN) {
		/* The handler was not successful... unfreeze here, the
		   state engine can not unfreeze... */
		_drbd_request_state(mdev, NS(susp_fen, 0), CS_VERBOSE);
	}

	return nps;
}

static int _try_outdate_peer_async(void *data)
{
	struct drbd_conf *mdev = (struct drbd_conf *)data;
	enum drbd_disk_state nps;

	nps = drbd_try_outdate_peer(mdev);
	drbd_request_state(mdev, NS(pdsk, nps));

	return 0;
}

void drbd_try_outdate_peer_async(struct drbd_conf *mdev)
{
	struct task_struct *opa;

	opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev));
	if (IS_ERR(opa))
		dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n");
}
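/* The fence-peer handler runs in a short-lived kernel thread here, so
 * the caller (typically the state engine) is not blocked while the
 * helper executes. */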

int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	int r = 0;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;
	enum drbd_disk_state nps;

	if (new_role == R_PRIMARY)
		request_ping(mdev); /* Detect a dead peer ASAP */

	mutex_lock(&mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		r = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (r == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (r == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (r == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
			nps = drbd_try_outdate_peer(mdev);

			if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}

			val.pdsk = nps;
			mask.pdsk = D_MASK;

			continue;
		}

		if (r == SS_NOTHING_TO_DO)
			goto fail;
		if (r == SS_PRIMARY_NOP && mask.pdsk == 0) {
			nps = drbd_try_outdate_peer(mdev);

			if (force && nps > D_OUTDATED) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				nps = D_OUTDATED;
			}

			mask.pdsk = D_MASK;
			val.pdsk  = nps;

			continue;
		}
		if (r == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (r < SS_SUCCESS) {
			r = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (r < SS_SUCCESS)
				goto fail;
		}
		break;
	}

	if (r < SS_SUCCESS)
		goto fail;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, TRUE);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		if (get_net_conf(mdev)) {
			mdev->net_conf->want_lose = 0;
			put_net_conf(mdev);
		}
		set_disk_ro(mdev->vdisk, FALSE);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
			put_ldev(mdev);
		}
	}

	if ((new_role == R_SECONDARY) && get_ldev(mdev)) {
		drbd_al_to_on_disk_bm(mdev);
		put_ldev(mdev);
	}

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
 fail:
	mutex_unlock(&mdev->state_mutex);
	return r;
}

static struct drbd_conf *ensure_mdev(int minor, int create)
{
	struct drbd_conf *mdev;

	if (minor >= minor_count)
		return NULL;

	mdev = minor_to_mdev(minor);

	if (!mdev && create) {
		struct gendisk *disk = NULL;
		mdev = drbd_new_device(minor);

		spin_lock_irq(&drbd_pp_lock);
		if (minor_table[minor] == NULL) {
			minor_table[minor] = mdev;
			disk = mdev->vdisk;
			mdev = NULL;
		} /* else: we lost the race */
		spin_unlock_irq(&drbd_pp_lock);

		if (disk) /* we won the race above */
			/* in case we ever add a drbd_delete_device(),
			 * don't forget the del_gendisk! */
			add_disk(disk);
		else /* we lost the race above */
			drbd_free_mdev(mdev);

		mdev = minor_to_mdev(minor);
	}

	return mdev;
}

static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	struct primary primary_args;

	memset(&primary_args, 0, sizeof(struct primary));
	if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	reply->ret_code =
		drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force);

	return 0;
}

static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0);

	return 0;
}

/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	switch (bdev->dc.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_MAX_SIZE;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset   = -md_size_sect + MD_AL_OFFSET;
		break;
	}
}
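/* For internal (and flex-internal) meta data the super block sits at
 * the end of the backing device, so al_offset and bm_offset above are
 * negative, i.e. relative to md_offset. */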

char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max. */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%lu %cB", (unsigned long)size, units[base]);

	return buf;
}
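/* Example: the callers pass KB, so ppsize(buf, 10240) shifts by 10
 * once (rounding on bit 9) and yields "10 MB". */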

/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 *  remote READ does inc_ap_bio, receiver would need to receive answer
 *  packet from remote to dec_ap_bio again.
 *  receiver receive_sizes(), comes here,
 *  waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 *  (not connected, or bad/no disk on peer):
 *  see drbd_fail_request_early, ap_bio_cnt is zero.
 *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 *  peer may not initiate a resize.
 */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (is_susp(mdev->state))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}

/**
 * drbd_determin_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 * @flags:	resize behavior (enum dds_flags).
 *
 * Returns unchanged, shrunk or grew on success; dev_size_error on failure.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
				    "Leaving size unchanged at size = %lu KB\n",
				    (unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
		     (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		rv = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! */
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}

sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
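/* In short: if both local and peer size are known, take the smaller;
 * otherwise fall back to the last agreed size, clipped by whatever is
 * known.  An explicit user-requested size only wins if it does not
 * exceed the computed size. */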

/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	ERR_IF(mdev->sync_conf.al_extents < 7)
		mdev->sync_conf.al_extents = 127;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache,
		mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}
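/* Note the swap strategy above: the replacement LRU is allocated first
 * and only swapped in under al_lock if no old extent still holds a
 * reference; otherwise the new cache is destroyed and -EBUSY returned. */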

void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size) __must_hold(local)
{
	struct request_queue * const q = mdev->rq_queue;
	struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
	int max_segments = mdev->ldev->dc.max_bio_bvecs;
	int max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
	blk_queue_stack_limits(q, b);

	dev_info(DEV, "max BIO size = %u\n", queue_max_hw_sectors(q) << 9);

	if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
		dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
		     q->backing_dev_info.ra_pages,
		     b->backing_dev_info.ra_pages);
		q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
	}
}

/* serialize deconfig (worker exiting, doing cleanup)
 * and reconfig (drbdsetup disk, drbdsetup net)
 *
 * Wait for a potentially exiting worker, then restart it,
 * or start a new one.  Flush any pending work, there may still be an
 * after_state_change queued.
 */
static void drbd_reconfig_start(struct drbd_conf *mdev)
{
	wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
	wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
	drbd_thread_start(&mdev->worker);
	drbd_flush_workqueue(mdev);
}

/* if still unconfigured, stops worker again.
 * if configured now, clears CONFIG_PENDING.
 * wakes potential waiters */
static void drbd_reconfig_done(struct drbd_conf *mdev)
{
	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.disk == D_DISKLESS &&
	    mdev->state.conn == C_STANDALONE &&
	    mdev->state.role == R_SECONDARY) {
		set_bit(DEVICE_DYING, &mdev->flags);
		drbd_thread_stop_nowait(&mdev->worker);
	} else
		clear_bit(CONFIG_PENDING, &mdev->flags);
	spin_unlock_irq(&mdev->req_lock);
	wake_up(&mdev->state_wait);
}
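/* The configuration handlers below bracket their work with
 * drbd_reconfig_start()/drbd_reconfig_done(); see drbd_nl_disk_conf()
 * and drbd_nl_net_conf(). */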

/* Make sure IO is suspended before calling this function(). */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (lc_try_lock(mdev->act_log)) {
		drbd_al_shrink(mdev);
		lc_unlock(mdev->act_log);
	} else {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);

	spin_unlock_irq(&mdev->req_lock);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}

/* always returns 0;
 * the interesting return code is in reply->ret_code */
static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	enum drbd_ret_codes retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct block_device *bdev;
	struct lru_cache *resync_lru = NULL;
	union drbd_state ns, os;
	unsigned int max_bio_size;
	int rv;
	int cp_discovered = 0;
	int logical_block_size;

	drbd_reconfig_start(mdev);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}
	/* It may just now have detached because of IO error.  Make sure
	 * drbd_ldev_destroy is done already, we may end up here very fast,
	 * e.g. if someone calls attach from the on-io-error handler,
	 * to realize a "hot spare" feature (not that I'd recommend that) */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));

	/* allocation not in the IO path, cqueue thread context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	nbc->dc.disk_size     = DRBD_DISK_SIZE_SECT_DEF;
	nbc->dc.on_io_error   = DRBD_ON_IO_ERROR_DEF;
	nbc->dc.fencing       = DRBD_FENCING_DEF;
	nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;

	if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	if (get_net_conf(mdev)) {
		int prot = mdev->net_conf->wire_protocol;
		put_net_conf(mdev);
		if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}

	bdev = blkdev_get_by_path(nbc->dc.backing_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_DISK;
		goto fail;
	}
	nbc->backing_bdev = bdev;

	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device.  TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	bdev = blkdev_get_by_path(nbc->dc.meta_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  (nbc->dc.meta_dev_idx < 0) ?
				  (void *)mdev : (void *)drbd_m_holder);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}
	nbc->md_bdev = bdev;

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			61, sizeof(struct bm_extent),
			offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) nbc->dc.disk_size);
		retcode = ERR_DISK_TO_SMALL;
		goto fail;
	}

	if (nbc->dc.meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
		     "at least %llu sectors needed for this meta-disk type\n",
		     (unsigned long long) min_md_device_sectors);
		goto fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TO_SMALL;
		goto fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			"to currently maximum possible %llu sectors <==\n",
			(unsigned long long) max_possible_sectors);
		if (nbc->dc.meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				      "meta data may help <<==\n");
	}

	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || is_susp(mdev->state));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	drbd_resume_io(mdev);
	if (retcode < SS_SUCCESS)
		goto fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	/* allocate a second IO page if logical_block_size != 512 */
	logical_block_size = bdev_logical_block_size(nbc->md_bdev);
	if (logical_block_size == 0)
		logical_block_size = MD_SECTOR_SIZE;

	if (logical_block_size != MD_SECTOR_SIZE) {
		if (!mdev->md_io_tmpp) {
			struct page *page = alloc_page(GFP_NOIO);
			if (!page) {
				retcode = ERR_NOMEM;
				goto force_diskless_dec;
			}

			dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n",
			     logical_block_size, MD_SECTOR_SIZE);
			dev_warn(DEV, "Workaround engaged (has performance impact).\n");

			mdev->md_io_tmpp = page;
		}
	}

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TO_SMALL;
		goto force_diskless_dec;
	}

	if (!drbd_al_read_log(mdev, nbc)) {
		retcode = ERR_IO_MD_DISK;
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (nbc->dc.no_md_flush)
		set_bit(MD_NO_FUA, &mdev->flags);
	else
		clear_bit(MD_NO_FUA, &mdev->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere.  */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	nbc = NULL;
	resync_lru = NULL;

	mdev->write_ordering = WO_bdev_flush;
	drbd_bump_write_ordering(mdev, WO_bdev_flush);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
		set_bit(CRASHED_PRIMARY, &mdev->flags);
		cp_discovered = 1;
	}

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;
	mdev->read_cnt = 0;
	mdev->writ_cnt = 0;

	max_bio_size = DRBD_MAX_BIO_SIZE;
	if (mdev->state.conn == C_CONNECTED) {
		/* We are Primary, Connected, and now attach a new local
		 * backing store. We must not increase the user visible maximum
		 * bio size on this device to something the peer may not be
		 * able to handle. */
		if (mdev->agreed_pro_version < 94)
			max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
		else if (mdev->agreed_pro_version == 94)
			max_bio_size = DRBD_MAX_SIZE_H80_PACKET;
		/* else: drbd 8.3.9 and later, stay with default */
	}

	drbd_setup_queue_param(mdev, max_bio_size);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	if (mdev->state.role != R_PRIMARY &&
	     drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &mdev->flags);

	dd = drbd_determin_dev_size(mdev, 0);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == grew)
		set_bit(RESYNC_AFTER_NEG, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
		     "(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (cp_discovered) {
		drbd_al_apply_to_bm(mdev);
		drbd_al_to_on_disk_bm(mdev);
	}

	if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
		drbd_suspend_al(mdev); /* IO is still suspended here... */

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;
	ns.i = os.i;
	/* If MDF_CONSISTENT is not set, go into inconsistent state;
	   otherwise investigate MDF_WAS_UP_TO_DATE:
	   if MDF_WAS_UP_TO_DATE is not set, go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	if (ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		   To avoid a race in receive_state, free p_uuid while
		   holding req_lock. I.e. atomic with the state change */
		kfree(mdev->p_uuid);
		mdev->p_uuid = NULL;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

 force_diskless_dec:
	put_ldev(mdev);
 force_diskless:
	drbd_force_state(mdev, NS(disk, D_FAILED));
	drbd_md_sync(mdev);
 fail:
	if (nbc) {
		if (nbc->backing_bdev)
			blkdev_put(nbc->backing_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		if (nbc->md_bdev)
			blkdev_put(nbc->md_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		kfree(nbc);
	}
	lc_destroy(resync_lru);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}

/* Detaching the disk is a process in multiple stages.  First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then have we finally detached. */
static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
	reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS));
	if (mdev->state.disk == D_DISKLESS)
		wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
	drbd_resume_io(mdev);
	return 0;
}

static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			    struct drbd_nl_cfg_reply *reply)
{
	int i, ns;
	enum drbd_ret_codes retcode;
	struct net_conf *new_conf = NULL;
	struct crypto_hash *tfm = NULL;
	struct crypto_hash *integrity_w_tfm = NULL;
	struct crypto_hash *integrity_r_tfm = NULL;
	struct hlist_head *new_tl_hash = NULL;
	struct hlist_head *new_ee_hash = NULL;
	struct drbd_conf *odev;
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	void *int_dig_out = NULL;
	void *int_dig_in = NULL;
	void *int_dig_vv = NULL;
	struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;

	drbd_reconfig_start(mdev);

	if (mdev->state.conn > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	new_conf->timeout	   = DRBD_TIMEOUT_DEF;
	new_conf->try_connect_int  = DRBD_CONNECT_INT_DEF;
	new_conf->ping_int	   = DRBD_PING_INT_DEF;
	new_conf->max_epoch_size   = DRBD_MAX_EPOCH_SIZE_DEF;
	new_conf->max_buffers	   = DRBD_MAX_BUFFERS_DEF;
	new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
	new_conf->sndbuf_size	   = DRBD_SNDBUF_SIZE_DEF;
	new_conf->rcvbuf_size	   = DRBD_RCVBUF_SIZE_DEF;
	new_conf->ko_count	   = DRBD_KO_COUNT_DEF;
	new_conf->after_sb_0p	   = DRBD_AFTER_SB_0P_DEF;
	new_conf->after_sb_1p	   = DRBD_AFTER_SB_1P_DEF;
	new_conf->after_sb_2p	   = DRBD_AFTER_SB_2P_DEF;
	new_conf->want_lose	   = 0;
	new_conf->two_primaries    = 0;
	new_conf->wire_protocol    = DRBD_PROT_C;
	new_conf->ping_timeo	   = DRBD_PING_TIMEO_DEF;
	new_conf->rr_conflict	   = DRBD_RR_CONFLICT_DEF;
	new_conf->on_congestion    = DRBD_ON_CONGESTION_DEF;
	new_conf->cong_extents     = DRBD_CONG_EXTENTS_DEF;

	if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (new_conf->two_primaries
	    && (new_conf->wire_protocol != DRBD_PROT_C)) {
		retcode = ERR_NOT_PROTO_C;
		goto fail;
	}

	if (get_ldev(mdev)) {
		enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
		if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}

	if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) {
		retcode = ERR_CONG_NOT_PROTO_A;
		goto fail;
	}

	if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
		retcode = ERR_DISCARD;
		goto fail;
	}

	retcode = NO_ERROR;

	new_my_addr = (struct sockaddr *)&new_conf->my_addr;
	new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev || odev == mdev)
			continue;
		if (get_net_conf(odev)) {
			taken_addr = (struct sockaddr *)&odev->net_conf->my_addr;
			if (new_conf->my_addr_len == odev->net_conf->my_addr_len &&
			    !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
				retcode = ERR_LOCAL_ADDR;

			taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr;
			if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len &&
			    !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
				retcode = ERR_PEER_ADDR;

			put_net_conf(odev);
			if (retcode != NO_ERROR)
				goto fail;
		}
	}

	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			new_conf->cram_hmac_alg);
		tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm)) {
			tfm = NULL;
			retcode = ERR_AUTH_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
			retcode = ERR_AUTH_ALG_ND;
			goto fail;
		}
	}

	if (new_conf->integrity_alg[0]) {
		integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_w_tfm)) {
			integrity_w_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
			retcode = ERR_INTEGRITY_ALG_ND;
			goto fail;
		}

		integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_r_tfm)) {
			integrity_r_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}
	}

	ns = new_conf->max_epoch_size/8;
	if (mdev->tl_hash_s != ns) {
		new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_tl_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	ns = new_conf->max_buffers/8;
	if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) {
		new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_ee_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	if (integrity_w_tfm) {
		i = crypto_hash_digestsize(integrity_w_tfm);
		int_dig_out = kmalloc(i, GFP_KERNEL);
		if (!int_dig_out) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_in = kmalloc(i, GFP_KERNEL);
		if (!int_dig_in) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_vv = kmalloc(i, GFP_KERNEL);
		if (!int_dig_vv) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	drbd_flush_workqueue(mdev);
	spin_lock_irq(&mdev->req_lock);
	if (mdev->net_conf != NULL) {
		retcode = ERR_NET_CONFIGURED;
		spin_unlock_irq(&mdev->req_lock);
		goto fail;
	}
	mdev->net_conf = new_conf;

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;

	if (new_tl_hash) {
		kfree(mdev->tl_hash);
		mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8;
		mdev->tl_hash = new_tl_hash;
	}

	if (new_ee_hash) {
		kfree(mdev->ee_hash);
		mdev->ee_hash_s = mdev->net_conf->max_buffers/8;
		mdev->ee_hash = new_ee_hash;
	}

	crypto_free_hash(mdev->cram_hmac_tfm);
	mdev->cram_hmac_tfm = tfm;

	crypto_free_hash(mdev->integrity_w_tfm);
	mdev->integrity_w_tfm = integrity_w_tfm;

	crypto_free_hash(mdev->integrity_r_tfm);
	mdev->integrity_r_tfm = integrity_r_tfm;

	kfree(mdev->int_dig_out);
	kfree(mdev->int_dig_in);
	kfree(mdev->int_dig_vv);
	mdev->int_dig_out = int_dig_out;
	mdev->int_dig_in = int_dig_in;
	mdev->int_dig_vv = int_dig_vv;
	retcode = _drbd_set_state(_NS(mdev, conn, C_UNCONNECTED), CS_VERBOSE, NULL);
	spin_unlock_irq(&mdev->req_lock);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

fail:
	kfree(int_dig_out);
	kfree(int_dig_in);
	kfree(int_dig_vv);
	crypto_free_hash(tfm);
	crypto_free_hash(integrity_w_tfm);
	crypto_free_hash(integrity_r_tfm);
	kfree(new_tl_hash);
	kfree(new_ee_hash);
	kfree(new_conf);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}

static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED);

	if (retcode == SS_NOTHING_TO_DO)
		goto done;
	else if (retcode == SS_ALREADY_STANDALONE)
		goto done;
	else if (retcode == SS_PRIMARY_NOP) {
		/* Our state checking code wants to see the peer outdated. */
		retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
						      pdsk, D_OUTDATED));
	} else if (retcode == SS_CW_FAILED_BY_PEER) {
		/* The peer probably wants to see us outdated. */
		retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
							disk, D_OUTDATED),
					      CS_ORDERED);
		if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			retcode = SS_SUCCESS;
		}
	}

	if (retcode < SS_SUCCESS)
		goto fail;

	if (wait_event_interruptible(mdev->state_wait,
				     mdev->state.conn != C_DISCONNECTING)) {
		/* Do not test for mdev->state.conn == C_STANDALONE, since
		   someone else might connect us in the meantime! */
		retcode = ERR_INTR;
		goto fail;
	}

 done:
	retcode = NO_ERROR;
 fail:
	drbd_md_sync(mdev);
	reply->ret_code = retcode;
	return 0;
}

void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
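/* If the roles differ, the Primary becomes the sync source; with equal
 * roles, the DISCARD_CONCURRENT bit (decided during the handshake)
 * breaks the tie. */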
1590
1591static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1592			  struct drbd_nl_cfg_reply *reply)
1593{
1594	struct resize rs;
1595	int retcode = NO_ERROR;
1596	enum determine_dev_size dd;
1597	enum dds_flags ddsf;
1598
1599	memset(&rs, 0, sizeof(struct resize));
1600	if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
1601		retcode = ERR_MANDATORY_TAG;
1602		goto fail;
1603	}
1604
1605	if (mdev->state.conn > C_CONNECTED) {
1606		retcode = ERR_RESIZE_RESYNC;
1607		goto fail;
1608	}
1609
1610	if (mdev->state.role == R_SECONDARY &&
1611	    mdev->state.peer == R_SECONDARY) {
1612		retcode = ERR_NO_PRIMARY;
1613		goto fail;
1614	}
1615
1616	if (!get_ldev(mdev)) {
1617		retcode = ERR_NO_DISK;
1618		goto fail;
1619	}
1620
1621	if (rs.no_resync && mdev->agreed_pro_version < 93) {
1622		retcode = ERR_NEED_APV_93;
1623		goto fail;
1624	}
1625
1626	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
1627		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
1628
1629	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
1630	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
1631	dd = drbd_determin_dev_size(mdev, ddsf);
1632	drbd_md_sync(mdev);
1633	put_ldev(mdev);
1634	if (dd == dev_size_error) {
1635		retcode = ERR_NOMEM_BITMAP;
1636		goto fail;
1637	}
1638
1639	if (mdev->state.conn == C_CONNECTED) {
1640		if (dd == grew)
1641			set_bit(RESIZE_PENDING, &mdev->flags);
1642
1643		drbd_send_uuids(mdev);
1644		drbd_send_sizes(mdev, 1, ddsf);
1645	}
1646
1647 fail:
1648	reply->ret_code = retcode;
1649	return 0;
1650}
1651
1652static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1653			       struct drbd_nl_cfg_reply *reply)
1654{
1655	int retcode = NO_ERROR;
1656	int err;
1657	int ovr; /* online verify running */
1658	int rsr; /* re-sync running */
1659	struct crypto_hash *verify_tfm = NULL;
1660	struct crypto_hash *csums_tfm = NULL;
1661	struct syncer_conf sc;
1662	cpumask_var_t new_cpu_mask;
1663	int *rs_plan_s = NULL;
1664	int fifo_size;
1665
1666	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
1667		retcode = ERR_NOMEM;
1668		goto fail;
1669	}
1670
1671	if (nlp->flags & DRBD_NL_SET_DEFAULTS) {
1672		memset(&sc, 0, sizeof(struct syncer_conf));
1673		sc.rate       = DRBD_RATE_DEF;
1674		sc.after      = DRBD_AFTER_DEF;
1675		sc.al_extents = DRBD_AL_EXTENTS_DEF;
1676		sc.on_no_data  = DRBD_ON_NO_DATA_DEF;
1677		sc.c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF;
1678		sc.c_delay_target = DRBD_C_DELAY_TARGET_DEF;
1679		sc.c_fill_target = DRBD_C_FILL_TARGET_DEF;
1680		sc.c_max_rate = DRBD_C_MAX_RATE_DEF;
1681		sc.c_min_rate = DRBD_C_MIN_RATE_DEF;
1682	} else
1683		memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));
1684
1685	if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) {
1686		retcode = ERR_MANDATORY_TAG;
1687		goto fail;
1688	}
1689
1690	/* re-sync running */
1691	rsr = (	mdev->state.conn == C_SYNC_SOURCE ||
1692		mdev->state.conn == C_SYNC_TARGET ||
1693		mdev->state.conn == C_PAUSED_SYNC_S ||
1694		mdev->state.conn == C_PAUSED_SYNC_T );
1695
1696	if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
1697		retcode = ERR_CSUMS_RESYNC_RUNNING;
1698		goto fail;
1699	}
1700
1701	if (!rsr && sc.csums_alg[0]) {
1702		csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
1703		if (IS_ERR(csums_tfm)) {
1704			csums_tfm = NULL;
1705			retcode = ERR_CSUMS_ALG;
1706			goto fail;
1707		}
1708
1709		if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
1710			retcode = ERR_CSUMS_ALG_ND;
1711			goto fail;
1712		}
1713	}
1714
1715	/* online verify running */
1716	ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);
1717
1718	if (ovr) {
1719		if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
1720			retcode = ERR_VERIFY_RUNNING;
1721			goto fail;
1722		}
1723	}
1724
1725	if (!ovr && sc.verify_alg[0]) {
1726		verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
1727		if (IS_ERR(verify_tfm)) {
1728			verify_tfm = NULL;
1729			retcode = ERR_VERIFY_ALG;
1730			goto fail;
1731		}
1732
1733		if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
1734			retcode = ERR_VERIFY_ALG_ND;
1735			goto fail;
1736		}
1737	}
1738
1739	/* silently ignore cpu mask on UP kernel */
1740	if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
1741		err = __bitmap_parse(sc.cpu_mask, 32, 0,
1742				cpumask_bits(new_cpu_mask), nr_cpu_ids);
1743		if (err) {
1744			dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
1745			retcode = ERR_CPU_MASK_PARSE;
1746			goto fail;
1747		}
1748	}
1749
1750	ERR_IF (sc.rate < 1) sc.rate = 1;
1751	ERR_IF (sc.al_extents < 7) sc.al_extents = 127; /* arbitrary minimum */
1752#define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT)
1753	if (sc.al_extents > AL_MAX) {
1754		dev_err(DEV, "sc.al_extents > %d\n", AL_MAX);
1755		sc.al_extents = AL_MAX;
1756	}
1757#undef AL_MAX
1758
1759	/* to avoid spurious errors when configuring minors before configuring
1760	 * the minors they depend on: if necessary, first create the minor we
1761	 * depend on */
1762	if (sc.after >= 0)
1763		ensure_mdev(sc.after, 1);
1764
1765	/* most sanity checks done, try to assign the new sync-after
1766	 * dependency.  need to hold the global lock in there,
1767	 * to avoid a race in the dependency loop check. */
1768	retcode = drbd_alter_sa(mdev, sc.after);
1769	if (retcode != NO_ERROR)
1770		goto fail;
1771
1772	fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
1773	if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
1774		rs_plan_s   = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
1775		if (!rs_plan_s) {
1776			dev_err(DEV, "kmalloc of fifo_buffer failed");
1777			retcode = ERR_NOMEM;
1778			goto fail;
1779		}
1780	}
1781
1782	/* ok, assign the rest of it as well.
1783	 * lock against receive_SyncParam() */
1784	spin_lock(&mdev->peer_seq_lock);
1785	mdev->sync_conf = sc;
1786
1787	if (!rsr) {
1788		crypto_free_hash(mdev->csums_tfm);
1789		mdev->csums_tfm = csums_tfm;
1790		csums_tfm = NULL;
1791	}
1792
1793	if (!ovr) {
1794		crypto_free_hash(mdev->verify_tfm);
1795		mdev->verify_tfm = verify_tfm;
1796		verify_tfm = NULL;
1797	}
1798
1799	if (fifo_size != mdev->rs_plan_s.size) {
1800		kfree(mdev->rs_plan_s.values);
1801		mdev->rs_plan_s.values = rs_plan_s;
1802		mdev->rs_plan_s.size   = fifo_size;
1803		mdev->rs_planed = 0;
1804		rs_plan_s = NULL;
1805	}
1806
1807	spin_unlock(&mdev->peer_seq_lock);
1808
1809	if (get_ldev(mdev)) {
1810		wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
1811		drbd_al_shrink(mdev);
1812		err = drbd_check_al_size(mdev);
1813		lc_unlock(mdev->act_log);
1814		wake_up(&mdev->al_wait);
1815
1816		put_ldev(mdev);
1817		drbd_md_sync(mdev);
1818
1819		if (err) {
1820			retcode = ERR_NOMEM;
1821			goto fail;
1822		}
1823	}
1824
1825	if (mdev->state.conn >= C_CONNECTED)
1826		drbd_send_sync_param(mdev, &sc);
1827
1828	if (!cpumask_equal(mdev->cpu_mask, new_cpu_mask)) {
1829		cpumask_copy(mdev->cpu_mask, new_cpu_mask);
1830		drbd_calc_cpu_mask(mdev);
1831		mdev->receiver.reset_cpu_mask = 1;
1832		mdev->asender.reset_cpu_mask = 1;
1833		mdev->worker.reset_cpu_mask = 1;
1834	}
1835
1836	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1837fail:
1838	kfree(rs_plan_s);
1839	free_cpumask_var(new_cpu_mask);
1840	crypto_free_hash(csums_tfm);
1841	crypto_free_hash(verify_tfm);
1842	reply->ret_code = retcode;
1843	return 0;
1844}
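/*
 * Note on the update pattern in drbd_nl_syncer_conf() above: the new
 * crypto transforms and fifo buffer are allocated while the old
 * configuration is still live, the pointers are swapped under
 * mdev->peer_seq_lock (which receive_SyncParam() also takes), and
 * whatever was replaced -- or, on the failure path, never installed --
 * is freed once the lock is dropped.
 */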
1845
1846static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1847			      struct drbd_nl_cfg_reply *reply)
1848{
1849	int retcode;
1850
1851	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
1852
1853	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
1854		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
1855
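	/* When we are not connected, C_STARTING_SYNC_T is refused with
	 * SS_NEED_CONNECTION; in that case just mark the local disk
	 * D_INCONSISTENT instead.  The loop closes the race between
	 * sampling mdev->state.conn and the state request. */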
1856	while (retcode == SS_NEED_CONNECTION) {
1857		spin_lock_irq(&mdev->req_lock);
1858		if (mdev->state.conn < C_CONNECTED)
1859			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
1860		spin_unlock_irq(&mdev->req_lock);
1861
1862		if (retcode != SS_NEED_CONNECTION)
1863			break;
1864
1865		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
1866	}
1867
1868	reply->ret_code = retcode;
1869	return 0;
1870}
1871
1872static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
1873{
1874	int rv;
1875
1876	rv = drbd_bmio_set_n_write(mdev);
1877	drbd_suspend_al(mdev);
1878	return rv;
1879}
1880
1881static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1882				   struct drbd_nl_cfg_reply *reply)
1883{
1884	int retcode;
1885
1886	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
1887
1888	if (retcode < SS_SUCCESS) {
1889		if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
1890		/* The peer will get a resync upon connect anyway. Just make that
1891		   into a full resync. */
1892			retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
1893			if (retcode >= SS_SUCCESS) {
1894				/* open coded drbd_bitmap_io() */
1895				if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
1896						   "set_n_write from invalidate_peer"))
1897					retcode = ERR_IO_MD_DISK;
1898			}
1899		} else
1900			retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
1901	}
1902
1903	reply->ret_code = retcode;
1904	return 0;
1905}
1906
1907static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1908			      struct drbd_nl_cfg_reply *reply)
1909{
1910	int retcode = NO_ERROR;
1911
1912	if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
1913		retcode = ERR_PAUSE_IS_SET;
1914
1915	reply->ret_code = retcode;
1916	return 0;
1917}
1918
1919static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1920			       struct drbd_nl_cfg_reply *reply)
1921{
1922	int retcode = NO_ERROR;
1923
1924	if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO)
1925		retcode = ERR_PAUSE_IS_CLEAR;
1926
1927	reply->ret_code = retcode;
1928	return 0;
1929}
1930
1931static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1932			      struct drbd_nl_cfg_reply *reply)
1933{
1934	reply->ret_code = drbd_request_state(mdev, NS(susp, 1));
1935
1936	return 0;
1937}
1938
1939static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1940			     struct drbd_nl_cfg_reply *reply)
1941{
1942	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1943		drbd_uuid_new_current(mdev);
1944		clear_bit(NEW_CUR_UUID, &mdev->flags);
1945	}
1946	drbd_suspend_io(mdev);
1947	reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
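	/* If we lost the connection while IO was frozen, the transfer log
	 * still holds requests we can no longer send: drop them.  If the
	 * local disk is gone as well, complete the frozen requests with an
	 * error instead of retrying them. */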
1948	if (reply->ret_code == SS_SUCCESS) {
1949		if (mdev->state.conn < C_CONNECTED)
1950			tl_clear(mdev);
1951		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
1952			tl_restart(mdev, fail_frozen_disk_io);
1953	}
1954	drbd_resume_io(mdev);
1955
1956	return 0;
1957}
1958
1959static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1960			   struct drbd_nl_cfg_reply *reply)
1961{
1962	reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED));
1963	return 0;
1964}
1965
1966static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1967			   struct drbd_nl_cfg_reply *reply)
1968{
1969	unsigned short *tl;
1970
1971	tl = reply->tag_list;
1972
1973	if (get_ldev(mdev)) {
1974		tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl);
1975		put_ldev(mdev);
1976	}
1977
1978	if (get_net_conf(mdev)) {
1979		tl = net_conf_to_tags(mdev, mdev->net_conf, tl);
1980		put_net_conf(mdev);
1981	}
1982	tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl);
1983
1984	put_unaligned(TT_END, tl++); /* Close the tag list */
1985
1986	return (int)((char *)tl - (char *)reply->tag_list);
1987}
1988
1989static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1990			     struct drbd_nl_cfg_reply *reply)
1991{
1992	unsigned short *tl = reply->tag_list;
1993	union drbd_state s = mdev->state;
1994	unsigned long rs_left;
1995	unsigned int res;
1996
1997	tl = get_state_to_tags(mdev, (struct get_state *)&s, tl);
1998
1999	/* no local ref, no bitmap, no syncer progress. */
2000	if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) {
2001		if (get_ldev(mdev)) {
2002			drbd_get_syncer_progress(mdev, &rs_left, &res);
2003			tl = tl_add_int(tl, T_sync_progress, &res);
2004			put_ldev(mdev);
2005		}
2006	}
2007	put_unaligned(TT_END, tl++); /* Close the tag list */
2008
2009	return (int)((char *)tl - (char *)reply->tag_list);
2010}
2011
2012static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
2013			     struct drbd_nl_cfg_reply *reply)
2014{
2015	unsigned short *tl;
2016
2017	tl = reply->tag_list;
2018
2019	if (get_ldev(mdev)) {
2020		tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64));
2021		tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags);
2022		put_ldev(mdev);
2023	}
2024	put_unaligned(TT_END, tl++); /* Close the tag list */
2025
2026	return (int)((char *)tl - (char *)reply->tag_list);
2027}
2028
2029/**
2030 * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use
2031 * @mdev:	DRBD device.
2032 * @nlp:	Netlink/connector packet from drbdsetup
2033 * @reply:	Reply packet for drbdsetup
2034 */
2035static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
2036				    struct drbd_nl_cfg_reply *reply)
2037{
2038	unsigned short *tl;
2039	char rv;
2040
2041	tl = reply->tag_list;
2042
2043	rv = mdev->state.pdsk == D_OUTDATED        ? UT_PEER_OUTDATED :
2044	     test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;
2045
2046	tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
2047	put_unaligned(TT_END, tl++); /* Close the tag list */
2048
2049	return (int)((char *)tl - (char *)reply->tag_list);
2050}
2051
2052static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
2053				    struct drbd_nl_cfg_reply *reply)
2054{
2055	/* default to resume from last known position, if possible */
2056	struct start_ov args =
2057		{ .start_sector = mdev->ov_start_sector };
2058
2059	if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) {
2060		reply->ret_code = ERR_MANDATORY_TAG;
2061		return 0;
2062	}
2063	/* w_make_ov_request expects position to be aligned */
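	/* e.g. with BM_SECT_PER_BIT == 8 (one bitmap bit per 4KiB, counted
	 * in 512-byte sectors), ~(BM_SECT_PER_BIT-1) == ~7UL rounds a
	 * start sector of 1234 down to 1232 */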
2064	mdev->ov_start_sector = args.start_sector & ~(BM_SECT_PER_BIT-1);
2065	reply->ret_code = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
2066	return 0;
2067}
2068
2069
2070static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
2071			      struct drbd_nl_cfg_reply *reply)
2072{
2073	int retcode = NO_ERROR;
2074	int skip_initial_sync = 0;
2075	int err;
2076
2077	struct new_c_uuid args;
2078
2079	memset(&args, 0, sizeof(struct new_c_uuid));
2080	if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) {
2081		reply->ret_code = ERR_MANDATORY_TAG;
2082		return 0;
2083	}
2084
2085	mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */
2086
2087	if (!get_ldev(mdev)) {
2088		retcode = ERR_NO_DISK;
2089		goto out;
2090	}
2091
2092	/* this is "skip initial sync", assume to be clean */
2093	if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 &&
2094	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
2095		dev_info(DEV, "Preparing to skip initial sync\n");
2096		skip_initial_sync = 1;
2097	} else if (mdev->state.conn != C_STANDALONE) {
2098		retcode = ERR_CONNECTED;
2099		goto out_dec;
2100	}
2101
2102	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
2103	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
2104
2105	if (args.clear_bm) {
2106		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid");
2107		if (err) {
2108			dev_err(DEV, "Writing bitmap failed with %d\n", err);
2109			retcode = ERR_IO_MD_DISK;
2110		}
2111		if (skip_initial_sync) {
2112			drbd_send_uuids_skip_initial_sync(mdev);
2113			_drbd_uuid_set(mdev, UI_BITMAP, 0);
2114			spin_lock_irq(&mdev->req_lock);
2115			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
2116					CS_VERBOSE, NULL);
2117			spin_unlock_irq(&mdev->req_lock);
2118		}
2119	}
2120
2121	drbd_md_sync(mdev);
2122out_dec:
2123	put_ldev(mdev);
2124out:
2125	mutex_unlock(&mdev->state_mutex);
2126
2127	reply->ret_code = retcode;
2128	return 0;
2129}
2130
2131struct cn_handler_struct {
2132	int (*function)(struct drbd_conf *,
2133			 struct drbd_nl_cfg_req *,
2134			 struct drbd_nl_cfg_reply *);
2135	int reply_body_size;
2136};
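/*
 * Table-driven dispatch: the connector packet type indexes cnd_table
 * below, and reply_body_size tells drbd_connector_callback() how much
 * extra room to allocate for the handler's tag list reply (0 means the
 * handler only returns a ret_code).
 */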
2137
2138static struct cn_handler_struct cnd_table[] = {
2139	[ P_primary ]		= { &drbd_nl_primary,		0 },
2140	[ P_secondary ]		= { &drbd_nl_secondary,		0 },
2141	[ P_disk_conf ]		= { &drbd_nl_disk_conf,		0 },
2142	[ P_detach ]		= { &drbd_nl_detach,		0 },
2143	[ P_net_conf ]		= { &drbd_nl_net_conf,		0 },
2144	[ P_disconnect ]	= { &drbd_nl_disconnect,	0 },
2145	[ P_resize ]		= { &drbd_nl_resize,		0 },
2146	[ P_syncer_conf ]	= { &drbd_nl_syncer_conf,	0 },
2147	[ P_invalidate ]	= { &drbd_nl_invalidate,	0 },
2148	[ P_invalidate_peer ]	= { &drbd_nl_invalidate_peer,	0 },
2149	[ P_pause_sync ]	= { &drbd_nl_pause_sync,	0 },
2150	[ P_resume_sync ]	= { &drbd_nl_resume_sync,	0 },
2151	[ P_suspend_io ]	= { &drbd_nl_suspend_io,	0 },
2152	[ P_resume_io ]		= { &drbd_nl_resume_io,		0 },
2153	[ P_outdate ]		= { &drbd_nl_outdate,		0 },
2154	[ P_get_config ]	= { &drbd_nl_get_config,
2155				    sizeof(struct syncer_conf_tag_len_struct) +
2156				    sizeof(struct disk_conf_tag_len_struct) +
2157				    sizeof(struct net_conf_tag_len_struct) },
2158	[ P_get_state ]		= { &drbd_nl_get_state,
2159				    sizeof(struct get_state_tag_len_struct) +
2160				    sizeof(struct sync_progress_tag_len_struct)	},
2161	[ P_get_uuids ]		= { &drbd_nl_get_uuids,
2162				    sizeof(struct get_uuids_tag_len_struct) },
2163	[ P_get_timeout_flag ]	= { &drbd_nl_get_timeout_flag,
2164				    sizeof(struct get_timeout_flag_tag_len_struct)},
2165	[ P_start_ov ]		= { &drbd_nl_start_ov,		0 },
2166	[ P_new_c_uuid ]	= { &drbd_nl_new_c_uuid,	0 },
2167};
2168
2169static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp)
2170{
2171	struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data;
2172	struct cn_handler_struct *cm;
2173	struct cn_msg *cn_reply;
2174	struct drbd_nl_cfg_reply *reply;
2175	struct drbd_conf *mdev;
2176	int retcode, rr;
2177	int reply_size = sizeof(struct cn_msg)
2178		+ sizeof(struct drbd_nl_cfg_reply)
2179		+ sizeof(short int);
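	/* the trailing sizeof(short int) is room for the TT_END
	 * terminator that every handler appends to its tag list */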
2180
2181	if (!try_module_get(THIS_MODULE)) {
2182		printk(KERN_ERR "drbd: try_module_get() failed!\n");
2183		return;
2184	}
2185
2186	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) {
2187		retcode = ERR_PERM;
2188		goto fail;
2189	}
2190
2191	mdev = ensure_mdev(nlp->drbd_minor,
2192			(nlp->flags & DRBD_NL_CREATE_DEVICE));
2193	if (!mdev) {
2194		retcode = ERR_MINOR_INVALID;
2195		goto fail;
2196	}
2197
2198	if (nlp->packet_type >= P_nl_after_last_packet) {
2199		retcode = ERR_PACKET_NR;
2200		goto fail;
2201	}
2202
2203	cm = cnd_table + nlp->packet_type;
2204
2205	/* This may happen if packet number is 0: */
2206	if (cm->function == NULL) {
2207		retcode = ERR_PACKET_NR;
2208		goto fail;
2209	}
2210
2211	reply_size += cm->reply_body_size;
2212
2213	/* allocation not in the IO path, cqueue thread context */
2214	cn_reply = kzalloc(reply_size, GFP_KERNEL);
2215	if (!cn_reply) {
2216		retcode = ERR_NOMEM;
2217		goto fail;
2218	}
2219	reply = (struct drbd_nl_cfg_reply *) cn_reply->data;
2220
2221	reply->packet_type =
2222		cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet;
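	/* handlers without a reply body answer with the pseudo packet
	 * type P_nl_after_last_packet: only ret_code is meaningful */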
2223	reply->minor = nlp->drbd_minor;
2224	reply->ret_code = NO_ERROR; /* Might be modified by cm->function. */
2225	/* reply->tag_list; might be modified by cm->function. */
2226
2227	rr = cm->function(mdev, nlp, reply);
2228
2229	cn_reply->id = req->id;
2230	cn_reply->seq = req->seq;
2231	cn_reply->ack = req->ack + 1;
2232	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr;
2233	cn_reply->flags = 0;
2234
2235	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
2236	if (rr && rr != -ESRCH)
2237		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
2238
2239	kfree(cn_reply);
2240	module_put(THIS_MODULE);
2241	return;
2242 fail:
2243	drbd_nl_send_reply(req, retcode);
2244	module_put(THIS_MODULE);
2245}
2246
2247static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
2248
2249static unsigned short *
2250__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
2251	unsigned short len, int nul_terminated)
2252{
2253	unsigned short l = tag_descriptions[tag_number(tag)].max_len;
2254	len = (len < l) ? len :  l;
2255	put_unaligned(tag, tl++);
2256	put_unaligned(len, tl++);
2257	memcpy(tl, data, len);
2258	tl = (unsigned short *)((char *)tl + len);
2259	if (nul_terminated)
2260		*((char *)tl - 1) = 0;
2261	return tl;
2262}
2263
2264static unsigned short *
2265tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len)
2266{
2267	return __tl_add_blob(tl, tag, data, len, 0);
2268}
2269
2270static unsigned short *
2271tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str)
2272{
2273	return __tl_add_blob(tl, tag, str, strlen(str)+1, 0);
2274}
2275
2276static unsigned short *
2277tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val)
2278{
2279	put_unaligned(tag, tl++);
2280	switch (tag_type(tag)) {
2281	case TT_INTEGER:
2282		put_unaligned(sizeof(int), tl++);
2283		put_unaligned(*(int *)val, (int *)tl);
2284		tl = (unsigned short *)((char *)tl + sizeof(int));
2285		break;
2286	case TT_INT64:
2287		put_unaligned(sizeof(u64), tl++);
2288		put_unaligned(*(u64 *)val, (u64 *)tl);
2289		tl = (unsigned short *)((char *)tl + sizeof(u64));
2290		break;
2291	default:
2292		/* someone did something stupid. */
2293		;
2294	}
2295	return tl;
2296}
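/*
 * The tl_add_* helpers above emit "tag, length, value" triples:
 *
 *	u16 tag;	(tag number plus type bits, maybe T_MANDATORY)
 *	u16 len;	(payload length in bytes)
 *	u8  data[len];
 *
 * terminated by a single u16 TT_END.  A minimal, illustrative use
 * (hypothetical values, not taken from a real handler):
 *
 *	unsigned short *tl = reply->tag_list;
 *	int progress = 42;
 *
 *	tl = tl_add_int(tl, T_sync_progress, &progress);
 *	tl = tl_add_str(tl, T_helper, "fence-peer");
 *	put_unaligned(TT_END, tl++);
 *	len = (char *)tl - (char *)reply->tag_list;
 */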
2297
2298void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
2299{
2300	char buffer[sizeof(struct cn_msg)+
2301		    sizeof(struct drbd_nl_cfg_reply)+
2302		    sizeof(struct get_state_tag_len_struct)+
2303		    sizeof(short int)];
2304	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
2305	struct drbd_nl_cfg_reply *reply =
2306		(struct drbd_nl_cfg_reply *)cn_reply->data;
2307	unsigned short *tl = reply->tag_list;
2308
2309	/* dev_warn(DEV, "drbd_bcast_state() got called\n"); */
2310
2311	tl = get_state_to_tags(mdev, (struct get_state *)&state, tl);
2312
2313	put_unaligned(TT_END, tl++); /* Close the tag list */
2314
2315	cn_reply->id.idx = CN_IDX_DRBD;
2316	cn_reply->id.val = CN_VAL_DRBD;
2317
2318	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
2319	cn_reply->ack = 0; /* not used here. */
2320	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
2321		(int)((char *)tl - (char *)reply->tag_list);
2322	cn_reply->flags = 0;
2323
2324	reply->packet_type = P_get_state;
2325	reply->minor = mdev_to_minor(mdev);
2326	reply->ret_code = NO_ERROR;
2327
2328	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
2329}
2330
2331void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
2332{
2333	char buffer[sizeof(struct cn_msg)+
2334		    sizeof(struct drbd_nl_cfg_reply)+
2335		    sizeof(struct call_helper_tag_len_struct)+
2336		    sizeof(short int)];
2337	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
2338	struct drbd_nl_cfg_reply *reply =
2339		(struct drbd_nl_cfg_reply *)cn_reply->data;
2340	unsigned short *tl = reply->tag_list;
2341
2342	/* dev_warn(DEV, "drbd_bcast_ev_helper() got called\n"); */
2343
2344	tl = tl_add_str(tl, T_helper, helper_name);
2345	put_unaligned(TT_END, tl++); /* Close the tag list */
2346
2347	cn_reply->id.idx = CN_IDX_DRBD;
2348	cn_reply->id.val = CN_VAL_DRBD;
2349
2350	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
2351	cn_reply->ack = 0; /* not used here. */
2352	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
2353		(int)((char *)tl - (char *)reply->tag_list);
2354	cn_reply->flags = 0;
2355
2356	reply->packet_type = P_call_helper;
2357	reply->minor = mdev_to_minor(mdev);
2358	reply->ret_code = NO_ERROR;
2359
2360	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
2361}
2362
2363void drbd_bcast_ee(struct drbd_conf *mdev,
2364		const char *reason, const int dgs,
2365		const char* seen_hash, const char* calc_hash,
2366		const struct drbd_epoch_entry* e)
2367{
2368	struct cn_msg *cn_reply;
2369	struct drbd_nl_cfg_reply *reply;
2370	unsigned short *tl;
2371	struct page *page;
2372	unsigned len;
2373
2374	if (!e)
2375		return;
2376	if (!reason || !reason[0])
2377		return;
2378
2379	/* apparently we have to memcpy twice, first to prepare the data for the
2380	 * struct cn_msg, then within cn_netlink_send from the cn_msg to the
2381	 * netlink skb. */
2382	/* receiver thread context, which is not in the writeout path (of this node),
2383	 * but may be in the writeout path of the _other_ node.
2384	 * GFP_NOIO to avoid potential "distributed deadlock". */
2385	cn_reply = kzalloc(
2386		sizeof(struct cn_msg)+
2387		sizeof(struct drbd_nl_cfg_reply)+
2388		sizeof(struct dump_ee_tag_len_struct)+
2389		sizeof(short int),
2390		GFP_NOIO);
2391
2392	if (!cn_reply) {
2393		dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n",
2394				(unsigned long long)e->sector, e->size);
2395		return;
2396	}
2397
2398	reply = (struct drbd_nl_cfg_reply *)cn_reply->data;
2399	tl = reply->tag_list;
2400
2401	tl = tl_add_str(tl, T_dump_ee_reason, reason);
2402	tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs);
2403	tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs);
2404	tl = tl_add_int(tl, T_ee_sector, &e->sector);
2405	tl = tl_add_int(tl, T_ee_block_id, &e->block_id);
2406
2407	/* dump the first 32k */
2408	len = min_t(unsigned, e->size, 32 << 10);
2409	put_unaligned(T_ee_data, tl++);
2410	put_unaligned(len, tl++);
2411
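	/* an epoch entry's payload may span several pages: walk the page
	 * chain and copy at most PAGE_SIZE per iteration until the
	 * (possibly truncated) dump length is used up */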
2412	page = e->pages;
2413	page_chain_for_each(page) {
2414		void *d = kmap_atomic(page, KM_USER0);
2415		unsigned l = min_t(unsigned, len, PAGE_SIZE);
2416		memcpy(tl, d, l);
2417		kunmap_atomic(d, KM_USER0);
2418		tl = (unsigned short *)((char *)tl + l);
2419		len -= l;
2420		if (len == 0)
2421			break;
2422	}
2423	put_unaligned(TT_END, tl++); /* Close the tag list */
2424
2425	cn_reply->id.idx = CN_IDX_DRBD;
2426	cn_reply->id.val = CN_VAL_DRBD;
2427
2428	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
2429	cn_reply->ack = 0; /* not used here. */
2430	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
2431		(int)((char *)tl - (char *)reply->tag_list);
2432	cn_reply->flags = 0;
2433
2434	reply->packet_type = P_dump_ee;
2435	reply->minor = mdev_to_minor(mdev);
2436	reply->ret_code = NO_ERROR;
2437
2438	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
2439	kfree(cn_reply);
2440}
2441
2442void drbd_bcast_sync_progress(struct drbd_conf *mdev)
2443{
2444	char buffer[sizeof(struct cn_msg)+
2445		    sizeof(struct drbd_nl_cfg_reply)+
2446		    sizeof(struct sync_progress_tag_len_struct)+
2447		    sizeof(short int)];
2448	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
2449	struct drbd_nl_cfg_reply *reply =
2450		(struct drbd_nl_cfg_reply *)cn_reply->data;
2451	unsigned short *tl = reply->tag_list;
2452	unsigned long rs_left;
2453	unsigned int res;
2454
2455	/* no local ref, no bitmap, no syncer progress, no broadcast. */
2456	if (!get_ldev(mdev))
2457		return;
2458	drbd_get_syncer_progress(mdev, &rs_left, &res);
2459	put_ldev(mdev);
2460
2461	tl = tl_add_int(tl, T_sync_progress, &res);
2462	put_unaligned(TT_END, tl++); /* Close the tag list */
2463
2464	cn_reply->id.idx = CN_IDX_DRBD;
2465	cn_reply->id.val = CN_VAL_DRBD;
2466
2467	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
2468	cn_reply->ack = 0; /* not used here. */
2469	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
2470		(int)((char *)tl - (char *)reply->tag_list);
2471	cn_reply->flags = 0;
2472
2473	reply->packet_type = P_sync_progress;
2474	reply->minor = mdev_to_minor(mdev);
2475	reply->ret_code = NO_ERROR;
2476
2477	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
2478}
2479
2480int __init drbd_nl_init(void)
2481{
2482	static struct cb_id cn_id_drbd;
2483	int err, try = 10;
2484
2485	cn_id_drbd.val = CN_VAL_DRBD;
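	/* if the configured connector index is already taken, probe the
	 * next few indices (cn_idx + n * CN_IDX_STEP) before giving up */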
2486	do {
2487		cn_id_drbd.idx = cn_idx;
2488		err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback);
2489		if (!err)
2490			break;
2491		cn_idx += CN_IDX_STEP;
2492	} while (try--);
2493
2494	if (err) {
2495		printk(KERN_ERR "drbd: cn_drbd failed to register\n");
2496		return err;
2497	}
2498
2499	return 0;
2500}
2501
2502void drbd_nl_cleanup(void)
2503{
2504	static struct cb_id cn_id_drbd;
2505
2506	cn_id_drbd.idx = cn_idx;
2507	cn_id_drbd.val = CN_VAL_DRBD;
2508
2509	cn_del_callback(&cn_id_drbd);
2510}
2511
2512void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
2513{
2514	char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)];
2515	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
2516	struct drbd_nl_cfg_reply *reply =
2517		(struct drbd_nl_cfg_reply *)cn_reply->data;
2518	int rr;
2519
2520	memset(buffer, 0, sizeof(buffer));
2521	cn_reply->id = req->id;
2522
2523	cn_reply->seq = req->seq;
2524	cn_reply->ack = req->ack + 1;
2525	cn_reply->len = sizeof(struct drbd_nl_cfg_reply);
2526	cn_reply->flags = 0;
2527
2528	reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
2529	reply->ret_code = ret_code;
2530
2531	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
2532	if (rr && rr != -ESRCH)
2533		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
2534}
2535
2536