/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_protocol.h"
#include "drbd_state_change.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb);
#include <linux/drbd_genl_api.h>
#include <linux/genl_magic_func.h>

static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
static atomic_t notify_genl_seq = ATOMIC_INIT(2); /* two. */

DEFINE_MUTEX(notification_mutex);
/* used by blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		pr_err("error sending genl reply\n");
}
/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
 * reason it could fail is no space in the skb, and there are 4k available. */
static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
{
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;
	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;
	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	}
	nla_nest_end(skb, nla);
	return 0;
}
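/*
 * Typical usage, mirroring the object lookup checks further down in
 * drbd_adm_prepare(): attach a human readable reason to the reply,
 * then return the matching error code:
 *
 *	drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor");
 *	return ERR_MINOR_INVALID;
 */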
/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 *
 * At this point, we still rely on the global genl_lock().
 * If we want to avoid that, and allow "genl_family.parallel_ops", we may need
 * to add additional synchronization against object destruction/modification.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_RESOURCE	2
#define DRBD_ADM_NEED_CONNECTION	4
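/*
 * These are single bits, so a handler needing more than one object
 * resolved may pass them or'ed together; a sketch of a handler
 * prologue (not necessarily a combination used in this file):
 *
 *	retcode = drbd_adm_prepare(&adm_ctx, skb, info,
 *			DRBD_ADM_NEED_MINOR | DRBD_ADM_NEED_RESOURCE);
 *	if (!adm_ctx.reply_skb)
 *		return retcode;
 */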
static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
	struct sk_buff *skb, struct genl_info *info, unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(adm_ctx, 0, sizeof(*adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
		return -EPERM;

	adm_ctx->reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx->reply_skb) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh = genlmsg_put_reply(adm_ctx->reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* putting a few bytes into a fresh skb of >= 4k will always succeed. */
	if (!adm_ctx->reply_dh) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh->minor = d_in->minor;
	adm_ctx->reply_dh->ret_code = NO_ERROR;

	adm_ctx->volume = VOLUME_UNSPECIFIED;
	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;

		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx->reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		if (nla)
			adm_ctx->volume = nla_get_u32(nla);
		nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
		if (nla)
			adm_ctx->resource_name = nla_data(nla);
		adm_ctx->my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
		adm_ctx->peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
		if ((adm_ctx->my_addr &&
		     nla_len(adm_ctx->my_addr) > sizeof(adm_ctx->connection->my_addr)) ||
		    (adm_ctx->peer_addr &&
		     nla_len(adm_ctx->peer_addr) > sizeof(adm_ctx->connection->peer_addr))) {
			err = -EINVAL;
			goto fail;
		}
	}

	adm_ctx->minor = d_in->minor;
	adm_ctx->device = minor_to_device(d_in->minor);
	/* We are protected by the global genl_lock().
	 * But we may explicitly drop it/retake it in drbd_adm_set_role(),
	 * so make sure this object stays around. */
	if (adm_ctx->device)
		kref_get(&adm_ctx->device->kref);

	if (adm_ctx->resource_name)
		adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name);

	if (!adm_ctx->device && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx->resource && (flags & DRBD_ADM_NEED_RESOURCE)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown resource");
		if (adm_ctx->resource_name)
			return ERR_RES_NOT_KNOWN;
		return ERR_INVALID_REQUEST;
	}
	if (flags & DRBD_ADM_NEED_CONNECTION) {
		if (adm_ctx->resource) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no resource name expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->device) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no minor number expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->my_addr && adm_ctx->peer_addr)
			adm_ctx->connection = conn_get_by_addrs(nla_data(adm_ctx->my_addr),
							nla_len(adm_ctx->my_addr),
							nla_data(adm_ctx->peer_addr),
							nla_len(adm_ctx->peer_addr));
		if (!adm_ctx->connection) {
			drbd_msg_put_info(adm_ctx->reply_skb, "unknown connection");
			return ERR_INVALID_REQUEST;
		}
	}
	/* some more paranoia, if the request was over-determined */
	if (adm_ctx->device && adm_ctx->resource &&
	    adm_ctx->device->resource != adm_ctx->resource) {
		pr_warning("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
				adm_ctx->minor, adm_ctx->resource->name,
				adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx->device &&
	    adm_ctx->volume != VOLUME_UNSPECIFIED &&
	    adm_ctx->volume != adm_ctx->device->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
				adm_ctx->minor, adm_ctx->volume,
				adm_ctx->device->vnr,
				adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	/* still, provide adm_ctx->resource always, if possible. */
	if (!adm_ctx->resource) {
		adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource
			: adm_ctx->connection ? adm_ctx->connection->resource : NULL;
		if (adm_ctx->resource)
			kref_get(&adm_ctx->resource->kref);
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx->reply_skb);
	adm_ctx->reply_skb = NULL;
	return err;
}
static int drbd_adm_finish(struct drbd_config_context *adm_ctx,
	struct genl_info *info, int retcode)
{
	if (adm_ctx->device) {
		kref_put(&adm_ctx->device->kref, drbd_destroy_device);
		adm_ctx->device = NULL;
	}
	if (adm_ctx->connection) {
		kref_put(&adm_ctx->connection->kref, &drbd_destroy_connection);
		adm_ctx->connection = NULL;
	}
	if (adm_ctx->resource) {
		kref_put(&adm_ctx->resource->kref, drbd_destroy_resource);
		adm_ctx->resource = NULL;
	}

	if (!adm_ctx->reply_skb)
		return -ENOMEM;

	adm_ctx->reply_dh->ret_code = retcode;
	drbd_adm_send_reply(adm_ctx->reply_skb, info);
	return 0;
}
static void setup_khelper_env(struct drbd_connection *connection, char **envp)
{
	char *afs;

	/* FIXME: A future version will not allow this case. */
	if (connection->my_addr_len == 0 || connection->peer_addr_len == 0)
		return;

	switch (((struct sockaddr *)&connection->peer_addr)->sa_family) {
	case AF_INET6:
		afs = "ipv6";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
			 &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr);
		break;
	case AF_INET:
		afs = "ipv4";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
		break;
	default:
		afs = "ssocks";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
	}
	snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}
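/*
 * For example, with an IPv4 peer at 10.1.1.2 (address purely
 * illustrative), the helper environment ends up containing:
 *
 *	DRBD_PEER_AF=ipv4
 *	DRBD_PEER_ADDRESS=10.1.1.2
 */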
int drbd_khelper(struct drbd_device *device, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			(char[20]) { }, /* address family */
			(char[60]) { }, /* address */
			NULL };
	char mb[12];
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct sib_info sib;
	int ret;

	if (current == connection->worker.task)
		set_bit(CALLBACK_PENDING, &connection->flags);

	snprintf(mb, 12, "minor-%d", device_to_minor(device));
	setup_khelper_env(connection, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(device);

	drbd_info(device, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_CALL, device, connection, cmd, 0);
	ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_RESPONSE, device, connection, cmd, ret);

	if (current == connection->worker.task)
		clear_bit(CALLBACK_PENDING, &connection->flags);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
static int conn_khelper(struct drbd_connection *connection, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			(char[20]) { }, /* address family */
			(char[60]) { }, /* address */
			NULL };
	char *resource_name = connection->resource->name;
	char *argv[] = {usermode_helper, cmd, resource_name, NULL };
	int ret;

	setup_khelper_env(connection, envp);
	conn_md_sync(connection);

	drbd_info(connection, "helper command: %s %s %s\n", usermode_helper, cmd, resource_name);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0);

	ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	else
		drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (get_ldev_if_state(device, D_CONSISTENT)) {
			struct disk_conf *disk_conf =
				rcu_dereference(peer_device->device->ldev->disk_conf);
			fp = max_t(enum drbd_fencing_p, fp, disk_conf->fencing);
			put_ldev(device);
		}
	}
	rcu_read_unlock();

	if (fp == FP_NOT_AVAIL) {
		/* IO Suspending works on the whole resource.
		   Do it only for one device. */
		vnr = 0;
		peer_device = idr_get_next(&connection->peer_devices, &vnr);
		drbd_change_state(peer_device->device, CS_VERBOSE | CS_HARD, NS(susp_fen, 0));
	}

	return fp;
}
bool conn_try_outdate_peer(struct drbd_connection *connection)
{
	unsigned int connect_cnt;
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	spin_lock_irq(&connection->resource->req_lock);
	if (connection->cstate >= C_WF_REPORT_PARAMS) {
		drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
		spin_unlock_irq(&connection->resource->req_lock);
		return false;
	}

	connect_cnt = connection->connect_cnt;
	spin_unlock_irq(&connection->resource->req_lock);

	fp = highest_fencing_policy(connection);
	if (fp == FP_NOT_AVAIL) {
		drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
		return false;
	}

	r = conn_khelper(connection, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (conn_highest_disk(connection) == D_UP_TO_DATE) {
			/* we will have created a new UUID anyway... */
494 ex_to_string = "peer is unreachable, assumed to be dead";
496 val.pdsk = D_OUTDATED;
498 ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
501 case 6: /* Peer is primary, voluntarily outdate myself.
502 * This is useful when an unconnected R_SECONDARY is asked to
503 * become R_PRIMARY, but finds the other peer being active. */
504 ex_to_string = "peer is active";
505 drbd_warn(connection, "Peer is primary, outdating myself.\n");
507 val.disk = D_OUTDATED;
510 if (fp != FP_STONITH)
511 drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
512 ex_to_string = "peer was stonithed";
514 val.pdsk = D_OUTDATED;
517 /* The script is broken ... */
518 drbd_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
519 return false; /* Eventually leave IO frozen */
522 drbd_info(connection, "fence-peer helper returned %d (%s)\n",
523 (r>>8) & 0xff, ex_to_string);
528 conn_request_state(connection, mask, val, CS_VERBOSE);
529 here, because we might were able to re-establish the connection in the
	spin_lock_irq(&connection->resource->req_lock);
	if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
		if (connection->connect_cnt != connect_cnt)
			/* In case the connection was established and dropped
			   while the fence-peer handler was running, ignore it */
			drbd_info(connection, "Ignoring fence-peer exit code\n");
		else
			_conn_request_state(connection, mask, val, CS_VERBOSE);
	}
	spin_unlock_irq(&connection->resource->req_lock);

	return conn_highest_pdsk(connection) <= D_OUTDATED;
}
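/*
 * For reference, the fence-peer helper exit codes interpreted above;
 * call_usermodehelper(..., UMH_WAIT_PROC) hands back a wait(2) style
 * status, hence the (r >> 8) & 0xff to extract the exit code:
 *
 *	3 - peer is inconsistent (or worse)
 *	4 - peer got outdated, or was already outdated
 *	5 - peer was down / unreachable
 *	6 - peer is primary; outdate myself instead
 *	7 - peer was stonithed
 *	anything else - helper is broken, IO stays frozen
 */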
static int _try_outdate_peer_async(void *data)
{
	struct drbd_connection *connection = (struct drbd_connection *)data;

	conn_try_outdate_peer(connection);

	kref_put(&connection->kref, drbd_destroy_connection);
	return 0;
}

void conn_try_outdate_peer_async(struct drbd_connection *connection)
{
	struct task_struct *opa;

	kref_get(&connection->kref);
	/* We may just have force_sig()'ed this thread
	 * to get it out of some blocking network function.
	 * Clear signals; otherwise kthread_run(), which internally uses
	 * wait_on_completion_killable(), will mistake our pending signal
	 * for a new fatal signal and fail. */
	flush_signals(current);
	opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
	if (IS_ERR(opa)) {
		drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&connection->kref, drbd_destroy_connection);
	}
}
static enum drbd_state_rv
drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force)
{
	struct drbd_peer_device *const peer_device = first_peer_device(device);
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY) {
		struct drbd_connection *connection;

		/* Detect dead peers as soon as possible. */
		rcu_read_lock();
		for_each_connection(connection, device->resource)
			request_ping(connection);
		rcu_read_unlock();
	}

	mutex_lock(device->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state_holding_state_mutex(device, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}
		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (device->state.disk < D_UP_TO_DATE &&
		     device->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}
		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(device, device->state.pdsk == D_UNKNOWN);
			if (conn_try_outdate_peer(connection)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}
		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(connection) && force) {
				drbd_warn(device, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk  = D_OUTDATED;
			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			int timeo;
			rcu_read_lock();
			nc = rcu_dereference(connection->net_conf);
			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
			rcu_read_unlock();
			schedule_timeout_interruptible(timeo);
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(device, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		drbd_warn(device, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0);

	/* FIXME also wait for all pending P_BARRIER_ACK? */

	if (new_role == R_SECONDARY) {
		if (get_ldev(device)) {
			device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(device);
		}
	} else {
		mutex_lock(&device->resource->conf_update);
		nc = connection->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&device->resource->conf_update);

		if (get_ldev(device)) {
			if (((device->state.conn < C_CONNECTED ||
			      device->state.pdsk <= D_FAILED)
			     && device->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(device);

			device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(device);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (device->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(peer_device);
		drbd_send_current_state(peer_device);
	}

	drbd_md_sync(device);
	set_disk_ro(device->vdisk, new_role == R_SECONDARY);
	kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(device->state_mutex);
	return rv;
}
static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out;
		}
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);

	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
/* Initializes the md.*_offset members, so we are able to find
 * the on disk meta data.
 *
 * We currently have two possible layouts:
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 * internal:
 *   |----------- md_size_sect ------------------|
 *   [data.....][  Bitmap  ][ activity log ][ 4k superblock ]
 *   | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 * Activity log size used to be fixed 32kB,
 * but is about to become configurable.
 */
static void drbd_md_set_sector_offsets(struct drbd_device *device,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	unsigned int al_size_sect = bdev->md.al_size_4k * 8;

	bdev->md.md_offset = drbd_md_ss(bdev);

	switch (bdev->md.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_128MB_SECT;
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		/* al size is still fixed */
		bdev->md.al_offset = -al_size_sect;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_4kB_SECT + al_size_sect;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_4kB_SECT;
		break;
	}
}
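/*
 * Worked example, assuming the (still) fixed 32kB activity log
 * mentioned above, i.e. al_size_4k == 8, so al_size_sect == 64:
 * with internal meta data, al_offset == -64, and the superblock plus
 * AL occupy MD_4kB_SECT + 64 == 72 sectors at the end of the device,
 * preceded by the bitmap (bm_offset == -md_size_sect + MD_4kB_SECT).
 */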
/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;

	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}
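/*
 * E.g. ppsize(buf, 1048576) yields "1024 MB": 1048576 (KB) is shifted
 * down once, rounding to nearest via the tested 1<<9 bit, and the unit
 * advances from 'K' to 'M'.
 */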
/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 *  remote READ does inc_ap_bio, receiver would need to receive answer
 *  packet from remote to dec_ap_bio again.
 *  receiver receive_sizes(), comes here,
 *  waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 *  (not connected, or bad/no disk on peer):
 *  see drbd_fail_request_early, ap_bio_cnt is zero.
 *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 *  peer may not initiate a resize.
 */

/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes a device->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_device *device)
{
	set_bit(SUSPEND_IO, &device->flags);
	if (drbd_suspended(device))
		return;
	wait_event(device->misc_wait, !atomic_read(&device->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_device *device)
{
	clear_bit(SUSPEND_IO, &device->flags);
	wake_up(&device->misc_wait);
}
/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @device:	DRBD device.
 *
 * Returns a determine_dev_size result; negative values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size
drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size_sect, u_size;
	struct drbd_md *md = &device->ldev->md;
	u32 prev_al_stripe_size_4k;
	u32 prev_al_stripes;
	sector_t size;
	char ppb[10];
	void *buffer;

	int md_moved, la_size_changed;
	enum determine_dev_size rv = DS_UNCHANGED;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(device);
	buffer = drbd_md_get_buffer(device, __func__); /* Lock meta-data IO */
	if (!buffer) {
		drbd_resume_io(device);
		return DS_ERROR;
	}

	/* no wait necessary anymore, actually we could assert that */
	wait_event(device->al_wait, lc_try_lock(device->act_log));

	prev_first_sect = drbd_md_first_sector(device->ldev);
	prev_size = device->ldev->md.md_size_sect;
	la_size_sect = device->ldev->md.la_size_sect;

	if (rs) {
		/* rs is non NULL if we should change the AL layout only */
		prev_al_stripes = md->al_stripes;
		prev_al_stripe_size_4k = md->al_stripe_size_4k;

		md->al_stripes = rs->al_stripes;
		md->al_stripe_size_4k = rs->al_stripe_size / 4;
		md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
	}

	drbd_md_set_sector_offsets(device, device->ldev);

	rcu_read_lock();
	u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	size = drbd_new_dev_size(device, device->ldev, u_size, flags & DDSF_FORCED);

	if (size < la_size_sect) {
		if (rs && u_size == 0) {
			/* Remove "rs &&" later. This check should always be active, but
			   right now the receiver expects the permissive behavior */
			drbd_warn(device, "Implicit shrink not allowed. "
				 "Use --size=%llus for explicit shrink.\n",
				 (unsigned long long)size);
			rv = DS_ERROR_SHRINK;
		}
		if (u_size > size)
			rv = DS_ERROR_SPACE_MD;
		if (rv != DS_UNCHANGED)
			goto err_out;
	}

	if (drbd_get_capacity(device->this_bdev) != size ||
	    drbd_bm_capacity(device) != size) {
		int err;
		err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(device)>>1;
			if (size == 0)
				drbd_err(device, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			else
				drbd_err(device, "BM resizing failed. "
				    "Leaving size unchanged at size = %lu KB\n",
				    (unsigned long)size);
			rv = DS_ERROR;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(device, size);
		device->ldev->md.la_size_sect = size;
		drbd_info(device, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
		     (unsigned long long)size>>1);
	}
	if (rv <= DS_ERROR)
		goto err_out;
	la_size_changed = (la_size_sect != device->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(device->ldev)
		|| prev_size	   != device->ldev->md.md_size_sect;

	if (la_size_changed || md_moved || rs) {
		u32 prev_flags;

		/* We do some synchronous IO below, which may take some time.
		 * Clear the timer, to avoid scary "timer expired!" messages,
		 * "Superblock" is written out at least twice below, anyways. */
		del_timer(&device->md_sync_timer);
		drbd_al_shrink(device); /* All extents inactive. */

		prev_flags = md->flags;
		md->flags &= ~MDF_PRIMARY_IND;
		drbd_md_write(device, buffer);

		drbd_info(device, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
			       "size changed", BM_LOCKED_MASK);
		drbd_initialize_al(device, buffer);

		md->flags = prev_flags;
		drbd_md_write(device, buffer);

		if (rs)
			drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
				  md->al_stripes, md->al_stripe_size_4k * 4);
	}

	if (size > la_size_sect)
		rv = la_size_sect ? DS_GREW : DS_GREW_FROM_ZERO;
	if (size < la_size_sect)
		rv = DS_SHRUNK;

	if (0) {
	err_out:
		/* restore previous offset and sizes */
		md->al_stripes = prev_al_stripes;
		md->al_stripe_size_4k = prev_al_stripe_size_4k;
		md->al_size_4k = (u64)prev_al_stripes * prev_al_stripe_size_4k;

		drbd_md_set_sector_offsets(device, device->ldev);
	}
	lc_unlock(device->act_log);
	wake_up(&device->al_wait);
	drbd_md_put_buffer(device);
	drbd_resume_io(device);

	return rv;
}
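/*
 * How a caller consumes the result, taken from drbd_adm_attach() below
 * (values below DS_UNCHANGED are the error cases, positive values
 * report a size change):
 *
 *	dd = drbd_determine_dev_size(device, 0, NULL);
 *	if (dd <= DS_ERROR)
 *		retcode = ERR_NOMEM_BITMAP;
 *	else if (dd == DS_GREW)
 *		set_bit(RESYNC_AFTER_NEG, &device->flags);
 */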
static sector_t
drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = device->p_size;   /* partner's disk size. */
	sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (device->state.conn < C_CONNECTED && assume_peer_has_space) {
		drbd_warn(device, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else if (la_size_sect) {
		size = la_size_sect;
		if (m_size && m_size < size)
			size = m_size;
		if (p_size && p_size < size)
			size = p_size;
	} else {
		if (m_size)
			size = m_size;
		if (p_size)
			size = p_size;
	}

	if (size == 0)
		drbd_err(device, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			drbd_err(device, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @device:	DRBD device.
 *
 * Returns -EBUSY if the current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (device->act_log &&
	    device->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = device->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);
	if (n == NULL) {
		drbd_err(device, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&device->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				drbd_err(device, "refcnt(%d)==%d\n",
					e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		device->act_log = n;
	spin_unlock_irq(&device->al_lock);
	if (in_use) {
		drbd_err(device, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	}
	if (t)
		lc_destroy(t);

	drbd_md_mark_dirty(device); /* we changed device->act_log->nr_elements */
	return 0;
}
static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
				   unsigned int max_bio_size)
{
	struct request_queue * const q = device->rq_queue;
	unsigned int max_hw_sectors = max_bio_size >> 9;
	unsigned int max_segments = 0;
	struct request_queue *b = NULL;

	if (bdev) {
		b = bdev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		rcu_read_lock();
		max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs;
		rcu_read_unlock();

		blk_set_stacking_limits(&q->limits);
		blk_queue_max_write_same_sectors(q, 0);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (b) {
		struct drbd_connection *connection = first_peer_device(device)->connection;

		if (blk_queue_discard(b) &&
		    (connection->cstate < C_CONNECTED || connection->agreed_features & FF_TRIM)) {
			/* For now, don't allow more than one activity log extent worth of data
			 * to be discarded in one go. We may need to rework drbd_al_begin_io()
			 * to allow for even larger discard ranges */
			blk_queue_max_discard_sectors(q, DRBD_MAX_DISCARD_SECTORS);

			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
			/* REALLY? Is stacking secdiscard "legal"? */
			if (blk_queue_secdiscard(b))
				queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
		} else {
			blk_queue_max_discard_sectors(q, 0);
			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
			queue_flag_clear_unlocked(QUEUE_FLAG_SECDISCARD, q);
		}

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
	}
}
void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev)
{
	unsigned int now, new, local, peer;

	now = queue_max_hw_sectors(device->rq_queue) << 9;
	local = device->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = device->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (bdev) {
		local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9;
		device->local_max_bio_size = local;
	}
	local = min(local, DRBD_MAX_BIO_SIZE);
	/* We may ignore peer limits if the peer is modern enough.
	 * From 8.3.8 onwards the peer can use multiple
	 * BIOs for a single peer_request. */
	if (device->state.conn >= C_WF_REPORT_PARAMS) {
		if (first_peer_device(device)->connection->agreed_pro_version < 94)
			peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
		/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
		else if (first_peer_device(device)->connection->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else if (first_peer_device(device)->connection->agreed_pro_version < 100)
			peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
		else
			peer = DRBD_MAX_BIO_SIZE;

		/* We may later detach and re-attach on a disconnected Primary.
		 * Avoid this setting to jump back in that case.
		 * We want to store what we know the peer DRBD can handle,
		 * not what the peer IO backend can handle. */
		if (peer > device->peer_max_bio_size)
			device->peer_max_bio_size = peer;
	}
	new = min(local, peer);

	if (device->state.role == R_PRIMARY && new < now)
		drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now);

	if (new != now)
		drbd_info(device, "max BIO size = %u\n", new);

	drbd_setup_queue_param(device, bdev, new);
}
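/*
 * Summary of the resulting peer limit by agreed protocol version; the
 * concrete sizes are assumptions based on the constants of this era
 * (H80 packet limit 32 KiB, P95 limit 128 KiB, DRBD_MAX_BIO_SIZE 1 MiB):
 *
 *	apv <  94:  min(last known peer value, DRBD_MAX_SIZE_H80_PACKET)
 *	apv == 94:  DRBD_MAX_SIZE_H80_PACKET
 *	apv 95-99:  DRBD_MAX_BIO_SIZE_P95
 *	apv >= 100: DRBD_MAX_BIO_SIZE
 */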
/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_connection *connection)
{
	drbd_thread_start(&connection->worker);
	drbd_flush_workqueue(&connection->sender_work);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_connection *connection)
{
	bool stop_threads;

	spin_lock_irq(&connection->resource->req_lock);
	stop_threads = conn_all_vols_unconf(connection) &&
		connection->cstate == C_STANDALONE;
	spin_unlock_irq(&connection->resource->req_lock);

	if (stop_threads) {
		/* asender is implicitly stopped by receiver
		 * in conn_disconnect() */
		drbd_thread_stop(&connection->receiver);
		drbd_thread_stop(&connection->worker);
	}
}
/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_device *device)
{
	int s = 0;

	if (!lc_try_lock(device->act_log)) {
		drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(device);
	spin_lock_irq(&device->resource->req_lock);
	if (device->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
	spin_unlock_irq(&device->resource->req_lock);
	lc_unlock(device->act_log);

	if (s)
		drbd_info(device, "Suspended AL updates\n");
}
static bool should_set_defaults(struct genl_info *info)
{
	unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}
static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
{
	/* This is limited by 16 bit "slot" numbers,
	 * and by available on-disk context storage.
	 *
	 * Also (u16)~0 is special (denotes a "free" extent).
	 *
	 * One transaction occupies one 4kB on-disk block,
	 * we have n such blocks in the on disk ring buffer,
	 * the "current" transaction may fail (n-1),
	 * and there are 919 context slot numbers per transaction.
	 *
	 * 72 transaction blocks amounts to more than 2**16 context slots,
	 * so cap there first.
	 */
	const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
	const unsigned int sufficient_on_disk =
		(max_al_nr + AL_CONTEXT_PER_TRANSACTION - 1)
		/ AL_CONTEXT_PER_TRANSACTION;

	unsigned int al_size_4k = bdev->md.al_size_4k;

	if (al_size_4k > sufficient_on_disk)
		return max_al_nr;

	return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
}
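/*
 * Worked example: with the historical fixed 32kB AL, al_size_4k == 8,
 * so at most (8 - 1) * 919 == 6433 extents fit; that matches the
 * DRBD_AL_EXTENTS_MAX cap above (assuming the 8.4-era value of 6433
 * from drbd_limits.h).
 */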
static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
{
	return	a->disk_barrier != b->disk_barrier ||
		a->disk_flushes != b->disk_flushes ||
		a->disk_drain != b->disk_drain;
}
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct drbd_device *device;
	struct disk_conf *new_disk_conf, *old_disk_conf;
	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
	int err, fifo_size;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	device = adm_ctx.device;
	mutex_lock(&adm_ctx.resource->adm_mutex);

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	mutex_lock(&device->resource->conf_update);
	old_disk_conf = device->ldev->disk_conf;
	*new_disk_conf = *old_disk_conf;
	if (should_set_defaults(info))
		set_disk_conf_defaults(new_disk_conf);

	err = disk_conf_from_attrs_for_change(new_disk_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
		goto fail_unlock;
	}

	if (!expect(new_disk_conf->resync_rate >= 1))
		new_disk_conf->resync_rate = 1;

	if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
		new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
	if (new_disk_conf->al_extents > drbd_al_extents_max(device->ldev))
		new_disk_conf->al_extents = drbd_al_extents_max(device->ldev);

	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;

	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != device->rs_plan_s->size) {
		new_plan = fifo_alloc(fifo_size);
		if (!new_plan) {
			drbd_err(device, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail_unlock;
		}
	}
	drbd_suspend_io(device);
	wait_event(device->al_wait, lc_try_lock(device->act_log));
	drbd_al_shrink(device);
	err = drbd_check_al_size(device, new_disk_conf);
	lc_unlock(device->act_log);
	wake_up(&device->al_wait);
	drbd_resume_io(device);

	if (err) {
		retcode = ERR_NOMEM;
		goto fail_unlock;
	}

	lock_all_resources();
	retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
	if (retcode == NO_ERROR) {
		rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
		drbd_resync_after_changed(device);
	}
	unlock_all_resources();

	if (retcode != NO_ERROR)
		goto fail_unlock;

	if (new_plan) {
		old_plan = device->rs_plan_s;
		rcu_assign_pointer(device->rs_plan_s, new_plan);
	}

	mutex_unlock(&device->resource->conf_update);

	if (new_disk_conf->al_updates)
		device->ldev->md.flags &= ~MDF_AL_DISABLED;
	else
		device->ldev->md.flags |= MDF_AL_DISABLED;

	if (new_disk_conf->md_flushes)
		clear_bit(MD_NO_FUA, &device->flags);
	else
		set_bit(MD_NO_FUA, &device->flags);

	if (write_ordering_changed(old_disk_conf, new_disk_conf))
		drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);

	drbd_md_sync(device);

	if (device->state.conn >= C_CONNECTED) {
		struct drbd_peer_device *peer_device;

		for_each_peer_device(peer_device, device)
			drbd_send_sync_param(peer_device);
	}

	synchronize_rcu();
	kfree(old_disk_conf);
	kfree(old_plan);
	mod_timer(&device->request_timer, jiffies + HZ);
	goto success;

fail_unlock:
	mutex_unlock(&device->resource->conf_update);
 fail:
	kfree(new_disk_conf);
	kfree(new_plan);
success:
	put_ldev(device);
 out:
	mutex_unlock(&adm_ctx.resource->adm_mutex);
 finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_device *device;
	struct drbd_peer_device *peer_device;
	struct drbd_connection *connection;
	int err;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct disk_conf *new_disk_conf = NULL;
	struct block_device *bdev;
	struct lru_cache *resync_lru = NULL;
	struct fifo_buffer *new_plan = NULL;
	union drbd_state ns, os;
	enum drbd_state_rv rv;
	struct net_conf *nc;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	device = adm_ctx.device;
	mutex_lock(&adm_ctx.resource->adm_mutex);
	peer_device = first_peer_device(device);
	connection = peer_device->connection;
	conn_reconfig_start(connection);
	/* if you want to reconfigure, please tear down first */
	if (device->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}
	/* It may just now have detached because of IO error. Make sure
	 * drbd_ldev_destroy is done already, we may end up here very fast,
	 * e.g. if someone calls attach from the on-io-error handler,
	 * to realize a "hot spare" feature (not that I'd recommend that) */
	wait_event(device->misc_wait, !test_bit(GOING_DISKLESS, &device->flags));

	/* make sure there is no leftover from previous force-detach attempts */
	clear_bit(FORCE_DETACH, &device->flags);
	clear_bit(WAS_IO_ERROR, &device->flags);
	clear_bit(WAS_READ_ERROR, &device->flags);

	/* and no leftover from previously aborted resync or verify, either */
	device->rs_total = 0;
	device->rs_failed = 0;
	atomic_set(&device->rs_pending_cnt, 0);

	/* allocation not in the IO path, drbdsetup context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	spin_lock_init(&nbc->md.uuid_lock);
	new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	nbc->disk_conf = new_disk_conf;

	set_disk_conf_defaults(new_disk_conf);
	err = disk_conf_from_attrs(new_disk_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
		goto fail;
	}

	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;

	new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
	if (!new_plan) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (nc) {
		if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
			rcu_read_unlock();
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}
	rcu_read_unlock();

	bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, device);
	if (IS_ERR(bdev)) {
		drbd_err(device, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_DISK;
		goto fail;
	}
	nbc->backing_bdev = bdev;

	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device.  TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  (new_disk_conf->meta_dev_idx < 0) ?
				  (void *)device : (void *)drbd_m_holder);
	if (IS_ERR(bdev)) {
		drbd_err(device, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}
	nbc->md_bdev = bdev;

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			1, 61, sizeof(struct bm_extent),
			offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	/* Read our meta data super block early.
	 * This also sets other on-disk offsets. */
	retcode = drbd_md_read(device, nbc);
	if (retcode != NO_ERROR)
		goto fail;

	if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
		new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
	if (new_disk_conf->al_extents > drbd_al_extents_max(nbc))
		new_disk_conf->al_extents = drbd_al_extents_max(nbc);

	if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
		drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) new_disk_conf->disk_size);
		retcode = ERR_DISK_TOO_SMALL;
		goto fail;
	}

	if (new_disk_conf->meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TOO_SMALL;
		drbd_warn(device, "refusing attach: md-device too small, "
			 "at least %llu sectors needed for this meta-disk type\n",
			 (unsigned long long) min_md_device_sectors);
		goto fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(device->this_bdev)) {
		retcode = ERR_DISK_TOO_SMALL;
		goto fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		drbd_warn(device, "==> truncating very big lower level device "
			"to currently maximum possible %llu sectors <==\n",
			(unsigned long long) max_possible_sectors);
		if (new_disk_conf->meta_dev_idx >= 0)
			drbd_warn(device, "==>> using internal or flexible "
				"meta data may help <<==\n");
	}
	drbd_suspend_io(device);
	/* also wait for the last barrier ack. */
	/* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
	 * We need a way to either ignore barrier acks for barriers sent before a device
	 * was attached, or a way to wait for all pending barrier acks to come in.
	 * As barriers are counted per resource,
	 * we'd need to suspend io on all devices of a resource.
	 */
	wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device));
	/* and for any other previously queued work */
	drbd_flush_workqueue(&connection->sender_work);

	rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
	retcode = rv;  /* FIXME: Type mismatch. */
	drbd_resume_io(device);
	if (rv < SS_SUCCESS)
		goto fail;

	if (!get_ldev_if_state(device, D_ATTACHING))
		goto force_diskless;

	if (!device->bitmap) {
		if (drbd_bm_init(device)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	if (device->state.conn < C_CONNECTED &&
	    device->state.role == R_PRIMARY && device->ed_uuid &&
	    (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		drbd_err(device, "Can only attach to data with current UUID=%016llX\n",
		    (unsigned long long)device->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(device, new_disk_conf)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
		drbd_warn(device, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TOO_SMALL;
		goto force_diskless_dec;
	}

	lock_all_resources();
	retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
	if (retcode != NO_ERROR) {
		unlock_all_resources();
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (new_disk_conf->md_flushes)
		clear_bit(MD_NO_FUA, &device->flags);
	else
		set_bit(MD_NO_FUA, &device->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now device takes over responsibility, and the state engine should
	 * clean it up somewhere. */
	D_ASSERT(device, device->ldev == NULL);
	device->ldev = nbc;
	device->resync = resync_lru;
	device->rs_plan_s = new_plan;
	nbc = NULL;
	resync_lru = NULL;
	new_disk_conf = NULL;
	new_plan = NULL;

	drbd_resync_after_changed(device);
	drbd_bump_write_ordering(device->resource, device->ldev, WO_BDEV_FLUSH);
	unlock_all_resources();

	if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &device->flags);
	else
		clear_bit(CRASHED_PRIMARY, &device->flags);

	if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
	    !(device->state.role == R_PRIMARY && device->resource->susp_nod))
		set_bit(CRASHED_PRIMARY, &device->flags);

	device->send_cnt = 0;
	device->recv_cnt = 0;
	device->read_cnt = 0;
	device->writ_cnt = 0;

	drbd_reconsider_max_bio_size(device, device->ldev);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &device->flags);
	if (device->state.role != R_PRIMARY &&
	     drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &device->flags);

	dd = drbd_determine_dev_size(device, 0, NULL);
	if (dd <= DS_ERROR) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == DS_GREW)
		set_bit(RESYNC_AFTER_NEG, &device->flags);

	if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ||
	    (test_bit(CRASHED_PRIMARY, &device->flags) &&
	     drbd_md_test_flag(device->ldev, MDF_AL_DISABLED))) {
		drbd_info(device, "Assuming that all blocks are out of sync "
		     "(aka FullSync)\n");
		if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
			"set_n_write from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(device, &drbd_bm_read,
			"read from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
		drbd_suspend_al(device); /* IO is still suspended here... */
	spin_lock_irq(&device->resource->req_lock);
	os = drbd_read_state(device);
	ns = os;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WasUpToDate...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(device->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(device->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	rcu_read_lock();
	if (ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || rcu_dereference(device->ldev->disk_conf)->fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	if (rcu_dereference(device->ldev->disk_conf)->al_updates)
		device->ldev->md.flags &= ~MDF_AL_DISABLED;
	else
		device->ldev->md.flags |= MDF_AL_DISABLED;
	rcu_read_unlock();

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (device->state.conn == C_CONNECTED) {
		device->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		   To avoid a race in receive_state, free p_uuid while
		   holding req_lock. I.e. atomic with the state change */
		kfree(device->p_uuid);
		device->p_uuid = NULL;
	}

	rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
	spin_unlock_irq(&device->resource->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	mod_timer(&device->request_timer, jiffies + HZ);

	if (device->state.role == R_PRIMARY)
		device->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
	else
		device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(device);
	drbd_md_sync(device);

	kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(device);
	conn_reconfig_done(connection);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;

 force_diskless_dec:
	put_ldev(device);
 force_diskless:
	drbd_force_state(device, NS(disk, D_DISKLESS));
	drbd_md_sync(device);
 fail:
	conn_reconfig_done(connection);
	if (nbc) {
		if (nbc->backing_bdev)
			blkdev_put(nbc->backing_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		if (nbc->md_bdev)
			blkdev_put(nbc->md_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		kfree(nbc);
	}
	kfree(new_disk_conf);
	lc_destroy(resync_lru);
	kfree(new_plan);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
 finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
1909 static int adm_detach(struct drbd_device *device, int force)
1911 enum drbd_state_rv retcode;
1915 set_bit(FORCE_DETACH, &device->flags);
1916 drbd_force_state(device, NS(disk, D_FAILED));
1917 retcode = SS_SUCCESS;
1921 drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */
1922 drbd_md_get_buffer(device, __func__); /* make sure there is no in-flight meta-data IO */
1923 retcode = drbd_request_state(device, NS(disk, D_FAILED));
1924 drbd_md_put_buffer(device);
1925 /* D_FAILED will transition to DISKLESS. */
1926 ret = wait_event_interruptible(device->misc_wait,
1927 device->state.disk != D_FAILED);
1928 drbd_resume_io(device);
1929 if ((int)retcode == (int)SS_IS_DISKLESS)
1930 retcode = SS_NOTHING_TO_DO;
1931 if (ret)
1932 retcode = ERR_INTR;
1933 out:
1934 return retcode;
1935 }
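/* Sketch of how the two paths are reached from userspace, assuming the
 * drbd-utils tooling (exact syntax varies between versions):
 *
 *   drbdsetup detach <minor>           -> adm_detach(device, 0): graceful,
 *                                         waits out in-flight meta-data IO
 *   drbdsetup detach <minor> --force   -> adm_detach(device, 1): sets
 *                                         FORCE_DETACH and forces D_FAILED
 */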
1937 /* Detaching the disk is a process in multiple stages. First we need to lock
1938 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
1939 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1940 * internal references as well.
1941 * Only then we have finally detached. */
1942 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
1944 struct drbd_config_context adm_ctx;
1945 enum drbd_ret_code retcode;
1946 struct detach_parms parms = { };
1949 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
1950 if (!adm_ctx.reply_skb)
1952 if (retcode != NO_ERROR)
1955 if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
1956 err = detach_parms_from_attrs(&parms, info);
1957 if (err) {
1958 retcode = ERR_MANDATORY_TAG;
1959 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
1964 mutex_lock(&adm_ctx.resource->adm_mutex);
1965 retcode = adm_detach(adm_ctx.device, parms.force_detach);
1966 mutex_unlock(&adm_ctx.resource->adm_mutex);
1968 drbd_adm_finish(&adm_ctx, info, retcode);
1972 static bool conn_resync_running(struct drbd_connection *connection)
1974 struct drbd_peer_device *peer_device;
1979 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
1980 struct drbd_device *device = peer_device->device;
1981 if (device->state.conn == C_SYNC_SOURCE ||
1982 device->state.conn == C_SYNC_TARGET ||
1983 device->state.conn == C_PAUSED_SYNC_S ||
1984 device->state.conn == C_PAUSED_SYNC_T) {
1994 static bool conn_ov_running(struct drbd_connection *connection)
1996 struct drbd_peer_device *peer_device;
2001 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2002 struct drbd_device *device = peer_device->device;
2003 if (device->state.conn == C_VERIFY_S ||
2004 device->state.conn == C_VERIFY_T) {
2014 static enum drbd_ret_code
2015 _check_net_options(struct drbd_connection *connection, struct net_conf *old_net_conf, struct net_conf *new_net_conf)
2017 struct drbd_peer_device *peer_device;
2020 if (old_net_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) {
2021 if (new_net_conf->wire_protocol != old_net_conf->wire_protocol)
2022 return ERR_NEED_APV_100;
2024 if (new_net_conf->two_primaries != old_net_conf->two_primaries)
2025 return ERR_NEED_APV_100;
2027 if (strcmp(new_net_conf->integrity_alg, old_net_conf->integrity_alg))
2028 return ERR_NEED_APV_100;
2031 if (!new_net_conf->two_primaries &&
2032 conn_highest_role(connection) == R_PRIMARY &&
2033 conn_highest_peer(connection) == R_PRIMARY)
2034 return ERR_NEED_ALLOW_TWO_PRI;
2036 if (new_net_conf->two_primaries &&
2037 (new_net_conf->wire_protocol != DRBD_PROT_C))
2038 return ERR_NOT_PROTO_C;
2040 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2041 struct drbd_device *device = peer_device->device;
2042 if (get_ldev(device)) {
2043 enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing;
2044 put_ldev(device);
2045 if (new_net_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
2046 return ERR_STONITH_AND_PROT_A;
2048 if (device->state.role == R_PRIMARY && new_net_conf->discard_my_data)
2049 return ERR_DISCARD_IMPOSSIBLE;
2052 if (new_net_conf->on_congestion != OC_BLOCK && new_net_conf->wire_protocol != DRBD_PROT_A)
2053 return ERR_CONG_NOT_PROTO_A;
2055 return NO_ERROR;
2056 }
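/* The checks above enforce, in order:
 *  - wire protocol, two-primaries and integrity-alg must not change
 *    against a connected peer speaking protocol version < 100;
 *  - allow-two-primaries cannot be cleared while both nodes are Primary;
 *  - dual-primary operation requires protocol C;
 *  - protocol A is incompatible with fencing resource-and-stonith;
 *  - discard-my-data must not be set on a current Primary;
 *  - an on-congestion policy other than "block" requires protocol A. */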
2058 static enum drbd_ret_code
2059 check_net_options(struct drbd_connection *connection, struct net_conf *new_net_conf)
2061 static enum drbd_ret_code rv;
2062 struct drbd_peer_device *peer_device;
2065 rcu_read_lock();
2066 rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf);
2067 rcu_read_unlock();
2069 /* connection->peer_devices protected by genl_lock() here */
2070 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2071 struct drbd_device *device = peer_device->device;
2072 if (!device->bitmap) {
2073 if (drbd_bm_init(device))
2074 return ERR_NOMEM;
2075 }
2076 }
2078 return rv;
2079 }
2081 struct crypto {
2082 struct crypto_hash *verify_tfm;
2083 struct crypto_hash *csums_tfm;
2084 struct crypto_hash *cram_hmac_tfm;
2085 struct crypto_hash *integrity_tfm;
2086 };
2088 static enum drbd_ret_code
2089 alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
2090 {
2091 if (!tfm_name[0])
2092 return NO_ERROR;
2094 *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
2095 if (IS_ERR(*tfm)) {
2096 *tfm = NULL;
2097 return err_alg;
2098 }
2100 return NO_ERROR;
2101 }
2103 static enum drbd_ret_code
2104 alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
2106 char hmac_name[CRYPTO_MAX_ALG_NAME];
2107 enum drbd_ret_code rv;
2109 rv = alloc_hash(&crypto->csums_tfm, new_net_conf->csums_alg,
2110 ERR_CSUMS_ALG);
2111 if (rv != NO_ERROR)
2112 return rv;
2113 rv = alloc_hash(&crypto->verify_tfm, new_net_conf->verify_alg,
2114 ERR_VERIFY_ALG);
2115 if (rv != NO_ERROR)
2116 return rv;
2117 rv = alloc_hash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
2118 ERR_INTEGRITY_ALG);
2119 if (rv != NO_ERROR)
2120 return rv;
2121 if (new_net_conf->cram_hmac_alg[0] != 0) {
2122 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
2123 new_net_conf->cram_hmac_alg);
2125 rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
2126 ERR_AUTH_ALG);
2127 }
2129 return rv;
2130 }
2132 static void free_crypto(struct crypto *crypto)
2134 crypto_free_hash(crypto->cram_hmac_tfm);
2135 crypto_free_hash(crypto->integrity_tfm);
2136 crypto_free_hash(crypto->csums_tfm);
2137 crypto_free_hash(crypto->verify_tfm);
2138 }
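/* The kernel crypto API composes transforms from template names, which is
 * why alloc_crypto() builds "hmac(<alg>)" above. A minimal sketch of the
 * same allocation outside of drbd (algorithm name is illustrative):
 *
 *   struct crypto_hash *tfm;
 *
 *   tfm = crypto_alloc_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC);
 *   if (IS_ERR(tfm))
 *           return PTR_ERR(tfm);
 *   ...
 *   crypto_free_hash(tfm);
 */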
2140 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
2142 struct drbd_config_context adm_ctx;
2143 enum drbd_ret_code retcode;
2144 struct drbd_connection *connection;
2145 struct net_conf *old_net_conf, *new_net_conf = NULL;
2147 int ovr; /* online verify running */
2148 int rsr; /* re-sync running */
2149 struct crypto crypto = { };
2151 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
2152 if (!adm_ctx.reply_skb)
2154 if (retcode != NO_ERROR)
2157 connection = adm_ctx.connection;
2158 mutex_lock(&adm_ctx.resource->adm_mutex);
2160 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
2161 if (!new_net_conf) {
2162 retcode = ERR_NOMEM;
2163 goto out;
2164 }
2166 conn_reconfig_start(connection);
2168 mutex_lock(&connection->data.mutex);
2169 mutex_lock(&connection->resource->conf_update);
2170 old_net_conf = connection->net_conf;
2172 if (!old_net_conf) {
2173 drbd_msg_put_info(adm_ctx.reply_skb, "net conf missing, try connect");
2174 retcode = ERR_INVALID_REQUEST;
2175 goto fail;
2176 }
2178 *new_net_conf = *old_net_conf;
2179 if (should_set_defaults(info))
2180 set_net_conf_defaults(new_net_conf);
2182 err = net_conf_from_attrs_for_change(new_net_conf, info);
2183 if (err && err != -ENOMSG) {
2184 retcode = ERR_MANDATORY_TAG;
2185 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2189 retcode = check_net_options(connection, new_net_conf);
2190 if (retcode != NO_ERROR)
2193 /* re-sync running */
2194 rsr = conn_resync_running(connection);
2195 if (rsr && strcmp(new_net_conf->csums_alg, old_net_conf->csums_alg)) {
2196 retcode = ERR_CSUMS_RESYNC_RUNNING;
2200 /* online verify running */
2201 ovr = conn_ov_running(connection);
2202 if (ovr && strcmp(new_net_conf->verify_alg, old_net_conf->verify_alg)) {
2203 retcode = ERR_VERIFY_RUNNING;
2207 retcode = alloc_crypto(&crypto, new_net_conf);
2208 if (retcode != NO_ERROR)
2211 rcu_assign_pointer(connection->net_conf, new_net_conf);
2213 if (!rsr) {
2214 crypto_free_hash(connection->csums_tfm);
2215 connection->csums_tfm = crypto.csums_tfm;
2216 crypto.csums_tfm = NULL;
2217 }
2218 if (!ovr) {
2219 crypto_free_hash(connection->verify_tfm);
2220 connection->verify_tfm = crypto.verify_tfm;
2221 crypto.verify_tfm = NULL;
2222 }
2224 crypto_free_hash(connection->integrity_tfm);
2225 connection->integrity_tfm = crypto.integrity_tfm;
2226 if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
2227 /* Do this without trying to take connection->data.mutex again. */
2228 __drbd_send_protocol(connection, P_PROTOCOL_UPDATE);
2230 crypto_free_hash(connection->cram_hmac_tfm);
2231 connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
2233 mutex_unlock(&connection->resource->conf_update);
2234 mutex_unlock(&connection->data.mutex);
2235 synchronize_rcu();
2236 kfree(old_net_conf);
2238 if (connection->cstate >= C_WF_REPORT_PARAMS) {
2239 struct drbd_peer_device *peer_device;
2240 int vnr;
2242 idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
2243 drbd_send_sync_param(peer_device);
2244 }
2246 goto done;
2248 fail:
2249 mutex_unlock(&connection->resource->conf_update);
2250 mutex_unlock(&connection->data.mutex);
2251 free_crypto(&crypto);
2252 kfree(new_net_conf);
2253 done:
2254 conn_reconfig_done(connection);
2255 out:
2256 mutex_unlock(&adm_ctx.resource->adm_mutex);
2258 drbd_adm_finish(&adm_ctx, info, retcode);
2259 return 0;
2260 }
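/* The net_conf swap above is the classic RCU publish/retire sequence; a
 * generic sketch of the idiom, assuming a pointer "conf" that readers
 * access under rcu_read_lock() and writers serialize with a mutex:
 *
 *   new = kmalloc(sizeof(*new), GFP_KERNEL);
 *   *new = *old;                    // start from the current config
 *   ... modify new ...
 *   rcu_assign_pointer(conf, new);  // publish
 *   synchronize_rcu();              // wait until no reader can see old
 *   kfree(old);                     // now safe to free
 */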
2262 static void connection_to_info(struct connection_info *info,
2263 struct drbd_connection *connection)
2265 info->conn_connection_state = connection->cstate;
2266 info->conn_role = conn_highest_peer(connection);
2269 static void peer_device_to_info(struct peer_device_info *info,
2270 struct drbd_peer_device *peer_device)
2272 struct drbd_device *device = peer_device->device;
2274 info->peer_repl_state =
2275 max_t(enum drbd_conns, C_WF_REPORT_PARAMS, device->state.conn);
2276 info->peer_disk_state = device->state.pdsk;
2277 info->peer_resync_susp_user = device->state.user_isp;
2278 info->peer_resync_susp_peer = device->state.peer_isp;
2279 info->peer_resync_susp_dependency = device->state.aftr_isp;
2282 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
2284 struct connection_info connection_info;
2285 enum drbd_notification_type flags;
2286 unsigned int peer_devices = 0;
2287 struct drbd_config_context adm_ctx;
2288 struct drbd_peer_device *peer_device;
2289 struct net_conf *old_net_conf, *new_net_conf = NULL;
2290 struct crypto crypto = { };
2291 struct drbd_resource *resource;
2292 struct drbd_connection *connection;
2293 enum drbd_ret_code retcode;
2297 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
2299 if (!adm_ctx.reply_skb)
2301 if (retcode != NO_ERROR)
2303 if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
2304 drbd_msg_put_info(adm_ctx.reply_skb, "connection endpoint(s) missing");
2305 retcode = ERR_INVALID_REQUEST;
2309 /* No need for _rcu here. All reconfiguration is
2310 * strictly serialized on genl_lock(). We are protected against
2311 * concurrent reconfiguration/addition/deletion */
2312 for_each_resource(resource, &drbd_resources) {
2313 for_each_connection(connection, resource) {
2314 if (nla_len(adm_ctx.my_addr) == connection->my_addr_len &&
2315 !memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr,
2316 connection->my_addr_len)) {
2317 retcode = ERR_LOCAL_ADDR;
2321 if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len &&
2322 !memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr,
2323 connection->peer_addr_len)) {
2324 retcode = ERR_PEER_ADDR;
2330 mutex_lock(&adm_ctx.resource->adm_mutex);
2331 connection = first_connection(adm_ctx.resource);
2332 conn_reconfig_start(connection);
2334 if (connection->cstate > C_STANDALONE) {
2335 retcode = ERR_NET_CONFIGURED;
2339 /* allocation not in the IO path, drbdsetup / netlink process context */
2340 new_net_conf = kzalloc(sizeof(*new_net_conf), GFP_KERNEL);
2341 if (!new_net_conf) {
2342 retcode = ERR_NOMEM;
2346 set_net_conf_defaults(new_net_conf);
2348 err = net_conf_from_attrs(new_net_conf, info);
2349 if (err && err != -ENOMSG) {
2350 retcode = ERR_MANDATORY_TAG;
2351 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2355 retcode = check_net_options(connection, new_net_conf);
2356 if (retcode != NO_ERROR)
2359 retcode = alloc_crypto(&crypto, new_net_conf);
2360 if (retcode != NO_ERROR)
2363 ((char *)new_net_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
2365 drbd_flush_workqueue(&connection->sender_work);
2367 mutex_lock(&adm_ctx.resource->conf_update);
2368 old_net_conf = connection->net_conf;
2369 if (old_net_conf) {
2370 retcode = ERR_NET_CONFIGURED;
2371 mutex_unlock(&adm_ctx.resource->conf_update);
2372 goto fail;
2373 }
2374 rcu_assign_pointer(connection->net_conf, new_net_conf);
2376 conn_free_crypto(connection);
2377 connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
2378 connection->integrity_tfm = crypto.integrity_tfm;
2379 connection->csums_tfm = crypto.csums_tfm;
2380 connection->verify_tfm = crypto.verify_tfm;
2382 connection->my_addr_len = nla_len(adm_ctx.my_addr);
2383 memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len);
2384 connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
2385 memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);
2387 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2388 peer_devices++;
2389 }
2391 connection_to_info(&connection_info, connection);
2392 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
2393 mutex_lock(&notification_mutex);
2394 notify_connection_state(NULL, 0, connection, &connection_info, NOTIFY_CREATE | flags);
2395 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2396 struct peer_device_info peer_device_info;
2398 peer_device_to_info(&peer_device_info, peer_device);
2399 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
2400 notify_peer_device_state(NULL, 0, peer_device, &peer_device_info, NOTIFY_CREATE | flags);
2401 }
2402 mutex_unlock(&notification_mutex);
2403 mutex_unlock(&adm_ctx.resource->conf_update);
2406 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2407 struct drbd_device *device = peer_device->device;
2408 device->send_cnt = 0;
2409 device->recv_cnt = 0;
2413 retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
2415 conn_reconfig_done(connection);
2416 mutex_unlock(&adm_ctx.resource->adm_mutex);
2417 drbd_adm_finish(&adm_ctx, info, retcode);
2421 free_crypto(&crypto);
2422 kfree(new_net_conf);
2424 conn_reconfig_done(connection);
2425 mutex_unlock(&adm_ctx.resource->adm_mutex);
2427 drbd_adm_finish(&adm_ctx, info, retcode);
2428 return 0;
2429 }
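/* Reached via the generic netlink "connect" command; with drbd-utils this
 * corresponds roughly to (illustrative, version-dependent syntax):
 *
 *   drbdsetup connect <resource> ipv4:10.1.1.31:7789 ipv4:10.1.1.32:7789
 *
 * The endpoints arrive as DRBD_NLA_CFG_CONTEXT attributes and are compared
 * above against every existing connection to reject duplicate addresses. */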
2431 static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection, bool force)
2433 enum drbd_state_rv rv;
2435 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
2436 force ? CS_HARD : 0);
2438 switch (rv) {
2439 case SS_NOTHING_TO_DO:
2440 break;
2441 case SS_ALREADY_STANDALONE:
2442 return SS_SUCCESS;
2443 case SS_PRIMARY_NOP:
2444 /* Our state checking code wants to see the peer outdated. */
2445 rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
2447 if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
2448 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE);
2450 break;
2451 case SS_CW_FAILED_BY_PEER:
2452 /* The peer probably wants to see us outdated. */
2453 rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING,
2454 disk, D_OUTDATED), 0);
2455 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
2456 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
2457 CS_HARD);
2458 }
2459 break;
2460 default:;
2461 /* no special handling necessary */
2462 }
2464 if (rv >= SS_SUCCESS) {
2465 enum drbd_state_rv rv2;
2466 /* No one else can reconfigure the network while I am here.
2467 * The state handling only uses drbd_thread_stop_nowait(),
2468 * we want to really wait here until the receiver is no more.
2469 */
2470 drbd_thread_stop(&connection->receiver);
2472 /* Race breaker. This additional state change request may be
2473 * necessary, if this was a forced disconnect during a receiver
2474 * restart. We may have "killed" the receiver thread just
2475 * after drbd_receiver() returned. Typically, we should be
2476 * C_STANDALONE already, now, and this becomes a no-op.
2477 */
2478 rv2 = conn_request_state(connection, NS(conn, C_STANDALONE),
2479 CS_VERBOSE | CS_HARD);
2480 if (rv2 < SS_SUCCESS)
2481 drbd_err(connection,
2482 "unexpected rv2=%d in conn_try_disconnect()\n",
2483 rv2);
2484 /* Unlike in DRBD 9, the state engine has generated
2485 * NOTIFY_DESTROY events before clearing connection->net_conf. */
2486 }
2487 return rv;
2488 }
2490 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
2492 struct drbd_config_context adm_ctx;
2493 struct disconnect_parms parms;
2494 struct drbd_connection *connection;
2495 enum drbd_state_rv rv;
2496 enum drbd_ret_code retcode;
2499 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
2500 if (!adm_ctx.reply_skb)
2502 if (retcode != NO_ERROR)
2505 connection = adm_ctx.connection;
2506 memset(&parms, 0, sizeof(parms));
2507 if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
2508 err = disconnect_parms_from_attrs(&parms, info);
2509 if (err) {
2510 retcode = ERR_MANDATORY_TAG;
2511 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2516 mutex_lock(&adm_ctx.resource->adm_mutex);
2517 rv = conn_try_disconnect(connection, parms.force_disconnect);
2518 if (rv < SS_SUCCESS)
2519 retcode = rv; /* FIXME: Type mismatch. */
2522 mutex_unlock(&adm_ctx.resource->adm_mutex);
2524 drbd_adm_finish(&adm_ctx, info, retcode);
2528 void resync_after_online_grow(struct drbd_device *device)
2530 int iass; /* I am sync source */
2532 drbd_info(device, "Resync of new storage after online grow\n");
2533 if (device->state.role != device->state.peer)
2534 iass = (device->state.role == R_PRIMARY);
2535 else
2536 iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);
2538 if (iass)
2539 drbd_start_resync(device, C_SYNC_SOURCE);
2540 else
2541 _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
2542 }
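/* Example: after both backing devices of a connected pair have been grown
 * and the new size adopted, one side must sync the added area. A Primary
 * wins if the roles differ; with equal roles the tie is broken by the
 * RESOLVE_CONFLICTS flag negotiated at handshake time. */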
2544 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2546 struct drbd_config_context adm_ctx;
2547 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
2548 struct resize_parms rs;
2549 struct drbd_device *device;
2550 enum drbd_ret_code retcode;
2551 enum determine_dev_size dd;
2552 bool change_al_layout = false;
2553 enum dds_flags ddsf;
2557 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2558 if (!adm_ctx.reply_skb)
2560 if (retcode != NO_ERROR)
2563 mutex_lock(&adm_ctx.resource->adm_mutex);
2564 device = adm_ctx.device;
2565 if (!get_ldev(device)) {
2566 retcode = ERR_NO_DISK;
2570 memset(&rs, 0, sizeof(struct resize_parms));
2571 rs.al_stripes = device->ldev->md.al_stripes;
2572 rs.al_stripe_size = device->ldev->md.al_stripe_size_4k * 4;
2573 if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
2574 err = resize_parms_from_attrs(&rs, info);
2575 if (err) {
2576 retcode = ERR_MANDATORY_TAG;
2577 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2582 if (device->state.conn > C_CONNECTED) {
2583 retcode = ERR_RESIZE_RESYNC;
2587 if (device->state.role == R_SECONDARY &&
2588 device->state.peer == R_SECONDARY) {
2589 retcode = ERR_NO_PRIMARY;
2593 if (rs.no_resync && first_peer_device(device)->connection->agreed_pro_version < 93) {
2594 retcode = ERR_NEED_APV_93;
2598 rcu_read_lock();
2599 u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
2600 rcu_read_unlock();
2601 if (u_size != (sector_t)rs.resize_size) {
2602 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
2603 if (!new_disk_conf) {
2604 retcode = ERR_NOMEM;
2609 if (device->ldev->md.al_stripes != rs.al_stripes ||
2610 device->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
2611 u32 al_size_k = rs.al_stripes * rs.al_stripe_size;
2613 if (al_size_k > (16 * 1024 * 1024)) {
2614 retcode = ERR_MD_LAYOUT_TOO_BIG;
2618 if (al_size_k < MD_32kB_SECT/2) {
2619 retcode = ERR_MD_LAYOUT_TOO_SMALL;
2623 if (device->state.conn != C_CONNECTED && !rs.resize_force) {
2624 retcode = ERR_MD_LAYOUT_CONNECTED;
2628 change_al_layout = true;
2629 }
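/* In other words: al_stripes * al_stripe_size must land between 32kB
 * (MD_32kB_SECT/2, in kB units) and 16GB, and changing the on-disk AL
 * layout of a connected device additionally requires rs.resize_force. */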
2631 if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev))
2632 device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
2634 if (new_disk_conf) {
2635 mutex_lock(&device->resource->conf_update);
2636 old_disk_conf = device->ldev->disk_conf;
2637 *new_disk_conf = *old_disk_conf;
2638 new_disk_conf->disk_size = (sector_t)rs.resize_size;
2639 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
2640 mutex_unlock(&device->resource->conf_update);
2641 synchronize_rcu();
2642 kfree(old_disk_conf);
2645 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
2646 dd = drbd_determine_dev_size(device, ddsf, change_al_layout ? &rs : NULL);
2647 drbd_md_sync(device);
2649 if (dd == DS_ERROR) {
2650 retcode = ERR_NOMEM_BITMAP;
2652 } else if (dd == DS_ERROR_SPACE_MD) {
2653 retcode = ERR_MD_LAYOUT_NO_FIT;
2655 } else if (dd == DS_ERROR_SHRINK) {
2656 retcode = ERR_IMPLICIT_SHRINK;
2660 if (device->state.conn == C_CONNECTED) {
2661 if (dd == DS_GREW)
2662 set_bit(RESIZE_PENDING, &device->flags);
2664 drbd_send_uuids(first_peer_device(device));
2665 drbd_send_sizes(first_peer_device(device), 1, ddsf);
2666 }
2668 fail:
2669 mutex_unlock(&adm_ctx.resource->adm_mutex);
2670 finish:
2671 drbd_adm_finish(&adm_ctx, info, retcode);
2672 return 0;
2674 fail_ldev:
2675 put_ldev(device);
2676 goto fail;
2677 }
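/* Illustrative drbd-utils triggers for this handler (option names depend
 * on the drbd-utils version):
 *
 *   drbdsetup resize <minor>                  # adopt a grown backing device
 *   drbdsetup resize <minor> --assume-clean   # rs.no_resync: skip the resync
 *   drbdsetup resize <minor> --al-stripes <n> --al-stripe-size <kB>
 *                                             # change the activity log layout
 */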
2679 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
2681 struct drbd_config_context adm_ctx;
2682 enum drbd_ret_code retcode;
2683 struct res_opts res_opts;
2686 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
2687 if (!adm_ctx.reply_skb)
2689 if (retcode != NO_ERROR)
2692 res_opts = adm_ctx.resource->res_opts;
2693 if (should_set_defaults(info))
2694 set_res_opts_defaults(&res_opts);
2696 err = res_opts_from_attrs(&res_opts, info);
2697 if (err && err != -ENOMSG) {
2698 retcode = ERR_MANDATORY_TAG;
2699 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2703 mutex_lock(&adm_ctx.resource->adm_mutex);
2704 err = set_resource_options(adm_ctx.resource, &res_opts);
2705 if (err) {
2706 retcode = ERR_INVALID_REQUEST;
2707 if (err == -ENOMEM)
2708 retcode = ERR_NOMEM;
2709 }
2710 mutex_unlock(&adm_ctx.resource->adm_mutex);
2713 drbd_adm_finish(&adm_ctx, info, retcode);
2717 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
2719 struct drbd_config_context adm_ctx;
2720 struct drbd_device *device;
2721 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2723 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2724 if (!adm_ctx.reply_skb)
2726 if (retcode != NO_ERROR)
2729 device = adm_ctx.device;
2730 if (!get_ldev(device)) {
2731 retcode = ERR_NO_DISK;
2735 mutex_lock(&adm_ctx.resource->adm_mutex);
2737 /* If there is still bitmap IO pending, probably because of a previous
2738 * resync just being finished, wait for it before requesting a new resync.
2739 * Also wait for its after_state_ch(). */
2740 drbd_suspend_io(device);
2741 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
2742 drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
2744 /* If we happen to be C_STANDALONE R_SECONDARY, just change to
2745 * D_INCONSISTENT, and set all bits in the bitmap. Otherwise,
2746 * try to start a resync handshake as sync target for full sync.
2747 */
2748 if (device->state.conn == C_STANDALONE && device->state.role == R_SECONDARY) {
2749 retcode = drbd_request_state(device, NS(disk, D_INCONSISTENT));
2750 if (retcode >= SS_SUCCESS) {
2751 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
2752 "set_n_write from invalidate", BM_LOCKED_MASK))
2753 retcode = ERR_IO_MD_DISK;
2754 }
2755 } else
2756 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
2757 drbd_resume_io(device);
2758 mutex_unlock(&adm_ctx.resource->adm_mutex);
2761 drbd_adm_finish(&adm_ctx, info, retcode);
2762 return 0;
2763 }
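/* Illustrative: "drbdsetup invalidate <minor>" ends up here. While
 * connected it makes this node the sync target of a full resync; while
 * standalone and Secondary it only marks the entire device out of sync
 * (all bitmap bits set, disk state D_INCONSISTENT). */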
2765 static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2766 union drbd_state mask, union drbd_state val)
2768 struct drbd_config_context adm_ctx;
2769 enum drbd_ret_code retcode;
2771 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2772 if (!adm_ctx.reply_skb)
2774 if (retcode != NO_ERROR)
2777 mutex_lock(&adm_ctx.resource->adm_mutex);
2778 retcode = drbd_request_state(adm_ctx.device, mask, val);
2779 mutex_unlock(&adm_ctx.resource->adm_mutex);
2781 drbd_adm_finish(&adm_ctx, info, retcode);
2785 static int drbd_bmio_set_susp_al(struct drbd_device *device) __must_hold(local)
2786 {
2787 int rv;
2789 rv = drbd_bmio_set_n_write(device);
2790 drbd_suspend_al(device);
2791 return rv;
2792 }
2794 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
2796 struct drbd_config_context adm_ctx;
2797 int retcode; /* drbd_ret_code, drbd_state_rv */
2798 struct drbd_device *device;
2800 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2801 if (!adm_ctx.reply_skb)
2803 if (retcode != NO_ERROR)
2806 device = adm_ctx.device;
2807 if (!get_ldev(device)) {
2808 retcode = ERR_NO_DISK;
2812 mutex_lock(&adm_ctx.resource->adm_mutex);
2814 /* If there is still bitmap IO pending, probably because of a previous
2815 * resync just being finished, wait for it before requesting a new resync.
2816 * Also wait for its after_state_ch(). */
2817 drbd_suspend_io(device);
2818 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
2819 drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
2821 /* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
2822 * in the bitmap. Otherwise, try to start a resync handshake
2823 * as sync source for full sync.
2824 */
2825 if (device->state.conn == C_STANDALONE && device->state.role == R_PRIMARY) {
2826 /* The peer will get a resync upon connect anyways. Just make that
2827 into a full resync. */
2828 retcode = drbd_request_state(device, NS(pdsk, D_INCONSISTENT));
2829 if (retcode >= SS_SUCCESS) {
2830 if (drbd_bitmap_io(device, &drbd_bmio_set_susp_al,
2831 "set_n_write from invalidate_peer",
2832 BM_LOCKED_SET_ALLOWED))
2833 retcode = ERR_IO_MD_DISK;
2834 }
2835 } else
2836 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
2837 drbd_resume_io(device);
2838 mutex_unlock(&adm_ctx.resource->adm_mutex);
2841 drbd_adm_finish(&adm_ctx, info, retcode);
2845 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
2847 struct drbd_config_context adm_ctx;
2848 enum drbd_ret_code retcode;
2850 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2851 if (!adm_ctx.reply_skb)
2853 if (retcode != NO_ERROR)
2856 mutex_lock(&adm_ctx.resource->adm_mutex);
2857 if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
2858 retcode = ERR_PAUSE_IS_SET;
2859 mutex_unlock(&adm_ctx.resource->adm_mutex);
2861 drbd_adm_finish(&adm_ctx, info, retcode);
2865 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
2867 struct drbd_config_context adm_ctx;
2868 union drbd_dev_state s;
2869 enum drbd_ret_code retcode;
2871 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2872 if (!adm_ctx.reply_skb)
2874 if (retcode != NO_ERROR)
2877 mutex_lock(&adm_ctx.resource->adm_mutex);
2878 if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
2879 s = adm_ctx.device->state;
2880 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
2881 retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
2882 s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
2883 } else {
2884 retcode = ERR_PAUSE_IS_CLEAR;
2885 }
2886 }
2887 mutex_unlock(&adm_ctx.resource->adm_mutex);
2889 drbd_adm_finish(&adm_ctx, info, retcode);
2893 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
2895 return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
2898 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
2900 struct drbd_config_context adm_ctx;
2901 struct drbd_device *device;
2902 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2904 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2905 if (!adm_ctx.reply_skb)
2907 if (retcode != NO_ERROR)
2910 mutex_lock(&adm_ctx.resource->adm_mutex);
2911 device = adm_ctx.device;
2912 if (test_bit(NEW_CUR_UUID, &device->flags)) {
2913 drbd_uuid_new_current(device);
2914 clear_bit(NEW_CUR_UUID, &device->flags);
2915 }
2916 drbd_suspend_io(device);
2917 retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
2918 if (retcode == SS_SUCCESS) {
2919 if (device->state.conn < C_CONNECTED)
2920 tl_clear(first_peer_device(device)->connection);
2921 if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED)
2922 tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
2924 drbd_resume_io(device);
2925 mutex_unlock(&adm_ctx.resource->adm_mutex);
2927 drbd_adm_finish(&adm_ctx, info, retcode);
2931 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
2933 return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
2936 static int nla_put_drbd_cfg_context(struct sk_buff *skb,
2937 struct drbd_resource *resource,
2938 struct drbd_connection *connection,
2939 struct drbd_device *device)
2942 nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
2943 if (!nla)
2944 goto nla_put_failure;
2945 if (device &&
2946 nla_put_u32(skb, T_ctx_volume, device->vnr))
2947 goto nla_put_failure;
2948 if (nla_put_string(skb, T_ctx_resource_name, resource->name))
2949 goto nla_put_failure;
2950 if (connection) {
2951 if (connection->my_addr_len &&
2952 nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr))
2953 goto nla_put_failure;
2954 if (connection->peer_addr_len &&
2955 nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr))
2956 goto nla_put_failure;
2957 }
2958 nla_nest_end(skb, nla);
2959 return 0;
2961 nla_put_failure:
2962 if (nla)
2963 nla_nest_cancel(skb, nla);
2964 return -EMSGSIZE;
2965 }
2967 /*
2968 * Return the connection of @resource if @resource has exactly one connection.
2969 */
2970 static struct drbd_connection *the_only_connection(struct drbd_resource *resource)
2972 struct list_head *connections = &resource->connections;
2974 if (list_empty(connections) || connections->next->next != connections)
2975 return NULL;
2976 return list_first_entry(&resource->connections, struct drbd_connection, connections);
2977 }
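/* The open-coded test above means "the list has exactly one entry"; a
 * sketch of the equivalent using the generic list helpers:
 *
 *   if (!list_is_singular(&resource->connections))
 *           return NULL;
 *   return list_first_entry(&resource->connections,
 *                           struct drbd_connection, connections);
 */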
2979 static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
2980 const struct sib_info *sib)
2982 struct drbd_resource *resource = device->resource;
2983 struct state_info *si = NULL; /* for sizeof(si->member); */
2984 struct nlattr *nla;
2985 int got_ldev;
2986 int err = 0;
2987 int exclude_sensitive;
2989 /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
2990 * to. So we better exclude_sensitive information.
2992 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
2993 * in the context of the requesting user process. Exclude sensitive
2994 * information, unless current has superuser.
2996 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
2997 * relies on the current implementation of netlink_dump(), which
2998 * executes the dump callback successively from netlink_recvmsg(),
2999 * always in the context of the receiving process */
3000 exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
3002 got_ldev = get_ldev(device);
3004 /* We need to add connection name and volume number information still.
3005 * Minor number is in drbd_genlmsghdr. */
3006 if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device))
3007 goto nla_put_failure;
3009 if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive))
3010 goto nla_put_failure;
3012 rcu_read_lock();
3013 if (got_ldev) {
3014 struct disk_conf *disk_conf;
3016 disk_conf = rcu_dereference(device->ldev->disk_conf);
3017 err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
3018 }
3019 if (!err) {
3020 struct net_conf *nc;
3022 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
3023 if (nc)
3024 err = net_conf_to_skb(skb, nc, exclude_sensitive);
3025 }
3026 rcu_read_unlock();
3027 if (err)
3028 goto nla_put_failure;
3030 nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
3032 goto nla_put_failure;
3033 if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
3034 nla_put_u32(skb, T_current_state, device->state.i) ||
3035 nla_put_u64(skb, T_ed_uuid, device->ed_uuid) ||
3036 nla_put_u64(skb, T_capacity, drbd_get_capacity(device->this_bdev)) ||
3037 nla_put_u64(skb, T_send_cnt, device->send_cnt) ||
3038 nla_put_u64(skb, T_recv_cnt, device->recv_cnt) ||
3039 nla_put_u64(skb, T_read_cnt, device->read_cnt) ||
3040 nla_put_u64(skb, T_writ_cnt, device->writ_cnt) ||
3041 nla_put_u64(skb, T_al_writ_cnt, device->al_writ_cnt) ||
3042 nla_put_u64(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
3043 nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
3044 nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
3045 nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
3046 goto nla_put_failure;
3048 if (got_ldev) {
3049 int err;
3051 spin_lock_irq(&device->ldev->md.uuid_lock);
3052 err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid);
3053 spin_unlock_irq(&device->ldev->md.uuid_lock);
3055 if (err)
3056 goto nla_put_failure;
3058 if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
3059 nla_put_u64(skb, T_bits_total, drbd_bm_bits(device)) ||
3060 nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(device)))
3061 goto nla_put_failure;
3062 if (C_SYNC_SOURCE <= device->state.conn &&
3063 C_PAUSED_SYNC_T >= device->state.conn) {
3064 if (nla_put_u64(skb, T_bits_rs_total, device->rs_total) ||
3065 nla_put_u64(skb, T_bits_rs_failed, device->rs_failed))
3066 goto nla_put_failure;
3070 if (sib) {
3071 switch (sib->sib_reason) {
3072 case SIB_SYNC_PROGRESS:
3073 case SIB_GET_STATUS_REPLY:
3074 break;
3075 case SIB_STATE_CHANGE:
3076 if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
3077 nla_put_u32(skb, T_new_state, sib->ns.i))
3078 goto nla_put_failure;
3079 break;
3080 case SIB_HELPER_POST:
3081 if (nla_put_u32(skb, T_helper_exit_code,
3082 sib->helper_exit_code))
3083 goto nla_put_failure;
3084 /* fall through */
3085 case SIB_HELPER_PRE:
3086 if (nla_put_string(skb, T_helper, sib->helper_name))
3087 goto nla_put_failure;
3088 break;
3089 }
3090 }
3091 nla_nest_end(skb, nla);
3093 if (0)
3094 nla_put_failure:
3095 err = -EMSGSIZE;
3096 if (got_ldev)
3097 put_ldev(device);
3098 return err;
3099 }
3101 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
3103 struct drbd_config_context adm_ctx;
3104 enum drbd_ret_code retcode;
3107 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3108 if (!adm_ctx.reply_skb)
3110 if (retcode != NO_ERROR)
3113 err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL);
3114 if (err) {
3115 nlmsg_free(adm_ctx.reply_skb);
3116 return err;
3117 }
3119 drbd_adm_finish(&adm_ctx, info, retcode);
3123 static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
3125 struct drbd_device *device;
3126 struct drbd_genlmsghdr *dh;
3127 struct drbd_resource *pos = (struct drbd_resource *)cb->args[0];
3128 struct drbd_resource *resource = NULL;
3129 struct drbd_resource *tmp;
3130 unsigned volume = cb->args[1];
3132 /* Open coded, deferred, iteration:
3133 * for_each_resource_safe(resource, tmp, &drbd_resources) {
3134 * connection = "first connection of resource or undefined";
3135 * idr_for_each_entry(&resource->devices, device, i) {
3139 * where resource is cb->args[0];
3140 * and i is cb->args[1];
3142 * cb->args[2] indicates if we shall loop over all resources,
3143 * or just dump all volumes of a single resource.
3145 * This may miss entries inserted after this dump started,
3146 * or entries deleted before they are reached.
3148 * We need to make sure the device won't disappear while
3149 * we are looking at it, and revalidate our iterators
3150 * on each iteration.
3151 */
3153 /* synchronize with conn_create()/drbd_destroy_connection() */
3154 rcu_read_lock();
3155 /* revalidate iterator position */
3156 for_each_resource_rcu(tmp, &drbd_resources) {
3158 /* first iteration */
3170 device = idr_get_next(&resource->devices, &volume);
3171 if (!device) {
3172 /* No more volumes to dump on this resource.
3173 * Advance resource iterator. */
3174 pos = list_entry_rcu(resource->resources.next,
3175 struct drbd_resource, resources);
3176 /* Did we dump any volume of this resource yet? */
3177 if (volume != 0) {
3178 /* If we reached the end of the list,
3179 * or only a single resource dump was requested,
3180 * we are done. */
3181 if (&pos->resources == &drbd_resources || cb->args[2])
3189 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3190 cb->nlh->nlmsg_seq, &drbd_genl_family,
3191 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
3192 if (!dh)
3193 goto out;
3195 if (!device) {
3196 /* This is a connection without a single volume.
3197 * Surprisingly enough, it may have a network
3198 * configuration. */
3199 struct drbd_connection *connection;
3201 dh->minor = -1U;
3202 dh->ret_code = NO_ERROR;
3203 connection = the_only_connection(resource);
3204 if (nla_put_drbd_cfg_context(skb, resource, connection, NULL))
3205 goto cancel;
3206 if (connection) {
3207 struct net_conf *nc;
3209 nc = rcu_dereference(connection->net_conf);
3210 if (nc && net_conf_to_skb(skb, nc, 1) != 0)
3211 goto cancel;
3212 }
3213 goto done;
3214 }
3216 D_ASSERT(device, device->vnr == volume);
3217 D_ASSERT(device, device->resource == resource);
3219 dh->minor = device_to_minor(device);
3220 dh->ret_code = NO_ERROR;
3222 if (nla_put_status_info(skb, device, NULL)) {
3223 cancel:
3224 genlmsg_cancel(skb, dh);
3225 goto out;
3226 }
3227 done:
3228 genlmsg_end(skb, dh);
3229 }
3231 out:
3232 rcu_read_unlock();
3233 /* where to start the next iteration */
3234 cb->args[0] = (long)pos;
3235 cb->args[1] = (pos == resource) ? volume + 1 : 0;
3237 /* If no more resources/volumes/minors are found, the resulting empty
3238 * skb terminates the dump. */
3239 return skb->len;
3240 }
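/* The whole dump cursor lives in cb->args[]: args[0] holds the resource
 * last visited, args[1] the next volume number within it, and args[2],
 * when set, marks a single-resource dump and pins which resource it was
 * started for. */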
3242 /*
3243 * Request status of all resources, or of all volumes within a single resource.
3245 * This is a dump, as the answer may not fit in a single reply skb otherwise.
3246 * Which means we cannot use the family->attrbuf or other such members, because
3247 * dump is NOT protected by the genl_lock(). During dump, we only have access
3248 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
3250 * Once things are set up properly, we call into get_one_status().
3251 */
3252 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
3254 const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
3256 const char *resource_name;
3257 struct drbd_resource *resource;
3260 /* Is this a followup call? */
3261 if (cb->args[0]) {
3262 /* ... of a single resource dump,
3263 * and the resource iterator has been advanced already? */
3264 if (cb->args[2] && cb->args[2] != cb->args[0])
3265 return 0; /* DONE. */
3266 goto dump;
3267 }
3269 /* First call (from netlink_dump_start). We need to figure out
3270 * which resource(s) the user wants us to dump. */
3271 nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
3272 nlmsg_attrlen(cb->nlh, hdrlen),
3273 DRBD_NLA_CFG_CONTEXT);
3275 /* No explicit context given. Dump all. */
3276 if (!nla)
3277 goto dump;
3278 maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
3279 nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
3280 if (IS_ERR(nla))
3281 return PTR_ERR(nla);
3282 /* context given, but no name present? */
3283 if (!nla)
3284 return -EINVAL;
3285 resource_name = nla_data(nla);
3286 if (!*resource_name)
3287 return -ENODEV;
3288 resource = drbd_find_resource(resource_name);
3289 if (!resource)
3290 return -ENODEV;
3292 kref_put(&resource->kref, drbd_destroy_resource); /* get_one_status() revalidates the resource */
3294 /* prime iterators, and set "filter" mode mark:
3295 * only dump this connection. */
3296 cb->args[0] = (long)resource;
3297 /* cb->args[1] = 0; passed in this way. */
3298 cb->args[2] = (long)resource;
3300 dump:
3301 return get_one_status(skb, cb);
3302 }
3304 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
3306 struct drbd_config_context adm_ctx;
3307 enum drbd_ret_code retcode;
3308 struct timeout_parms tp;
3311 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3312 if (!adm_ctx.reply_skb)
3314 if (retcode != NO_ERROR)
3317 tp.timeout_type =
3318 adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
3319 test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? UT_DEGRADED :
3320 UT_DEFAULT;
3322 err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
3323 if (err) {
3324 nlmsg_free(adm_ctx.reply_skb);
3325 return err;
3326 }
3327 out:
3328 drbd_adm_finish(&adm_ctx, info, retcode);
3332 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
3334 struct drbd_config_context adm_ctx;
3335 struct drbd_device *device;
3336 enum drbd_ret_code retcode;
3337 struct start_ov_parms parms;
3339 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3340 if (!adm_ctx.reply_skb)
3342 if (retcode != NO_ERROR)
3345 device = adm_ctx.device;
3347 /* resume from last known position, if possible */
3348 parms.ov_start_sector = device->ov_start_sector;
3349 parms.ov_stop_sector = ULLONG_MAX;
3350 if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
3351 int err = start_ov_parms_from_attrs(&parms, info);
3352 if (err) {
3353 retcode = ERR_MANDATORY_TAG;
3354 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
3358 mutex_lock(&adm_ctx.resource->adm_mutex);
3360 /* w_make_ov_request expects position to be aligned */
3361 device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
3362 device->ov_stop_sector = parms.ov_stop_sector;
3364 /* If there is still bitmap IO pending, e.g. previous resync or verify
3365 * just being finished, wait for it before requesting a new resync. */
3366 drbd_suspend_io(device);
3367 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
3368 retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
3369 drbd_resume_io(device);
3371 mutex_unlock(&adm_ctx.resource->adm_mutex);
3373 drbd_adm_finish(&adm_ctx, info, retcode);
3374 return 0;
3375 }
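/* Illustrative (drbd-utils): "drbdsetup verify <minor>" lands here; the
 * optional start position resumes an interrupted online verify and is
 * rounded down above to a bitmap-bit boundary (BM_SECT_PER_BIT). */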
3378 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
3380 struct drbd_config_context adm_ctx;
3381 struct drbd_device *device;
3382 enum drbd_ret_code retcode;
3383 int skip_initial_sync = 0;
3385 struct new_c_uuid_parms args;
3387 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3388 if (!adm_ctx.reply_skb)
3390 if (retcode != NO_ERROR)
3393 device = adm_ctx.device;
3394 memset(&args, 0, sizeof(args));
3395 if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
3396 err = new_c_uuid_parms_from_attrs(&args, info);
3397 if (err) {
3398 retcode = ERR_MANDATORY_TAG;
3399 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
3404 mutex_lock(&adm_ctx.resource->adm_mutex);
3405 mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */
3407 if (!get_ldev(device)) {
3408 retcode = ERR_NO_DISK;
3412 /* this is "skip initial sync", assume to be clean */
3413 if (device->state.conn == C_CONNECTED &&
3414 first_peer_device(device)->connection->agreed_pro_version >= 90 &&
3415 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
3416 drbd_info(device, "Preparing to skip initial sync\n");
3417 skip_initial_sync = 1;
3418 } else if (device->state.conn != C_STANDALONE) {
3419 retcode = ERR_CONNECTED;
3423 drbd_uuid_set(device, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
3424 drbd_uuid_new_current(device); /* New current, previous to UI_BITMAP */
3426 if (args.clear_bm) {
3427 err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
3428 "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
3429 if (err) {
3430 drbd_err(device, "Writing bitmap failed with %d\n", err);
3431 retcode = ERR_IO_MD_DISK;
3432 }
3433 if (skip_initial_sync) {
3434 drbd_send_uuids_skip_initial_sync(first_peer_device(device));
3435 _drbd_uuid_set(device, UI_BITMAP, 0);
3436 drbd_print_uuids(device, "cleared bitmap UUID");
3437 spin_lock_irq(&device->resource->req_lock);
3438 _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3439 CS_VERBOSE, NULL);
3440 spin_unlock_irq(&device->resource->req_lock);
3444 drbd_md_sync(device);
3448 mutex_unlock(device->state_mutex);
3449 mutex_unlock(&adm_ctx.resource->adm_mutex);
3451 drbd_adm_finish(&adm_ctx, info, retcode);
3452 return 0;
3453 }
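/* This is the "skip the initial sync" shortcut for freshly created,
 * connected devices; with drbd-utils it is reached via something like
 * (illustrative):
 *
 *   drbdadm -- --clear-bitmap new-current-uuid <resource>
 *
 * after which both nodes jump straight to D_UP_TO_DATE without a full sync. */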
3455 static enum drbd_ret_code
3456 drbd_check_resource_name(struct drbd_config_context *adm_ctx)
3458 const char *name = adm_ctx->resource_name;
3459 if (!name || !name[0]) {
3460 drbd_msg_put_info(adm_ctx->reply_skb, "resource name missing");
3461 return ERR_MANDATORY_TAG;
3463 /* if we want to use these in sysfs/configfs/debugfs some day,
3464 * we must not allow slashes */
3465 if (strchr(name, '/')) {
3466 drbd_msg_put_info(adm_ctx->reply_skb, "invalid resource name");
3467 return ERR_INVALID_REQUEST;
3472 static void resource_to_info(struct resource_info *info,
3473 struct drbd_resource *resource)
3475 info->res_role = conn_highest_role(first_connection(resource));
3476 info->res_susp = resource->susp;
3477 info->res_susp_nod = resource->susp_nod;
3478 info->res_susp_fen = resource->susp_fen;
3481 int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
3483 struct drbd_connection *connection;
3484 struct drbd_config_context adm_ctx;
3485 enum drbd_ret_code retcode;
3486 struct res_opts res_opts;
3489 retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0);
3490 if (!adm_ctx.reply_skb)
3492 if (retcode != NO_ERROR)
3495 set_res_opts_defaults(&res_opts);
3496 err = res_opts_from_attrs(&res_opts, info);
3497 if (err && err != -ENOMSG) {
3498 retcode = ERR_MANDATORY_TAG;
3499 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
3503 retcode = drbd_check_resource_name(&adm_ctx);
3504 if (retcode != NO_ERROR)
3507 if (adm_ctx.resource) {
3508 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
3509 retcode = ERR_INVALID_REQUEST;
3510 drbd_msg_put_info(adm_ctx.reply_skb, "resource exists");
3512 /* else: still NO_ERROR */
3516 /* not yet safe for genl_family.parallel_ops */
3517 mutex_lock(&resources_mutex);
3518 connection = conn_create(adm_ctx.resource_name, &res_opts);
3519 mutex_unlock(&resources_mutex);
3521 if (connection) {
3522 struct resource_info resource_info;
3524 mutex_lock(&notification_mutex);
3525 resource_to_info(&resource_info, connection->resource);
3526 notify_resource_state(NULL, 0, connection->resource,
3527 &resource_info, NOTIFY_CREATE);
3528 mutex_unlock(&notification_mutex);
3529 } else
3530 retcode = ERR_NOMEM;
3533 drbd_adm_finish(&adm_ctx, info, retcode);
3537 static void device_to_info(struct device_info *info,
3538 struct drbd_device *device)
3540 info->dev_disk_state = device->state.disk;
3544 int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
3546 struct drbd_config_context adm_ctx;
3547 struct drbd_genlmsghdr *dh = info->userhdr;
3548 enum drbd_ret_code retcode;
3550 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
3551 if (!adm_ctx.reply_skb)
3553 if (retcode != NO_ERROR)
3556 if (dh->minor > MINORMASK) {
3557 drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range");
3558 retcode = ERR_INVALID_REQUEST;
3561 if (adm_ctx.volume > DRBD_VOLUME_MAX) {
3562 drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range");
3563 retcode = ERR_INVALID_REQUEST;
3567 /* drbd_adm_prepare made sure already
3568 * that first_peer_device(device)->connection and device->vnr match the request. */
3569 if (adm_ctx.device) {
3570 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
3571 retcode = ERR_MINOR_OR_VOLUME_EXISTS;
3572 /* else: still NO_ERROR */
3576 mutex_lock(&adm_ctx.resource->adm_mutex);
3577 retcode = drbd_create_device(&adm_ctx, dh->minor);
3578 if (retcode == NO_ERROR) {
3579 struct drbd_device *device;
3580 struct drbd_peer_device *peer_device;
3581 struct device_info info;
3582 unsigned int peer_devices = 0;
3583 enum drbd_notification_type flags;
3585 device = minor_to_device(dh->minor);
3586 for_each_peer_device(peer_device, device) {
3587 if (!has_net_conf(peer_device->connection))
3588 continue;
3589 peer_devices++;
3590 }
3592 device_to_info(&info, device);
3593 mutex_lock(&notification_mutex);
3594 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
3595 notify_device_state(NULL, 0, device, &info, NOTIFY_CREATE | flags);
3596 for_each_peer_device(peer_device, device) {
3597 struct peer_device_info peer_device_info;
3599 if (!has_net_conf(peer_device->connection))
3600 continue;
3601 peer_device_to_info(&peer_device_info, peer_device);
3602 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
3603 notify_peer_device_state(NULL, 0, peer_device, &peer_device_info,
3604 NOTIFY_CREATE | flags);
3606 mutex_unlock(&notification_mutex);
3608 mutex_unlock(&adm_ctx.resource->adm_mutex);
3610 drbd_adm_finish(&adm_ctx, info, retcode);
3614 static enum drbd_ret_code adm_del_minor(struct drbd_device *device)
3616 struct drbd_peer_device *peer_device;
3618 if (device->state.disk == D_DISKLESS &&
3619 /* no need to be device->state.conn == C_STANDALONE &&
3620 * we may want to delete a minor from a live replication group.
3621 */
3622 device->state.role == R_SECONDARY) {
3623 struct drbd_connection *connection =
3624 first_connection(device->resource);
3626 _drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
3627 CS_VERBOSE + CS_WAIT_COMPLETE);
3629 /* If the state engine hasn't stopped the sender thread yet, we
3630 * need to flush the sender work queue before generating the
3631 * DESTROY events here. */
3632 if (get_t_state(&connection->worker) == RUNNING)
3633 drbd_flush_workqueue(&connection->sender_work);
3635 mutex_lock(&notification_mutex);
3636 for_each_peer_device(peer_device, device) {
3637 if (!has_net_conf(peer_device->connection))
3638 continue;
3639 notify_peer_device_state(NULL, 0, peer_device, NULL,
3640 NOTIFY_DESTROY | NOTIFY_CONTINUES);
3641 }
3642 notify_device_state(NULL, 0, device, NULL, NOTIFY_DESTROY);
3643 mutex_unlock(&notification_mutex);
3645 drbd_delete_device(device);
3646 return NO_ERROR;
3647 }
3648 return ERR_MINOR_CONFIGURED;
3651 int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
3653 struct drbd_config_context adm_ctx;
3654 enum drbd_ret_code retcode;
3656 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3657 if (!adm_ctx.reply_skb)
3659 if (retcode != NO_ERROR)
3662 mutex_lock(&adm_ctx.resource->adm_mutex);
3663 retcode = adm_del_minor(adm_ctx.device);
3664 mutex_unlock(&adm_ctx.resource->adm_mutex);
3666 drbd_adm_finish(&adm_ctx, info, retcode);
3670 static int adm_del_resource(struct drbd_resource *resource)
3672 struct drbd_connection *connection;
3674 for_each_connection(connection, resource) {
3675 if (connection->cstate > C_STANDALONE)
3676 return ERR_NET_CONFIGURED;
3678 if (!idr_is_empty(&resource->devices))
3679 return ERR_RES_IN_USE;
3681 /* The state engine has stopped the sender thread, so we don't
3682 * need to flush the sender work queue before generating the
3683 * DESTROY event here. */
3684 mutex_lock(&notification_mutex);
3685 notify_resource_state(NULL, 0, resource, NULL, NOTIFY_DESTROY);
3686 mutex_unlock(&notification_mutex);
3688 mutex_lock(&resources_mutex);
3689 list_del_rcu(&resource->resources);
3690 mutex_unlock(&resources_mutex);
3691 /* Make sure all threads have actually stopped: state handling only
3692 * does drbd_thread_stop_nowait(). */
3693 list_for_each_entry(connection, &resource->connections, connections)
3694 drbd_thread_stop(&connection->worker);
3696 drbd_free_resource(resource);
3697 return NO_ERROR;
3698 }
3700 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
3702 struct drbd_config_context adm_ctx;
3703 struct drbd_resource *resource;
3704 struct drbd_connection *connection;
3705 struct drbd_device *device;
3706 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
3709 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
3710 if (!adm_ctx.reply_skb)
3712 if (retcode != NO_ERROR)
3715 resource = adm_ctx.resource;
3716 mutex_lock(&resource->adm_mutex);
3718 for_each_connection(connection, resource) {
3719 struct drbd_peer_device *peer_device;
3721 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
3722 retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
3723 if (retcode < SS_SUCCESS) {
3724 drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote");
3729 retcode = conn_try_disconnect(connection, 0);
3730 if (retcode < SS_SUCCESS) {
3731 drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect");
3737 idr_for_each_entry(&resource->devices, device, i) {
3738 retcode = adm_detach(device, 0);
3739 if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
3740 drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach");
3745 /* delete volumes */
3746 idr_for_each_entry(&resource->devices, device, i) {
3747 retcode = adm_del_minor(device);
3748 if (retcode != NO_ERROR) {
3749 /* "can not happen" */
3750 drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume");
3755 retcode = adm_del_resource(resource);
3757 mutex_unlock(&resource->adm_mutex);
3759 drbd_adm_finish(&adm_ctx, info, retcode);
3760 return 0;
3761 }
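/* "down" is the aggregate teardown, in strict order: demote every volume
 * to Secondary, disconnect every connection, detach every disk, delete
 * every minor, and finally delete the now-empty resource; the first step
 * that fails aborts the whole operation with an error reply. */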
3763 int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
3765 struct drbd_config_context adm_ctx;
3766 struct drbd_resource *resource;
3767 enum drbd_ret_code retcode;
3769 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
3770 if (!adm_ctx.reply_skb)
3772 if (retcode != NO_ERROR)
3774 resource = adm_ctx.resource;
3776 mutex_lock(&resource->adm_mutex);
3777 retcode = adm_del_resource(resource);
3778 mutex_unlock(&resource->adm_mutex);
3780 drbd_adm_finish(&adm_ctx, info, retcode);
3784 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
3786 struct sk_buff *msg;
3787 struct drbd_genlmsghdr *d_out;
3791 seq = atomic_inc_return(&drbd_genl_seq);
3792 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
3793 if (!msg)
3794 goto failed;
3796 err = -EMSGSIZE;
3797 d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
3798 if (!d_out) /* cannot happen, but anyway. */
3799 goto nla_put_failure;
3800 d_out->minor = device_to_minor(device);
3801 d_out->ret_code = NO_ERROR;
3803 if (nla_put_status_info(msg, device, sib))
3804 goto nla_put_failure;
3805 genlmsg_end(msg, d_out);
3806 err = drbd_genl_multicast_events(msg, 0);
3807 /* msg has been consumed or freed in netlink_broadcast() */
3808 if (err && err != -ESRCH)
3809 goto failed;
3811 return;
3813 nla_put_failure:
3814 nlmsg_free(msg);
3815 failed:
3816 drbd_err(device, "Error %d while broadcasting event. "
3817 "Event seq:%u sib_reason:%u\n",
3818 err, seq, sib->sib_reason);
3821 static void device_to_statistics(struct device_statistics *s,
3822 struct drbd_device *device)
3824 memset(s, 0, sizeof(*s));
3825 s->dev_upper_blocked = !may_inc_ap_bio(device);
3826 if (get_ldev(device)) {
3827 struct drbd_md *md = &device->ldev->md;
3828 u64 *history_uuids = (u64 *)s->history_uuids;
3829 struct request_queue *q;
3832 spin_lock_irq(&md->uuid_lock);
3833 s->dev_current_uuid = md->uuid[UI_CURRENT];
3834 BUILD_BUG_ON(sizeof(s->history_uuids) < UI_HISTORY_END - UI_HISTORY_START + 1);
3835 for (n = 0; n < UI_HISTORY_END - UI_HISTORY_START + 1; n++)
3836 history_uuids[n] = md->uuid[UI_HISTORY_START + n];
3837 for (; n < HISTORY_UUIDS; n++)
3838 history_uuids[n] = 0;
3839 s->history_uuids_len = HISTORY_UUIDS;
3840 spin_unlock_irq(&md->uuid_lock);
3842 s->dev_disk_flags = md->flags;
3843 q = bdev_get_queue(device->ldev->backing_bdev);
3844 s->dev_lower_blocked =
3845 bdi_congested(&q->backing_dev_info,
3846 (1 << WB_async_congested) |
3847 (1 << WB_sync_congested));
3848 put_ldev(device);
3849 }
3850 s->dev_size = drbd_get_capacity(device->this_bdev);
3851 s->dev_read = device->read_cnt;
3852 s->dev_write = device->writ_cnt;
3853 s->dev_al_writes = device->al_writ_cnt;
3854 s->dev_bm_writes = device->bm_writ_cnt;
3855 s->dev_upper_pending = atomic_read(&device->ap_bio_cnt);
3856 s->dev_lower_pending = atomic_read(&device->local_cnt);
3857 s->dev_al_suspended = test_bit(AL_SUSPENDED, &device->flags);
3858 s->dev_exposed_data_uuid = device->ed_uuid;
3861 enum mdf_peer_flag {
3862 MDF_PEER_CONNECTED = 1 << 0,
3863 MDF_PEER_OUTDATED = 1 << 1,
3864 MDF_PEER_FENCING = 1 << 2,
3865 MDF_PEER_FULL_SYNC = 1 << 3,
3866 };
3868 static void peer_device_to_statistics(struct peer_device_statistics *s,
3869 struct drbd_peer_device *peer_device)
3871 struct drbd_device *device = peer_device->device;
3873 memset(s, 0, sizeof(*s));
3874 s->peer_dev_received = device->recv_cnt;
3875 s->peer_dev_sent = device->send_cnt;
3876 s->peer_dev_pending = atomic_read(&device->ap_pending_cnt) +
3877 atomic_read(&device->rs_pending_cnt);
3878 s->peer_dev_unacked = atomic_read(&device->unacked_cnt);
3879 s->peer_dev_out_of_sync = drbd_bm_total_weight(device) << (BM_BLOCK_SHIFT - 9);
3880 s->peer_dev_resync_failed = device->rs_failed << (BM_BLOCK_SHIFT - 9);
3881 if (get_ldev(device)) {
3882 struct drbd_md *md = &device->ldev->md;
3884 spin_lock_irq(&md->uuid_lock);
3885 s->peer_dev_bitmap_uuid = md->uuid[UI_BITMAP];
3886 spin_unlock_irq(&md->uuid_lock);
3887 s->peer_dev_flags =
3888 (drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND) ?
3889 MDF_PEER_CONNECTED : 0) +
3890 (drbd_md_test_flag(device->ldev, MDF_CONSISTENT) &&
3891 !drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE) ?
3892 MDF_PEER_OUTDATED : 0) +
3893 /* FIXME: MDF_PEER_FENCING? */
3894 (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ?
3895 MDF_PEER_FULL_SYNC : 0);
3896 put_ldev(device);
3897 }
3898 }
3900 static int nla_put_notification_header(struct sk_buff *msg,
3901 enum drbd_notification_type type)
3903 struct drbd_notification_header nh = {
3904 .nh_type = type,
3905 };
3907 return drbd_notification_header_to_skb(msg, &nh, true);
3908 }
3910 void notify_resource_state(struct sk_buff *skb,
3911 unsigned int seq,
3912 struct drbd_resource *resource,
3913 struct resource_info *resource_info,
3914 enum drbd_notification_type type)
3916 struct resource_statistics resource_statistics;
3917 struct drbd_genlmsghdr *dh;
3918 bool multicast = false;
3919 int err;
3921 if (!skb) {
3922 seq = atomic_inc_return(&notify_genl_seq);
3923 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
3924 err = -ENOMEM;
3925 if (!skb)
3926 goto failed;
3927 multicast = true;
3928 }
3930 err = -EMSGSIZE;
3931 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_RESOURCE_STATE);
3932 if (!dh)
3933 goto nla_put_failure;
3935 dh->ret_code = NO_ERROR;
3936 if (nla_put_drbd_cfg_context(skb, resource, NULL, NULL) ||
3937 nla_put_notification_header(skb, type) ||
3938 ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
3939 resource_info_to_skb(skb, resource_info, true)))
3940 goto nla_put_failure;
3941 resource_statistics.res_stat_write_ordering = resource->write_ordering;
3942 err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
3944 goto nla_put_failure;
3945 genlmsg_end(skb, dh);
3946 if (multicast) {
3947 err = drbd_genl_multicast_events(skb, 0);
3948 /* skb has been consumed or freed in netlink_broadcast() */
3949 if (err && err != -ESRCH)
3950 goto failed;
3951 }
3952 return;
3954 nla_put_failure:
3955 nlmsg_free(skb);
3956 failed:
3957 drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
3958 err, seq);
3959 }
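/* All four notify_*_state() helpers share this shape: called with
 * skb == NULL they allocate a fresh message and multicast it as an event;
 * called with a caller-supplied skb (from the initial-state dump) they
 * only append to it and leave delivery to the dump machinery. */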
3961 void notify_device_state(struct sk_buff *skb,
3962 unsigned int seq,
3963 struct drbd_device *device,
3964 struct device_info *device_info,
3965 enum drbd_notification_type type)
3967 struct device_statistics device_statistics;
3968 struct drbd_genlmsghdr *dh;
3969 bool multicast = false;
3973 seq = atomic_inc_return(&notify_genl_seq);
3974 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
3982 dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_DEVICE_STATE);
3983 if (!dh)
3984 goto nla_put_failure;
3985 dh->minor = device->minor;
3986 dh->ret_code = NO_ERROR;
3987 if (nla_put_drbd_cfg_context(skb, device->resource, NULL, device) ||
3988 nla_put_notification_header(skb, type) ||
3989 ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
3990 device_info_to_skb(skb, device_info, true)))
3991 goto nla_put_failure;
3992 device_to_statistics(&device_statistics, device);
3993 device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
3994 genlmsg_end(skb, dh);
3996 err = drbd_genl_multicast_events(skb, 0);
3997 /* skb has been consumed or freed in netlink_broadcast() */
3998 if (err && err != -ESRCH)
4006 drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
void notify_connection_state(struct sk_buff *skb,
			     unsigned int seq,
			     struct drbd_connection *connection,
			     struct connection_info *connection_info,
			     enum drbd_notification_type type)
{
	struct connection_statistics connection_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_CONNECTION_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, connection->resource, connection, NULL) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     connection_info_to_skb(skb, connection_info, true)))
		goto nla_put_failure;
	connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
	connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, 0);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}
void notify_peer_device_state(struct sk_buff *skb,
			      unsigned int seq,
			      struct drbd_peer_device *peer_device,
			      struct peer_device_info *peer_device_info,
			      enum drbd_notification_type type)
{
	struct peer_device_statistics peer_device_statistics;
	struct drbd_resource *resource = peer_device->device->resource;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_PEER_DEVICE_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, resource, peer_device->connection, peer_device->device) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     peer_device_info_to_skb(skb, peer_device_info, true)))
		goto nla_put_failure;
	peer_device_to_statistics(&peer_device_statistics, peer_device);
	peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, 0);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}
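/*
 * Broadcast that a user-space helper (fence-peer, out-of-sync handler,
 * etc.) is involved: typically once when it is invoked and again with its
 * exit status.  Unlike the notify_*_state() functions above, this always
 * allocates its own message and serializes with other notifications via
 * notification_mutex.
 */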
void notify_helper(enum drbd_notification_type type,
		   struct drbd_device *device, struct drbd_connection *connection,
		   const char *name, int status)
{
	struct drbd_resource *resource = device ? device->resource : connection->resource;
	struct drbd_helper_info helper_info;
	unsigned int seq = atomic_inc_return(&notify_genl_seq);
	struct sk_buff *skb = NULL;
	struct drbd_genlmsghdr *dh;
	int err;

	strlcpy(helper_info.helper_name, name, sizeof(helper_info.helper_name));
	helper_info.helper_name_len = min(strlen(name), sizeof(helper_info.helper_name));
	helper_info.helper_status = status;

	err = -ENOMEM;
	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!skb)
		goto fail;

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_HELPER);
	if (!dh)
		goto fail;
	dh->minor = device ? device->minor : -1;
	dh->ret_code = NO_ERROR;
	mutex_lock(&notification_mutex);
	if (nla_put_drbd_cfg_context(skb, resource, connection, device) ||
	    nla_put_notification_header(skb, type) ||
	    drbd_helper_info_to_skb(skb, &helper_info, true))
		goto unlock_fail;
	genlmsg_end(skb, dh);
	err = drbd_genl_multicast_events(skb, 0);
	skb = NULL;
	/* skb has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto unlock_fail;
	mutex_unlock(&notification_mutex);
	return;

unlock_fail:
	mutex_unlock(&notification_mutex);
fail:
	nlmsg_free(skb);
	drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}
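/*
 * Append the final DRBD_INITIAL_STATE_DONE message to a dump reply, so
 * user space can tell where the snapshot of existing objects ends and
 * live events begin.
 */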
static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
{
	struct drbd_genlmsghdr *dh;
	int err;

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_INITIAL_STATE_DONE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_notification_header(skb, NOTIFY_EXISTS))
		goto nla_put_failure;
	genlmsg_end(skb, dh);
	return;

nla_put_failure:
	nlmsg_free(skb);
	pr_err("Error %d sending event. Event seq:%u\n", err, seq);
}
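/* Release all state snapshots still queued on @list. */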
static void free_state_changes(struct list_head *list)
{
	while (!list_empty(list)) {
		struct drbd_state_change *state_change =
			list_first_entry(list, struct drbd_state_change, list);
		list_del(&state_change->list);
		forget_state_change(state_change);
	}
}
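/*
 * Replaying one state snapshot takes one notification for the resource
 * itself, one per connection, one per device, and one per peer device
 * (device/connection pair).
 */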
static unsigned int notifications_for_state_change(struct drbd_state_change *state_change)
{
	return 1 +
	       state_change->n_connections +
	       state_change->n_devices +
	       state_change->n_devices * state_change->n_connections;
}
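/*
 * Dump iterator.  The netlink_callback args are used as follows:
 *   args[0]  current state snapshot in the list built by
 *            drbd_adm_get_initial_state()
 *   args[2]  netlink sequence number of the dump request
 *   args[3]  number of notifications for the current snapshot
 *   args[4]  index of the next notification within that snapshot
 *   args[5]  countdown of messages still to send; when it reaches 1,
 *            only the "initial state done" event is left
 */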
static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_state_change *state_change = (struct drbd_state_change *)cb->args[0];
	unsigned int seq = cb->args[2];
	unsigned int n;
	enum drbd_notification_type flags = 0;

	/* There is no need for taking notification_mutex here:  it doesn't
	   matter if the initial state events mix with later state change
	   events; we can always tell the events apart by the NOTIFY_EXISTS
	   flag. */

	cb->args[5]--;
	if (cb->args[5] == 1) {
		notify_initial_state_done(skb, seq);
		goto out;
	}
	n = cb->args[4]++;
	if (cb->args[4] < cb->args[3])
		flags |= NOTIFY_CONTINUES;
	if (n < 1) {
		notify_resource_state_change(skb, seq, state_change->resource,
					     NOTIFY_EXISTS | flags);
		goto next;
	}
	n--;
	if (n < state_change->n_connections) {
		notify_connection_state_change(skb, seq, &state_change->connections[n],
					       NOTIFY_EXISTS | flags);
		goto next;
	}
	n -= state_change->n_connections;
	if (n < state_change->n_devices) {
		notify_device_state_change(skb, seq, &state_change->devices[n],
					   NOTIFY_EXISTS | flags);
		goto next;
	}
	n -= state_change->n_devices;
	if (n < state_change->n_devices * state_change->n_connections) {
		notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
						NOTIFY_EXISTS | flags);
		goto next;
	}

next:
	if (cb->args[4] == cb->args[3]) {
		struct drbd_state_change *next_state_change =
			list_entry(state_change->list.next,
				   struct drbd_state_change, list);
		cb->args[0] = (long)next_state_change;
		cb->args[3] = notifications_for_state_change(next_state_change);
		cb->args[4] = 0;
	}
out:
	return skb->len;
}
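/*
 * Dump entry point: on the first call, take a consistent snapshot of all
 * resources under resources_mutex and remember it in cb->args; each
 * further call emits one notification from that snapshot via
 * get_initial_state(), and the snapshots are freed once the stream has
 * been fully replayed.
 */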
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_resource *resource;
	LIST_HEAD(head);

	if (cb->args[5] >= 1) {
		if (cb->args[5] > 1)
			return get_initial_state(skb, cb);
		if (cb->args[0]) {
			struct drbd_state_change *state_change =
				(struct drbd_state_change *)cb->args[0];

			/* connect list to head */
			list_add(&head, &state_change->list);
			free_state_changes(&head);
		}
		return 0;
	}

	cb->args[5] = 2;  /* number of iterations */
	mutex_lock(&resources_mutex);
	for_each_resource(resource, &drbd_resources) {
		struct drbd_state_change *state_change;

		state_change = remember_old_state(resource, GFP_KERNEL);
		if (!state_change) {
			if (!list_empty(&head))
				free_state_changes(&head);
			mutex_unlock(&resources_mutex);
			return -ENOMEM;
		}
		copy_old_to_new_state_change(state_change);
		list_add_tail(&state_change->list, &head);
		cb->args[5] += notifications_for_state_change(state_change);
	}
	mutex_unlock(&resources_mutex);

	if (!list_empty(&head)) {
		struct drbd_state_change *state_change =
			list_entry(head.next, struct drbd_state_change, list);
		cb->args[0] = (long)state_change;
		cb->args[3] = notifications_for_state_change(state_change);
		list_del(&head);	/* detach list from head */
	}

	cb->args[2] = cb->nlh->nlmsg_seq;
	return get_initial_state(skb, cb);
}