drbd: Remove now-unused int_dig_out buffer
[cascardo/linux.git] drivers/block/drbd/drbd_nl.c
1 /*
2    drbd_nl.c
3
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10    drbd is free software; you can redistribute it and/or modify
11    it under the terms of the GNU General Public License as published by
12    the Free Software Foundation; either version 2, or (at your option)
13    any later version.
14
15    drbd is distributed in the hope that it will be useful,
16    but WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18    GNU General Public License for more details.
19
20    You should have received a copy of the GNU General Public License
21    along with drbd; see the file COPYING.  If not, write to
22    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23
24  */
25
26 #include <linux/module.h>
27 #include <linux/drbd.h>
28 #include <linux/in.h>
29 #include <linux/fs.h>
30 #include <linux/file.h>
31 #include <linux/slab.h>
32 #include <linux/blkpg.h>
33 #include <linux/cpumask.h>
34 #include "drbd_int.h"
35 #include "drbd_req.h"
36 #include "drbd_wrappers.h"
37 #include <asm/unaligned.h>
38 #include <linux/drbd_limits.h>
39 #include <linux/kthread.h>
40
41 #include <net/genetlink.h>
42
43 /* .doit */
44 // int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
45 // int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
46
47 int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
48 int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);
49
50 int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info);
51 int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info);
52 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);
53
54 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
55 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
56 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
57 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
58 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
59 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
60 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
61 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
62 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
63 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
64 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
65 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
66 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
67 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
68 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
69 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
70 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
71 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
72 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
73 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
74 /* .dumpit */
75 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
76
77 #include <linux/drbd_genl_api.h>
78 #include <linux/genl_magic_func.h>
79
80 /* used by blkdev_get_by_path() to claim our meta data device(s) */
81 static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
82
83 /* Configuration is strictly serialized, because generic netlink message
84  * processing is strictly serialized by the genl_lock().
85  * Which means we can use one static global drbd_config_context struct.
86  */
87 static struct drbd_config_context {
88         /* assigned from drbd_genlmsghdr */
89         unsigned int minor;
90         /* assigned from request attributes, if present */
91         unsigned int volume;
92 #define VOLUME_UNSPECIFIED              (-1U)
93         /* pointer into the request skb,
94          * limited lifetime! */
95         char *conn_name;
96
97         /* reply buffer */
98         struct sk_buff *reply_skb;
99         /* pointer into reply buffer */
100         struct drbd_genlmsghdr *reply_dh;
101         /* resolved from attributes, if possible */
102         struct drbd_conf *mdev;
103         struct drbd_tconn *tconn;
104 } adm_ctx;
105
106 static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
107 {
108         genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
109         if (genlmsg_reply(skb, info))
110                 printk(KERN_ERR "drbd: error sending genl reply\n");
111 }
112
113 /* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
114  * reason it could fail would be lack of space in the skb, and there are 4k available. */
115 int drbd_msg_put_info(const char *info)
116 {
117         struct sk_buff *skb = adm_ctx.reply_skb;
118         struct nlattr *nla;
119         int err = -EMSGSIZE;
120
121         if (!info || !info[0])
122                 return 0;
123
124         nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
125         if (!nla)
126                 return err;
127
128         err = nla_put_string(skb, T_info_text, info);
129         if (err) {
130                 nla_nest_cancel(skb, nla);
131                 return err;
132         } else
133                 nla_nest_end(skb, nla);
134         return 0;
135 }
136
137 /* This would be a good candidate for a "pre_doit" hook,
138  * and per-family private info->pointers.
139  * But we need to stay compatible with older kernels.
140  * If it returns successfully, adm_ctx members are valid.
141  */
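/* Flags for drbd_adm_prepare(): require that the minor / connection named
 * in the request resolves to an existing object before the command is run. */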
142 #define DRBD_ADM_NEED_MINOR     1
143 #define DRBD_ADM_NEED_CONN      2
144 static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
145                 unsigned flags)
146 {
147         struct drbd_genlmsghdr *d_in = info->userhdr;
148         const u8 cmd = info->genlhdr->cmd;
149         int err;
150
151         memset(&adm_ctx, 0, sizeof(adm_ctx));
152
153         /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
154         if (cmd != DRBD_ADM_GET_STATUS
155         && security_netlink_recv(skb, CAP_SYS_ADMIN))
156                return -EPERM;
157
158         adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
159         if (!adm_ctx.reply_skb)
160                 goto fail;
161
162         adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
163                                         info, &drbd_genl_family, 0, cmd);
164         /* a put of a few bytes into a fresh skb of >= 4k will always succeed,
165          * but check anyway */
166         if (!adm_ctx.reply_dh)
167                 goto fail;
168
169         adm_ctx.reply_dh->minor = d_in->minor;
170         adm_ctx.reply_dh->ret_code = NO_ERROR;
171
172         if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
173                 struct nlattr *nla;
174                 /* parse and validate only */
175                 err = drbd_cfg_context_from_attrs(NULL, info);
176                 if (err)
177                         goto fail;
178
179                 /* It was present, and valid,
180                  * copy it over to the reply skb. */
181                 err = nla_put_nohdr(adm_ctx.reply_skb,
182                                 info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
183                                 info->attrs[DRBD_NLA_CFG_CONTEXT]);
184                 if (err)
185                         goto fail;
186
187                 /* and assign stuff to the global adm_ctx */
188                 nla = nested_attr_tb[__nla_type(T_ctx_volume)];
189                 adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
190                 nla = nested_attr_tb[__nla_type(T_ctx_conn_name)];
191                 if (nla)
192                         adm_ctx.conn_name = nla_data(nla);
193         } else
194                 adm_ctx.volume = VOLUME_UNSPECIFIED;
195
196         adm_ctx.minor = d_in->minor;
197         adm_ctx.mdev = minor_to_mdev(d_in->minor);
198         adm_ctx.tconn = conn_by_name(adm_ctx.conn_name);
199
200         if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
201                 drbd_msg_put_info("unknown minor");
202                 return ERR_MINOR_INVALID;
203         }
204         if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
205                 drbd_msg_put_info("unknown connection");
206                 return ERR_INVALID_REQUEST;
207         }
208
209         /* some more paranoia, if the request was over-determined */
210         if (adm_ctx.mdev && adm_ctx.tconn &&
211             adm_ctx.mdev->tconn != adm_ctx.tconn) {
212                 pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
213                                 adm_ctx.minor, adm_ctx.conn_name, adm_ctx.mdev->tconn->name);
214                 drbd_msg_put_info("minor exists in different connection");
215                 return ERR_INVALID_REQUEST;
216         }
217         if (adm_ctx.mdev &&
218             adm_ctx.volume != VOLUME_UNSPECIFIED &&
219             adm_ctx.volume != adm_ctx.mdev->vnr) {
220                 pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
221                                 adm_ctx.minor, adm_ctx.volume,
222                                 adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
223                 drbd_msg_put_info("minor exists as different volume");
224                 return ERR_INVALID_REQUEST;
225         }
226         if (adm_ctx.mdev && !adm_ctx.tconn)
227                 adm_ctx.tconn = adm_ctx.mdev->tconn;
228         return NO_ERROR;
229
230 fail:
231         nlmsg_free(adm_ctx.reply_skb);
232         adm_ctx.reply_skb = NULL;
233         return -ENOMEM;
234 }
235
236 static int drbd_adm_finish(struct genl_info *info, int retcode)
237 {
238         struct nlattr *nla;
239         const char *conn_name = NULL;
240
241         if (!adm_ctx.reply_skb)
242                 return -ENOMEM;
243
244         adm_ctx.reply_dh->ret_code = retcode;
245
246         nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
247         if (nla) {
248                 nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
249                 if (nla)
250                         conn_name = nla_data(nla);
251         }
252
253         drbd_adm_send_reply(adm_ctx.reply_skb, info);
254         return 0;
255 }
256
257 static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
258 {
259         char *afs;
260
261         if (get_net_conf(tconn)) {
262                 switch (((struct sockaddr *)tconn->net_conf->peer_addr)->sa_family) {
263                 case AF_INET6:
264                         afs = "ipv6";
265                         snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
266                                  &((struct sockaddr_in6 *)tconn->net_conf->peer_addr)->sin6_addr);
267                         break;
268                 case AF_INET:
269                         afs = "ipv4";
270                         snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
271                                  &((struct sockaddr_in *)tconn->net_conf->peer_addr)->sin_addr);
272                         break;
273                 default:
274                         afs = "ssocks";
275                         snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
276                                  &((struct sockaddr_in *)tconn->net_conf->peer_addr)->sin_addr);
277                 }
278                 snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
279                 put_net_conf(tconn);
280         }
281 }
282
283 int drbd_khelper(struct drbd_conf *mdev, char *cmd)
284 {
285         char *envp[] = { "HOME=/",
286                         "TERM=linux",
287                         "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
288                          (char[20]) { }, /* address family */
289                          (char[60]) { }, /* address */
290                         NULL };
291         char mb[12];
292         char *argv[] = {usermode_helper, cmd, mb, NULL };
293         struct sib_info sib;
294         int ret;
295
296         snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
297         setup_khelper_env(mdev->tconn, envp);
298
299         /* The helper may take some time.
300          * write out any unsynced meta data changes now */
301         drbd_md_sync(mdev);
302
303         dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
304         sib.sib_reason = SIB_HELPER_PRE;
305         sib.helper_name = cmd;
306         drbd_bcast_event(mdev, &sib);
307         ret = call_usermodehelper(usermode_helper, argv, envp, 1);
308         if (ret)
309                 dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
310                                 usermode_helper, cmd, mb,
311                                 (ret >> 8) & 0xff, ret);
312         else
313                 dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
314                                 usermode_helper, cmd, mb,
315                                 (ret >> 8) & 0xff, ret);
316         sib.sib_reason = SIB_HELPER_POST;
317         sib.helper_exit_code = ret;
318         drbd_bcast_event(mdev, &sib);
319
320         if (ret < 0) /* Ignore any ERRNOs we got. */
321                 ret = 0;
322
323         return ret;
324 }
325
326 static void conn_md_sync(struct drbd_tconn *tconn)
327 {
328         struct drbd_conf *mdev;
329         int vnr;
330
331         idr_for_each_entry(&tconn->volumes, mdev, vnr)
332                 drbd_md_sync(mdev);
333 }
334
335 int conn_khelper(struct drbd_tconn *tconn, char *cmd)
336 {
337         char *envp[] = { "HOME=/",
338                         "TERM=linux",
339                         "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
340                          (char[20]) { }, /* address family */
341                          (char[60]) { }, /* address */
342                         NULL };
343         char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
344         int ret;
345
346         setup_khelper_env(tconn, envp);
347         conn_md_sync(tconn);
348
349         conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
350         /* TODO: conn_bcast_event() ?? */
351
352         ret = call_usermodehelper(usermode_helper, argv, envp, 1);
353         if (ret)
354                 conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
355                           usermode_helper, cmd, tconn->name,
356                           (ret >> 8) & 0xff, ret);
357         else
358                 conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
359                           usermode_helper, cmd, tconn->name,
360                           (ret >> 8) & 0xff, ret);
361         /* TODO: conn_bcast_event() ?? */
362
363         if (ret < 0) /* Ignore any ERRNOs we got. */
364                 ret = 0;
365
366         return ret;
367 }
368
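/* Take the numerically highest fencing policy configured on any volume of
 * this connection that still has at least a Consistent local disk; if no
 * such volume exists, FP_NOT_AVAIL is returned. */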
369 static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
370 {
371         enum drbd_fencing_p fp = FP_NOT_AVAIL;
372         struct drbd_conf *mdev;
373         int vnr;
374
375         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
376                 if (get_ldev_if_state(mdev, D_CONSISTENT)) {
377                         fp = max_t(enum drbd_fencing_p, fp, mdev->ldev->dc.fencing);
378                         put_ldev(mdev);
379                 }
380         }
381
382         return fp;
383 }
384
385 bool conn_try_outdate_peer(struct drbd_tconn *tconn)
386 {
387         union drbd_state mask = { };
388         union drbd_state val = { };
389         enum drbd_fencing_p fp;
390         char *ex_to_string;
391         int r;
392
393         if (tconn->cstate >= C_WF_REPORT_PARAMS) {
394                 conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
395                 return false;
396         }
397
398         fp = highest_fencing_policy(tconn);
399         switch (fp) {
400         case FP_NOT_AVAIL:
401                 conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
402                 goto out;
403         case FP_DONT_CARE:
404                 return true;
405         default: ;
406         }
407
408         r = conn_khelper(tconn, "fence-peer");
409
410         switch ((r>>8) & 0xff) {
411         case 3: /* peer is inconsistent */
412                 ex_to_string = "peer is inconsistent or worse";
413                 mask.pdsk = D_MASK;
414                 val.pdsk = D_INCONSISTENT;
415                 break;
416         case 4: /* peer got outdated, or was already outdated */
417                 ex_to_string = "peer was fenced";
418                 mask.pdsk = D_MASK;
419                 val.pdsk = D_OUTDATED;
420                 break;
421         case 5: /* peer was down */
422                 if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
423                         /* we will(have) create(d) a new UUID anyways... */
424                         ex_to_string = "peer is unreachable, assumed to be dead";
425                         mask.pdsk = D_MASK;
426                         val.pdsk = D_OUTDATED;
427                 } else {
428                         ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
429                 }
430                 break;
431         case 6: /* Peer is primary, voluntarily outdate myself.
432                  * This is useful when an unconnected R_SECONDARY is asked to
433                  * become R_PRIMARY, but finds the other peer being active. */
434                 ex_to_string = "peer is active";
435                 conn_warn(tconn, "Peer is primary, outdating myself.\n");
436                 mask.disk = D_MASK;
437                 val.disk = D_OUTDATED;
438                 break;
439         case 7:
440                 if (fp != FP_STONITH)
441                         conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
442                 ex_to_string = "peer was stonithed";
443                 mask.pdsk = D_MASK;
444                 val.pdsk = D_OUTDATED;
445                 break;
446         default:
447                 /* The script is broken ... */
448                 conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
449                 return false; /* Eventually leave IO frozen */
450         }
451
452         conn_info(tconn, "fence-peer helper returned %d (%s)\n",
453                   (r>>8) & 0xff, ex_to_string);
454
455  out:
456
457         /* Not using
458            conn_request_state(tconn, mask, val, CS_VERBOSE);
459            here, because we might have been able to re-establish the
460            connection in the meantime. */
461         spin_lock_irq(&tconn->req_lock);
462         if (tconn->cstate < C_WF_REPORT_PARAMS)
463                 _conn_request_state(tconn, mask, val, CS_VERBOSE);
464         spin_unlock_irq(&tconn->req_lock);
465
466         return conn_highest_pdsk(tconn) <= D_OUTDATED;
467 }
468
469 static int _try_outdate_peer_async(void *data)
470 {
471         struct drbd_tconn *tconn = (struct drbd_tconn *)data;
472
473         conn_try_outdate_peer(tconn);
474
475         return 0;
476 }
477
478 void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
479 {
480         struct task_struct *opa;
481
482         opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
483         if (IS_ERR(opa))
484                 conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
485 }
486
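/* Try to change the role of @mdev to @new_role, making at most four attempts;
 * if @force is set, a local disk that is Inconsistent or better may be
 * considered UpToDate in order to allow promotion. */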
487 enum drbd_state_rv
488 drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
489 {
490         const int max_tries = 4;
491         enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
492         int try = 0;
493         int forced = 0;
494         union drbd_state mask, val;
495
496         if (new_role == R_PRIMARY)
497                 request_ping(mdev->tconn); /* Detect a dead peer ASAP */
498
499         mutex_lock(mdev->state_mutex);
500
501         mask.i = 0; mask.role = R_MASK;
502         val.i  = 0; val.role  = new_role;
503
504         while (try++ < max_tries) {
505                 rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);
506
507                 /* in case we first succeeded to outdate,
508                  * but now suddenly could establish a connection */
509                 if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
510                         val.pdsk = 0;
511                         mask.pdsk = 0;
512                         continue;
513                 }
514
515                 if (rv == SS_NO_UP_TO_DATE_DISK && force &&
516                     (mdev->state.disk < D_UP_TO_DATE &&
517                      mdev->state.disk >= D_INCONSISTENT)) {
518                         mask.disk = D_MASK;
519                         val.disk  = D_UP_TO_DATE;
520                         forced = 1;
521                         continue;
522                 }
523
524                 if (rv == SS_NO_UP_TO_DATE_DISK &&
525                     mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
526                         D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
527
528                         if (conn_try_outdate_peer(mdev->tconn)) {
529                                 val.disk = D_UP_TO_DATE;
530                                 mask.disk = D_MASK;
531                         }
532                         continue;
533                 }
534
535                 if (rv == SS_NOTHING_TO_DO)
536                         goto out;
537                 if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
538                         if (!conn_try_outdate_peer(mdev->tconn) && force) {
539                                 dev_warn(DEV, "Forced into split brain situation!\n");
540                                 mask.pdsk = D_MASK;
541                                 val.pdsk  = D_OUTDATED;
542
543                         }
544                         continue;
545                 }
546                 if (rv == SS_TWO_PRIMARIES) {
547                         /* Maybe the peer is detected as dead very soon...
548                            retry at most once more in this case. */
549                         schedule_timeout_interruptible((mdev->tconn->net_conf->ping_timeo+1)*HZ/10);
550                         if (try < max_tries)
551                                 try = max_tries - 1;
552                         continue;
553                 }
554                 if (rv < SS_SUCCESS) {
555                         rv = _drbd_request_state(mdev, mask, val,
556                                                 CS_VERBOSE + CS_WAIT_COMPLETE);
557                         if (rv < SS_SUCCESS)
558                                 goto out;
559                 }
560                 break;
561         }
562
563         if (rv < SS_SUCCESS)
564                 goto out;
565
566         if (forced)
567                 dev_warn(DEV, "Forced to consider local data as UpToDate!\n");
568
569         /* Wait until nothing is on the fly :) */
570         wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
571
572         if (new_role == R_SECONDARY) {
573                 set_disk_ro(mdev->vdisk, true);
574                 if (get_ldev(mdev)) {
575                         mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
576                         put_ldev(mdev);
577                 }
578         } else {
579                 if (get_net_conf(mdev->tconn)) {
580                         mdev->tconn->net_conf->want_lose = 0;
581                         put_net_conf(mdev->tconn);
582                 }
583                 set_disk_ro(mdev->vdisk, false);
584                 if (get_ldev(mdev)) {
585                         if (((mdev->state.conn < C_CONNECTED ||
586                                mdev->state.pdsk <= D_FAILED)
587                               && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
588                                 drbd_uuid_new_current(mdev);
589
590                         mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
591                         put_ldev(mdev);
592                 }
593         }
594
595         /* write-out of the bitmap areas covered by the activity log
596          * to stable storage is already done during the after-state-change handling */
597
598         if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
599                 /* if this was forced, we should consider sync */
600                 if (forced)
601                         drbd_send_uuids(mdev);
602                 drbd_send_state(mdev);
603         }
604
605         drbd_md_sync(mdev);
606
607         kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
608 out:
609         mutex_unlock(mdev->state_mutex);
610         return rv;
611 }
612
613 static const char *from_attrs_err_to_txt(int err)
614 {
615         return  err == -ENOMSG ? "required attribute missing" :
616                 err == -EOPNOTSUPP ? "unknown mandatory attribute" :
617                 err == -EEXIST ? "can not change invariant setting" :
618                 "invalid attribute value";
619 }
620
621 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
622 {
623         struct set_role_parms parms;
624         int err;
625         enum drbd_ret_code retcode;
626
627         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
628         if (!adm_ctx.reply_skb)
629                 return retcode;
630         if (retcode != NO_ERROR)
631                 goto out;
632
633         memset(&parms, 0, sizeof(parms));
634         if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
635                 err = set_role_parms_from_attrs(&parms, info);
636                 if (err) {
637                         retcode = ERR_MANDATORY_TAG;
638                         drbd_msg_put_info(from_attrs_err_to_txt(err));
639                         goto out;
640                 }
641         }
642
643         if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
644                 retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
645         else
646                 retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
647 out:
648         drbd_adm_finish(info, retcode);
649         return 0;
650 }
651
652 /* initializes the md.*_offset members, so we are able to find
653  * the on disk meta data */
654 static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
655                                        struct drbd_backing_dev *bdev)
656 {
657         sector_t md_size_sect = 0;
658         switch (bdev->dc.meta_dev_idx) {
659         default:
660                 /* v07 style fixed size indexed meta data */
661                 bdev->md.md_size_sect = MD_RESERVED_SECT;
662                 bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
663                 bdev->md.al_offset = MD_AL_OFFSET;
664                 bdev->md.bm_offset = MD_BM_OFFSET;
665                 break;
666         case DRBD_MD_INDEX_FLEX_EXT:
667                 /* just occupy the full device; unit: sectors */
668                 bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
669                 bdev->md.md_offset = 0;
670                 bdev->md.al_offset = MD_AL_OFFSET;
671                 bdev->md.bm_offset = MD_BM_OFFSET;
672                 break;
673         case DRBD_MD_INDEX_INTERNAL:
674         case DRBD_MD_INDEX_FLEX_INT:
675                 bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
676                 /* al size is still fixed */
677                 bdev->md.al_offset = -MD_AL_SECTORS;
678                 /* we need (slightly less than) ~ this many bitmap sectors: */
679                 md_size_sect = drbd_get_capacity(bdev->backing_bdev);
680                 md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
681                 md_size_sect = BM_SECT_TO_EXT(md_size_sect);
682                 md_size_sect = ALIGN(md_size_sect, 8);
683
684                 /* plus the "drbd meta data super block",
685                  * and the activity log; */
686                 md_size_sect += MD_BM_OFFSET;
687
688                 bdev->md.md_size_sect = md_size_sect;
689                 /* bitmap offset is adjusted by 'super' block size */
690                 bdev->md.bm_offset   = -md_size_sect + MD_AL_OFFSET;
691                 break;
692         }
693 }
694
695 /* input size is expected to be in KB */
696 char *ppsize(char *buf, unsigned long long size)
697 {
698         /* Needs 9 bytes at max including trailing NUL:
699          * -1ULL ==> "16384 EB" */
700         static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
701         int base = 0;
702         while (size >= 10000 && base < sizeof(units)-1) {
703                 /* shift + round */
704                 size = (size >> 10) + !!(size & (1<<9));
705                 base++;
706         }
707         sprintf(buf, "%u %cB", (unsigned)size, units[base]);
708
709         return buf;
710 }
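/* Illustrative example: ppsize(buf, 1048576) shifts the 1048576 KB input down
 * once (with rounding) and prints "1024 MB" into buf. */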
711
712 /* there is still a theoretical deadlock when called from receiver
713  * on a D_INCONSISTENT R_PRIMARY:
714  *  remote READ does inc_ap_bio, receiver would need to receive answer
715  *  packet from remote to dec_ap_bio again.
716  *  receiver receive_sizes(), comes here,
717  *  waits for ap_bio_cnt == 0. -> deadlock.
718  * but this cannot happen, actually, because:
719  *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
720  *  (not connected, or bad/no disk on peer):
721  *  see drbd_fail_request_early, ap_bio_cnt is zero.
722  *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
723  *  peer may not initiate a resize.
724  */
725 /* Note these are not to be confused with
726  * drbd_adm_suspend_io/drbd_adm_resume_io,
727  * which are (sub) state changes triggered by admin (drbdsetup),
728  * and can be long lived.
729  * This changes an mdev->flag, is triggered by drbd internals,
730  * and should be short-lived. */
731 void drbd_suspend_io(struct drbd_conf *mdev)
732 {
733         set_bit(SUSPEND_IO, &mdev->flags);
734         if (drbd_suspended(mdev))
735                 return;
736         wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
737 }
738
739 void drbd_resume_io(struct drbd_conf *mdev)
740 {
741         clear_bit(SUSPEND_IO, &mdev->flags);
742         wake_up(&mdev->misc_wait);
743 }
744
745 /**
746  * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
747  * @mdev:       DRBD device.
748  *
749  * Returns 0 on success, negative return values indicate errors.
750  * You should call drbd_md_sync() after calling this function.
751  */
752 enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
753 {
754         sector_t prev_first_sect, prev_size; /* previous meta location */
755         sector_t la_size;
756         sector_t size;
757         char ppb[10];
758
759         int md_moved, la_size_changed;
760         enum determine_dev_size rv = unchanged;
761
762         /* race:
763          * application request passes inc_ap_bio,
764          * but then cannot get an AL-reference.
765          * this function later may wait on ap_bio_cnt == 0. -> deadlock.
766          *
767          * to avoid that:
768          * Suspend IO right here.
769          * still lock the act_log to not trigger ASSERTs there.
770          */
771         drbd_suspend_io(mdev);
772
773         /* no wait necessary anymore, actually we could assert that */
774         wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
775
776         prev_first_sect = drbd_md_first_sector(mdev->ldev);
777         prev_size = mdev->ldev->md.md_size_sect;
778         la_size = mdev->ldev->md.la_size_sect;
779
780         /* TODO: should only be some assert here, not (re)init... */
781         drbd_md_set_sector_offsets(mdev, mdev->ldev);
782
783         size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);
784
785         if (drbd_get_capacity(mdev->this_bdev) != size ||
786             drbd_bm_capacity(mdev) != size) {
787                 int err;
788                 err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
789                 if (unlikely(err)) {
790                         /* currently there is only one error: ENOMEM! */
791                         size = drbd_bm_capacity(mdev)>>1;
792                         if (size == 0) {
793                                 dev_err(DEV, "OUT OF MEMORY! "
794                                     "Could not allocate bitmap!\n");
795                         } else {
796                                 dev_err(DEV, "BM resizing failed. "
797                                     "Leaving size unchanged at size = %lu KB\n",
798                                     (unsigned long)size);
799                         }
800                         rv = dev_size_error;
801                 }
802                 /* racy, see comments above. */
803                 drbd_set_my_capacity(mdev, size);
804                 mdev->ldev->md.la_size_sect = size;
805                 dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
806                      (unsigned long long)size>>1);
807         }
808         if (rv == dev_size_error)
809                 goto out;
810
811         la_size_changed = (la_size != mdev->ldev->md.la_size_sect);
812
813         md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
814                 || prev_size       != mdev->ldev->md.md_size_sect;
815
816         if (la_size_changed || md_moved) {
817                 int err;
818
819                 drbd_al_shrink(mdev); /* All extents inactive. */
820                 dev_info(DEV, "Writing the whole bitmap, %s\n",
821                          la_size_changed && md_moved ? "size changed and md moved" :
822                          la_size_changed ? "size changed" : "md moved");
823                 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
824                 err = drbd_bitmap_io(mdev, &drbd_bm_write,
825                                 "size changed", BM_LOCKED_MASK);
826                 if (err) {
827                         rv = dev_size_error;
828                         goto out;
829                 }
830                 drbd_md_mark_dirty(mdev);
831         }
832
833         if (size > la_size)
834                 rv = grew;
835         if (size < la_size)
836                 rv = shrunk;
837 out:
838         lc_unlock(mdev->act_log);
839         wake_up(&mdev->al_wait);
840         drbd_resume_io(mdev);
841
842         return rv;
843 }
844
845 sector_t
846 drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
847 {
848         sector_t p_size = mdev->p_size;   /* partner's disk size. */
849         sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
850         sector_t m_size; /* my size */
851         sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
852         sector_t size = 0;
853
854         m_size = drbd_get_max_capacity(bdev);
855
856         if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
857                 dev_warn(DEV, "Resize while not connected was forced by the user!\n");
858                 p_size = m_size;
859         }
860
861         if (p_size && m_size) {
862                 size = min_t(sector_t, p_size, m_size);
863         } else {
864                 if (la_size) {
865                         size = la_size;
866                         if (m_size && m_size < size)
867                                 size = m_size;
868                         if (p_size && p_size < size)
869                                 size = p_size;
870                 } else {
871                         if (m_size)
872                                 size = m_size;
873                         if (p_size)
874                                 size = p_size;
875                 }
876         }
877
878         if (size == 0)
879                 dev_err(DEV, "Both nodes diskless!\n");
880
881         if (u_size) {
882                 if (u_size > size)
883                         dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
884                             (unsigned long)u_size>>1, (unsigned long)size>>1);
885                 else
886                         size = u_size;
887         }
888
889         return size;
890 }
891
892 /**
893  * drbd_check_al_size() - Ensures that the AL is of the right size
894  * @mdev:       DRBD device.
895  *
896  * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
897  * failed, and 0 on success. You should call drbd_md_sync() after you called
898  * this function.
899  */
900 static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
901 {
902         struct lru_cache *n, *t;
903         struct lc_element *e;
904         unsigned int in_use;
905         int i;
906
907         if (!expect(dc->al_extents >= DRBD_AL_EXTENTS_MIN))
908                 dc->al_extents = DRBD_AL_EXTENTS_MIN;
909
910         if (mdev->act_log &&
911             mdev->act_log->nr_elements == dc->al_extents)
912                 return 0;
913
914         in_use = 0;
915         t = mdev->act_log;
916         n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
917                 dc->al_extents, sizeof(struct lc_element), 0);
918
919         if (n == NULL) {
920                 dev_err(DEV, "Cannot allocate act_log lru!\n");
921                 return -ENOMEM;
922         }
923         spin_lock_irq(&mdev->al_lock);
924         if (t) {
925                 for (i = 0; i < t->nr_elements; i++) {
926                         e = lc_element_by_index(t, i);
927                         if (e->refcnt)
928                                 dev_err(DEV, "refcnt(%d)==%d\n",
929                                     e->lc_number, e->refcnt);
930                         in_use += e->refcnt;
931                 }
932         }
933         if (!in_use)
934                 mdev->act_log = n;
935         spin_unlock_irq(&mdev->al_lock);
936         if (in_use) {
937                 dev_err(DEV, "Activity log still in use!\n");
938                 lc_destroy(n);
939                 return -EBUSY;
940         } else {
941                 if (t)
942                         lc_destroy(t);
943         }
944         drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
945         return 0;
946 }
947
948 static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
949 {
950         struct request_queue * const q = mdev->rq_queue;
951         int max_hw_sectors = max_bio_size >> 9;
952         int max_segments = 0;
953
954         if (get_ldev_if_state(mdev, D_ATTACHING)) {
955                 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
956
957                 max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
958                 max_segments = mdev->ldev->dc.max_bio_bvecs;
959                 put_ldev(mdev);
960         }
961
962         blk_queue_logical_block_size(q, 512);
963         blk_queue_max_hw_sectors(q, max_hw_sectors);
964         /* This is the workaround for "bio would need to, but cannot, be split" */
965         blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
966         blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
967
968         if (get_ldev_if_state(mdev, D_ATTACHING)) {
969                 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
970
971                 blk_queue_stack_limits(q, b);
972
973                 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
974                         dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
975                                  q->backing_dev_info.ra_pages,
976                                  b->backing_dev_info.ra_pages);
977                         q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
978                 }
979                 put_ldev(mdev);
980         }
981 }
982
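/* Recompute the effective maximum BIO size from the local backing device
 * limit and from what the peer supports (which depends on the agreed protocol
 * version), then apply the result to our request queue. */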
983 void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
984 {
985         int now, new, local, peer;
986
987         now = queue_max_hw_sectors(mdev->rq_queue) << 9;
988         local = mdev->local_max_bio_size; /* possibly the last known value, from volatile memory */
989         peer = mdev->peer_max_bio_size; /* possibly the last known value, from meta data */
990
991         if (get_ldev_if_state(mdev, D_ATTACHING)) {
992                 local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
993                 mdev->local_max_bio_size = local;
994                 put_ldev(mdev);
995         }
996
997         /* We may ignore peer limits if the peer is modern enough.
998            Because from 8.3.8 onwards the peer can use multiple
999            BIOs for a single peer_request */
1000         if (mdev->state.conn >= C_CONNECTED) {
1001                 if (mdev->tconn->agreed_pro_version < 94)
1002                         peer = mdev->peer_max_bio_size;
1003                 else if (mdev->tconn->agreed_pro_version == 94)
1004                         peer = DRBD_MAX_SIZE_H80_PACKET;
1005                 else /* drbd 8.3.8 onwards */
1006                         peer = DRBD_MAX_BIO_SIZE;
1007         }
1008
1009         new = min_t(int, local, peer);
1010
1011         if (mdev->state.role == R_PRIMARY && new < now)
1012                 dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);
1013
1014         if (new != now)
1015                 dev_info(DEV, "max BIO size = %u\n", new);
1016
1017         drbd_setup_queue_param(mdev, new);
1018 }
1019
1020 /* serialize deconfig (worker exiting, doing cleanup)
1021  * and reconfig (drbdsetup disk, drbdsetup net)
1022  *
1023  * Wait for a potentially exiting worker, then restart it,
1024  * or start a new one.  Flush any pending work, there may still be an
1025  * after_state_change queued.
1026  */
1027 static void conn_reconfig_start(struct drbd_tconn *tconn)
1028 {
1029         wait_event(tconn->ping_wait, !test_and_set_bit(CONFIG_PENDING, &tconn->flags));
1030         wait_event(tconn->ping_wait, !test_bit(OBJECT_DYING, &tconn->flags));
1031         drbd_thread_start(&tconn->worker);
1032         conn_flush_workqueue(tconn);
1033 }
1034
1035 /* if still unconfigured, stops worker again.
1036  * if configured now, clears CONFIG_PENDING.
1037  * wakes potential waiters */
1038 static void conn_reconfig_done(struct drbd_tconn *tconn)
1039 {
1040         spin_lock_irq(&tconn->req_lock);
1041         if (conn_all_vols_unconf(tconn)) {
1042                 set_bit(OBJECT_DYING, &tconn->flags);
1043                 drbd_thread_stop_nowait(&tconn->worker);
1044         } else
1045                 clear_bit(CONFIG_PENDING, &tconn->flags);
1046         spin_unlock_irq(&tconn->req_lock);
1047         wake_up(&tconn->ping_wait);
1048 }
1049
1050 /* Make sure IO is suspended before calling this function. */
1051 static void drbd_suspend_al(struct drbd_conf *mdev)
1052 {
1053         int s = 0;
1054
1055         if (!lc_try_lock(mdev->act_log)) {
1056                 dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
1057                 return;
1058         }
1059
1060         drbd_al_shrink(mdev);
1061         spin_lock_irq(&mdev->tconn->req_lock);
1062         if (mdev->state.conn < C_CONNECTED)
1063                 s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
1064         spin_unlock_irq(&mdev->tconn->req_lock);
1065         lc_unlock(mdev->act_log);
1066
1067         if (s)
1068                 dev_info(DEV, "Suspended AL updates\n");
1069 }
1070
1071 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1072 {
1073         enum drbd_ret_code retcode;
1074         struct drbd_conf *mdev;
1075         struct disk_conf *ndc; /* new disk conf */
1076         int err, fifo_size;
1077         int *rs_plan_s = NULL;
1078
1079         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1080         if (!adm_ctx.reply_skb)
1081                 return retcode;
1082         if (retcode != NO_ERROR)
1083                 goto out;
1084
1085         mdev = adm_ctx.mdev;
1086
1087         /* we also need a disk
1088          * to change the options on */
1089         if (!get_ldev(mdev)) {
1090                 retcode = ERR_NO_DISK;
1091                 goto out;
1092         }
1093
1094 /* FIXME freeze IO, cluster wide.
1095  *
1096  * We should make sure no-one uses
1097  * some half-updated struct when we
1098  * assign it later. */
1099
1100         ndc = kmalloc(sizeof(*ndc), GFP_KERNEL);
1101         if (!ndc) {
1102                 retcode = ERR_NOMEM;
1103                 goto fail;
1104         }
1105
1106         memcpy(ndc, &mdev->ldev->dc, sizeof(*ndc));
1107         err = disk_conf_from_attrs_for_change(ndc, info);
1108         if (err) {
1109                 retcode = ERR_MANDATORY_TAG;
1110                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1111         }
1112
1113         if (!expect(ndc->resync_rate >= 1))
1114                 ndc->resync_rate = 1;
1115
1116         /* clip to allowed range */
1117         if (!expect(ndc->al_extents >= DRBD_AL_EXTENTS_MIN))
1118                 ndc->al_extents = DRBD_AL_EXTENTS_MIN;
1119         if (!expect(ndc->al_extents <= DRBD_AL_EXTENTS_MAX))
1120                 ndc->al_extents = DRBD_AL_EXTENTS_MAX;
1121
1122         /* most sanity checks done, try to assign the new sync-after
1123          * dependency.  need to hold the global lock in there,
1124          * to avoid a race in the dependency loop check. */
1125         retcode = drbd_alter_sa(mdev, ndc->resync_after);
1126         if (retcode != NO_ERROR)
1127                 goto fail;
1128
1129         fifo_size = (ndc->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
1130         if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
1131                 rs_plan_s   = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
1132                 if (!rs_plan_s) {
1133                         dev_err(DEV, "kzalloc of fifo_buffer failed\n");
1134                         retcode = ERR_NOMEM;
1135                         goto fail;
1136                 }
1137         }
1138
1139         if (fifo_size != mdev->rs_plan_s.size) {
1140                 kfree(mdev->rs_plan_s.values);
1141                 mdev->rs_plan_s.values = rs_plan_s;
1142                 mdev->rs_plan_s.size   = fifo_size;
1143                 mdev->rs_planed = 0;
1144                 rs_plan_s = NULL;
1145         }
1146
1147         wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
1148         drbd_al_shrink(mdev);
1149         err = drbd_check_al_size(mdev, ndc);
1150         lc_unlock(mdev->act_log);
1151         wake_up(&mdev->al_wait);
1152
1153         if (err) {
1154                 retcode = ERR_NOMEM;
1155                 goto fail;
1156         }
1157
1158         /* FIXME
1159          * To avoid someone looking at a half-updated struct, we probably
1160          * should have a rw-semaphore on net_conf and disk_conf.
1161          */
1162         mdev->ldev->dc = *ndc;
1163
1164         drbd_md_sync(mdev);
1165
1166
1167         if (mdev->state.conn >= C_CONNECTED)
1168                 drbd_send_sync_param(mdev);
1169
1170  fail:
1171         put_ldev(mdev);
1172         kfree(ndc);
1173         kfree(rs_plan_s);
1174  out:
1175         drbd_adm_finish(info, retcode);
1176         return 0;
1177 }
1178
1179 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1180 {
1181         struct drbd_conf *mdev;
1182         int err;
1183         enum drbd_ret_code retcode;
1184         enum determine_dev_size dd;
1185         sector_t max_possible_sectors;
1186         sector_t min_md_device_sectors;
1187         struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
1188         struct block_device *bdev;
1189         struct lru_cache *resync_lru = NULL;
1190         union drbd_state ns, os;
1191         enum drbd_state_rv rv;
1192         int cp_discovered = 0;
1193
1194         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1195         if (!adm_ctx.reply_skb)
1196                 return retcode;
1197         if (retcode != NO_ERROR)
1198                 goto finish;
1199
1200         mdev = adm_ctx.mdev;
1201         conn_reconfig_start(mdev->tconn);
1202
1203         /* if you want to reconfigure, please tear down first */
1204         if (mdev->state.disk > D_DISKLESS) {
1205                 retcode = ERR_DISK_CONFIGURED;
1206                 goto fail;
1207         }
1208         /* It may just now have detached because of IO error.  Make sure
1209          * drbd_ldev_destroy is done already, we may end up here very fast,
1210          * e.g. if someone calls attach from the on-io-error handler,
1211          * to realize a "hot spare" feature (not that I'd recommend that) */
1212         wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
1213
1214         /* allocation not in the IO path, drbdsetup context */
1215         nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1216         if (!nbc) {
1217                 retcode = ERR_NOMEM;
1218                 goto fail;
1219         }
1220
1221         nbc->dc = (struct disk_conf) {
1222                 {}, 0, /* backing_dev */
1223                 {}, 0, /* meta_dev */
1224                 0, /* meta_dev_idx */
1225                 DRBD_DISK_SIZE_SECT_DEF, /* disk_size */
1226                 DRBD_MAX_BIO_BVECS_DEF, /* max_bio_bvecs */
1227                 DRBD_ON_IO_ERROR_DEF, /* on_io_error */
1228                 DRBD_FENCING_DEF, /* fencing */
1229                 DRBD_RATE_DEF, /* resync_rate */
1230                 DRBD_AFTER_DEF, /* resync_after */
1231                 DRBD_AL_EXTENTS_DEF, /* al_extents */
1232                 DRBD_C_PLAN_AHEAD_DEF, /* c_plan_ahead */
1233                 DRBD_C_DELAY_TARGET_DEF, /* c_delay_target */
1234                 DRBD_C_FILL_TARGET_DEF, /* c_fill_target */
1235                 DRBD_C_MAX_RATE_DEF, /* c_max_rate */
1236                 DRBD_C_MIN_RATE_DEF, /* c_min_rate */
1237                 0, /* no_disk_barrier */
1238                 0, /* no_disk_flush */
1239                 0, /* no_disk_drain */
1240                 0, /* no_md_flush */
1241         };
1242
1243         err = disk_conf_from_attrs(&nbc->dc, info);
1244         if (err) {
1245                 retcode = ERR_MANDATORY_TAG;
1246                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1247                 goto fail;
1248         }
1249
1250         if ((int)nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
1251                 retcode = ERR_MD_IDX_INVALID;
1252                 goto fail;
1253         }
1254
1255         if (get_net_conf(mdev->tconn)) {
1256                 int prot = mdev->tconn->net_conf->wire_protocol;
1257                 put_net_conf(mdev->tconn);
1258                 if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
1259                         retcode = ERR_STONITH_AND_PROT_A;
1260                         goto fail;
1261                 }
1262         }
1263
1264         bdev = blkdev_get_by_path(nbc->dc.backing_dev,
1265                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
1266         if (IS_ERR(bdev)) {
1267                 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
1268                         PTR_ERR(bdev));
1269                 retcode = ERR_OPEN_DISK;
1270                 goto fail;
1271         }
1272         nbc->backing_bdev = bdev;
1273
1274         /*
1275          * meta_dev_idx >= 0: external fixed size, possibly multiple
1276          * drbd sharing one meta device.  TODO in that case, paranoia
1277          * check that [md_bdev, meta_dev_idx] is not yet used by some
1278          * other drbd minor!  (if you use drbd.conf + drbdadm, that
1279          * should check it for you already; but if you don't, or
1280          * someone fooled it, we need to double check here)
1281          */
1282         bdev = blkdev_get_by_path(nbc->dc.meta_dev,
1283                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL,
1284                                   ((int)nbc->dc.meta_dev_idx < 0) ?
1285                                   (void *)mdev : (void *)drbd_m_holder);
1286         if (IS_ERR(bdev)) {
1287                 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
1288                         PTR_ERR(bdev));
1289                 retcode = ERR_OPEN_MD_DISK;
1290                 goto fail;
1291         }
1292         nbc->md_bdev = bdev;
1293
1294         if ((nbc->backing_bdev == nbc->md_bdev) !=
1295             (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1296              nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
1297                 retcode = ERR_MD_IDX_INVALID;
1298                 goto fail;
1299         }
1300
1301         resync_lru = lc_create("resync", drbd_bm_ext_cache,
1302                         1, 61, sizeof(struct bm_extent),
1303                         offsetof(struct bm_extent, lce));
1304         if (!resync_lru) {
1305                 retcode = ERR_NOMEM;
1306                 goto fail;
1307         }
1308
1309         /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
1310         drbd_md_set_sector_offsets(mdev, nbc);
1311
1312         if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
1313                 dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
1314                         (unsigned long long) drbd_get_max_capacity(nbc),
1315                         (unsigned long long) nbc->dc.disk_size);
1316                 retcode = ERR_DISK_TO_SMALL;
1317                 goto fail;
1318         }
1319
1320         if ((int)nbc->dc.meta_dev_idx < 0) {
1321                 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1322                 /* at least one MB, otherwise it does not make sense */
1323                 min_md_device_sectors = (2<<10);
1324         } else {
1325                 max_possible_sectors = DRBD_MAX_SECTORS;
1326                 min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
1327         }
1328
1329         if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
1330                 retcode = ERR_MD_DISK_TO_SMALL;
1331                 dev_warn(DEV, "refusing attach: md-device too small, "
1332                      "at least %llu sectors needed for this meta-disk type\n",
1333                      (unsigned long long) min_md_device_sectors);
1334                 goto fail;
1335         }
1336
1337         /* Make sure the new disk is big enough
1338          * (we may currently be R_PRIMARY with no local disk...) */
1339         if (drbd_get_max_capacity(nbc) <
1340             drbd_get_capacity(mdev->this_bdev)) {
1341                 retcode = ERR_DISK_TO_SMALL;
1342                 goto fail;
1343         }
1344
1345         nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1346
1347         if (nbc->known_size > max_possible_sectors) {
1348                 dev_warn(DEV, "==> truncating very big lower level device "
1349                         "to currently maximum possible %llu sectors <==\n",
1350                         (unsigned long long) max_possible_sectors);
1351                 if ((int)nbc->dc.meta_dev_idx >= 0)
1352                         dev_warn(DEV, "==>> using internal or flexible "
1353                                       "meta data may help <<==\n");
1354         }
1355
1356         drbd_suspend_io(mdev);
1357         /* also wait for the last barrier ack. */
1358         wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
1359         /* and for any other previously queued work */
1360         drbd_flush_workqueue(mdev);
1361
1362         rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
1363         retcode = rv;  /* FIXME: Type mismatch. */
1364         drbd_resume_io(mdev);
1365         if (rv < SS_SUCCESS)
1366                 goto fail;
1367
1368         if (!get_ldev_if_state(mdev, D_ATTACHING))
1369                 goto force_diskless;
1370
1371         drbd_md_set_sector_offsets(mdev, nbc);
1372
1373         if (!mdev->bitmap) {
1374                 if (drbd_bm_init(mdev)) {
1375                         retcode = ERR_NOMEM;
1376                         goto force_diskless_dec;
1377                 }
1378         }
1379
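        /* Read the meta data from the new backing device. */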
1380         retcode = drbd_md_read(mdev, nbc);
1381         if (retcode != NO_ERROR)
1382                 goto force_diskless_dec;
1383
1384         if (mdev->state.conn < C_CONNECTED &&
1385             mdev->state.role == R_PRIMARY &&
1386             (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1387                 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
1388                     (unsigned long long)mdev->ed_uuid);
1389                 retcode = ERR_DATA_NOT_CURRENT;
1390                 goto force_diskless_dec;
1391         }
1392
1393         /* Since we are diskless, fix the activity log first... */
1394         if (drbd_check_al_size(mdev, &nbc->dc)) {
1395                 retcode = ERR_NOMEM;
1396                 goto force_diskless_dec;
1397         }
1398
1399         /* Prevent shrinking of consistent devices ! */
1400         if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
1401             drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
1402                 dev_warn(DEV, "refusing to truncate a consistent device\n");
1403                 retcode = ERR_DISK_TO_SMALL;
1404                 goto force_diskless_dec;
1405         }
1406
1407         if (!drbd_al_read_log(mdev, nbc)) {
1408                 retcode = ERR_IO_MD_DISK;
1409                 goto force_diskless_dec;
1410         }
1411
1412         /* Reset the "barriers don't work" bits here, then force meta data to
1413          * be written, to ensure we determine if barriers are supported. */
1414         if (nbc->dc.no_md_flush)
1415                 set_bit(MD_NO_FUA, &mdev->flags);
1416         else
1417                 clear_bit(MD_NO_FUA, &mdev->flags);
1418
1419         /* Point of no return reached.
1420          * Devices and memory are no longer released by error cleanup below.
1421          * Now mdev takes over responsibility, and the state engine should
1422          * clean it up somewhere.  */
1423         D_ASSERT(mdev->ldev == NULL);
1424         mdev->ldev = nbc;
1425         mdev->resync = resync_lru;
1426         nbc = NULL;
1427         resync_lru = NULL;
1428
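        /* Initialize the write ordering method; drbd_bump_write_ordering()
         * will not exceed what the disk configuration allows. */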
1429         mdev->write_ordering = WO_bdev_flush;
1430         drbd_bump_write_ordering(mdev, WO_bdev_flush);
1431
1432         if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
1433                 set_bit(CRASHED_PRIMARY, &mdev->flags);
1434         else
1435                 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1436
1437         if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1438             !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod)) {
1439                 set_bit(CRASHED_PRIMARY, &mdev->flags);
1440                 cp_discovered = 1;
1441         }
1442
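        /* Reset the transfer statistics for the freshly attached disk. */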
1443         mdev->send_cnt = 0;
1444         mdev->recv_cnt = 0;
1445         mdev->read_cnt = 0;
1446         mdev->writ_cnt = 0;
1447
1448         drbd_reconsider_max_bio_size(mdev);
1449
1450         /* If I am currently not R_PRIMARY,
1451          * but meta data primary indicator is set,
1452          * I just now recover from a hard crash,
1453          * and have been R_PRIMARY before that crash.
1454          *
1455          * Now, if I had no connection before that crash
1456          * (have been degraded R_PRIMARY), chances are that
1457          * I won't find my peer now either.
1458          *
1459          * In that case, and _only_ in that case,
1460          * we use the degr-wfc-timeout instead of the default,
1461          * so we can automatically recover from a crash of a
1462          * degraded but active "cluster" after a certain timeout.
1463          */
1464         clear_bit(USE_DEGR_WFC_T, &mdev->flags);
1465         if (mdev->state.role != R_PRIMARY &&
1466              drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1467             !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1468                 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1469
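        /* Apply the resulting device size; if the device grew, note that a
         * resync is needed after the next state negotiation. */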
1470         dd = drbd_determine_dev_size(mdev, 0);
1471         if (dd == dev_size_error) {
1472                 retcode = ERR_NOMEM_BITMAP;
1473                 goto force_diskless_dec;
1474         } else if (dd == grew)
1475                 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
1476
1477         if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
1478                 dev_info(DEV, "Assuming that all blocks are out of sync "
1479                      "(aka FullSync)\n");
1480                 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
1481                         "set_n_write from attaching", BM_LOCKED_MASK)) {
1482                         retcode = ERR_IO_MD_DISK;
1483                         goto force_diskless_dec;
1484                 }
1485         } else {
1486                 if (drbd_bitmap_io(mdev, &drbd_bm_read,
1487                         "read from attaching", BM_LOCKED_MASK)) {
1488                         retcode = ERR_IO_MD_DISK;
1489                         goto force_diskless_dec;
1490                 }
1491         }
1492
1493         if (cp_discovered) {
1494                 drbd_al_apply_to_bm(mdev);
1495                 if (drbd_bitmap_io(mdev, &drbd_bm_write,
1496                         "crashed primary apply AL", BM_LOCKED_MASK)) {
1497                         retcode = ERR_IO_MD_DISK;
1498                         goto force_diskless_dec;
1499                 }
1500         }
1501
1502         if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
1503                 drbd_suspend_al(mdev); /* IO is still suspended here... */
1504
1505         spin_lock_irq(&mdev->tconn->req_lock);
1506         os = drbd_read_state(mdev);
1507         ns = os;
1508         /* If MDF_CONSISTENT is not set go into inconsistent state,
1509            otherwise investigate MDF_WasUpToDate...
1510            If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
1511            otherwise into D_CONSISTENT state.
1512         */
1513         if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
1514                 if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
1515                         ns.disk = D_CONSISTENT;
1516                 else
1517                         ns.disk = D_OUTDATED;
1518         } else {
1519                 ns.disk = D_INCONSISTENT;
1520         }
1521
1522         if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
1523                 ns.pdsk = D_OUTDATED;
1524
1525         if (ns.disk == D_CONSISTENT &&
1526             (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
1527                 ns.disk = D_UP_TO_DATE;
1528
1529         /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
1530            MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
1531            this point, because drbd_request_state() modifies these
1532            flags. */
1533
1534         /* In case we are C_CONNECTED postpone any decision on the new disk
1535            state until after the negotiation phase. */
1536         if (mdev->state.conn == C_CONNECTED) {
1537                 mdev->new_state_tmp.i = ns.i;
1538                 ns.i = os.i;
1539                 ns.disk = D_NEGOTIATING;
1540
1541                 /* We expect to receive up-to-date UUIDs soon.
1542                    To avoid a race in receive_state, free p_uuid while
1543                    holding req_lock. I.e. atomic with the state change */
1544                 kfree(mdev->p_uuid);
1545                 mdev->p_uuid = NULL;
1546         }
1547
1548         rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
1549         spin_unlock_irq(&mdev->tconn->req_lock);
1550
1551         if (rv < SS_SUCCESS)
1552                 goto force_diskless_dec;
1553
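        /* The lowest bit of the current UUID reflects the local role. */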
1554         if (mdev->state.role == R_PRIMARY)
1555                 mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
1556         else
1557                 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
1558
1559         drbd_md_mark_dirty(mdev);
1560         drbd_md_sync(mdev);
1561
1562         kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1563         put_ldev(mdev);
1564         conn_reconfig_done(mdev->tconn);
1565         drbd_adm_finish(info, retcode);
1566         return 0;
1567
1568  force_diskless_dec:
1569         put_ldev(mdev);
1570  force_diskless:
1571         drbd_force_state(mdev, NS(disk, D_FAILED));
1572         drbd_md_sync(mdev);
1573  fail:
1574         conn_reconfig_done(mdev->tconn);
1575         if (nbc) {
1576                 if (nbc->backing_bdev)
1577                         blkdev_put(nbc->backing_bdev,
1578                                    FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1579                 if (nbc->md_bdev)
1580                         blkdev_put(nbc->md_bdev,
1581                                    FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1582                 kfree(nbc);
1583         }
1584         lc_destroy(resync_lru);
1585
1586  finish:
1587         drbd_adm_finish(info, retcode);
1588         return 0;
1589 }
1590
1591 static int adm_detach(struct drbd_conf *mdev)
1592 {
1593         enum drbd_state_rv retcode;
1594         drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
1595         retcode = drbd_request_state(mdev, NS(disk, D_DISKLESS));
1596         wait_event(mdev->misc_wait,
1597                         mdev->state.disk != D_DISKLESS ||
1598                         !atomic_read(&mdev->local_cnt));
1599         drbd_resume_io(mdev);
1600         return retcode;
1601 }
1602
1603 /* Detaching the disk is a process in multiple stages.  First we need to lock
1604  * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
1605  * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1606  * internal references as well.
1607  * Only then have we finally detached. */
1608 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
1609 {
1610         enum drbd_ret_code retcode;
1611
1612         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1613         if (!adm_ctx.reply_skb)
1614                 return retcode;
1615         if (retcode != NO_ERROR)
1616                 goto out;
1617
1618         retcode = adm_detach(adm_ctx.mdev);
1619 out:
1620         drbd_adm_finish(info, retcode);
1621         return 0;
1622 }
1623
1624 static bool conn_resync_running(struct drbd_tconn *tconn)
1625 {
1626         struct drbd_conf *mdev;
1627         int vnr;
1628
1629         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1630                 if (mdev->state.conn == C_SYNC_SOURCE ||
1631                     mdev->state.conn == C_SYNC_TARGET ||
1632                     mdev->state.conn == C_PAUSED_SYNC_S ||
1633                     mdev->state.conn == C_PAUSED_SYNC_T)
1634                         return true;
1635         }
1636         return false;
1637 }
1638
1639 static bool conn_ov_running(struct drbd_tconn *tconn)
1640 {
1641         struct drbd_conf *mdev;
1642         int vnr;
1643
1644         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1645                 if (mdev->state.conn == C_VERIFY_S ||
1646                     mdev->state.conn == C_VERIFY_T)
1647                         return true;
1648         }
1649         return false;
1650 }
1651
1652 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
1653 {
1654         enum drbd_ret_code retcode;
1655         struct drbd_tconn *tconn;
1656         struct net_conf *new_conf = NULL;
1657         int err;
1658         int ovr; /* online verify running */
1659         int rsr; /* re-sync running */
1660         struct crypto_hash *verify_tfm = NULL;
1661         struct crypto_hash *csums_tfm = NULL;
1662
1663
1664         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
1665         if (!adm_ctx.reply_skb)
1666                 return retcode;
1667         if (retcode != NO_ERROR)
1668                 goto out;
1669
1670         tconn = adm_ctx.tconn;
1671
1672         new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
1673         if (!new_conf) {
1674                 retcode = ERR_NOMEM;
1675                 goto out;
1676         }
1677
1678         /* we also need a net config
1679          * to change the options on */
1680         if (!get_net_conf(tconn)) {
1681                 drbd_msg_put_info("net conf missing, try connect");
1682                 retcode = ERR_INVALID_REQUEST;
1683                 goto out;
1684         }
1685
1686         conn_reconfig_start(tconn);
1687
1688         memcpy(new_conf, tconn->net_conf, sizeof(*new_conf));
1689         err = net_conf_from_attrs_for_change(new_conf, info);
1690         if (err) {
1691                 retcode = ERR_MANDATORY_TAG;
1692                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1693                 goto fail;
1694         }
1695
1696         /* re-sync running */
1697         rsr = conn_resync_running(tconn);
1698         if (rsr && strcmp(new_conf->csums_alg, tconn->net_conf->csums_alg)) {
1699                 retcode = ERR_CSUMS_RESYNC_RUNNING;
1700                 goto fail;
1701         }
1702
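        /* A new csums transform may only be installed while no resync is
         * using the old one. */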
1703         if (!rsr && new_conf->csums_alg[0]) {
1704                 csums_tfm = crypto_alloc_hash(new_conf->csums_alg, 0, CRYPTO_ALG_ASYNC);
1705                 if (IS_ERR(csums_tfm)) {
1706                         csums_tfm = NULL;
1707                         retcode = ERR_CSUMS_ALG;
1708                         goto fail;
1709                 }
1710
1711                 if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
1712                         retcode = ERR_CSUMS_ALG_ND;
1713                         goto fail;
1714                 }
1715         }
1716
1717         /* online verify running */
1718         ovr = conn_ov_running(tconn);
1719         if (ovr) {
1720                 if (strcmp(new_conf->verify_alg, tconn->net_conf->verify_alg)) {
1721                         retcode = ERR_VERIFY_RUNNING;
1722                         goto fail;
1723                 }
1724         }
1725
1726         if (!ovr && new_conf->verify_alg[0]) {
1727                 verify_tfm = crypto_alloc_hash(new_conf->verify_alg, 0, CRYPTO_ALG_ASYNC);
1728                 if (IS_ERR(verify_tfm)) {
1729                         verify_tfm = NULL;
1730                         retcode = ERR_VERIFY_ALG;
1731                         goto fail;
1732                 }
1733
1734                 if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
1735                         retcode = ERR_VERIFY_ALG_ND;
1736                         goto fail;
1737                 }
1738         }
1739
1740
1741         /* For now, use struct assignment, not pointer assignment.
1742          * We don't have any means to determine who might still
1743          * keep a local alias into the struct,
1744          * so we cannot just free it and hope for the best :(
1745          * FIXME
1746          * To avoid someone looking at a half-updated struct, we probably
1747          * should have a rw-semaphore on net_conf and disk_conf.
1748          */
1749         *tconn->net_conf = *new_conf;
1750
1751         if (!rsr) {
1752                 crypto_free_hash(tconn->csums_tfm);
1753                 tconn->csums_tfm = csums_tfm;
1754                 csums_tfm = NULL;
1755         }
1756         if (!ovr) {
1757                 crypto_free_hash(tconn->verify_tfm);
1758                 tconn->verify_tfm = verify_tfm;
1759                 verify_tfm = NULL;
1760         }
1761
1762         if (tconn->cstate >= C_WF_REPORT_PARAMS)
1763                 drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
1764
1765  fail:
1766         crypto_free_hash(csums_tfm);
1767         crypto_free_hash(verify_tfm);
1768         kfree(new_conf);
1769         put_net_conf(tconn);
1770         conn_reconfig_done(tconn);
1771  out:
1772         drbd_adm_finish(info, retcode);
1773         return 0;
1774 }
1775
1776 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
1777 {
1778         char hmac_name[CRYPTO_MAX_ALG_NAME];
1779         struct drbd_conf *mdev;
1780         struct net_conf *new_conf = NULL;
1781         struct crypto_hash *tfm = NULL;
1782         struct crypto_hash *integrity_w_tfm = NULL;
1783         struct crypto_hash *integrity_r_tfm = NULL;
1784         void *int_dig_in = NULL;
1785         void *int_dig_vv = NULL;
1786         struct drbd_tconn *oconn;
1787         struct drbd_tconn *tconn;
1788         struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
1789         enum drbd_ret_code retcode;
1790         int i;
1791         int err;
1792
1793         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
1794         if (!adm_ctx.reply_skb)
1795                 return retcode;
1796         if (retcode != NO_ERROR)
1797                 goto out;
1798
1799         tconn = adm_ctx.tconn;
1800         conn_reconfig_start(tconn);
1801
1802         if (tconn->cstate > C_STANDALONE) {
1803                 retcode = ERR_NET_CONFIGURED;
1804                 goto fail;
1805         }
1806
1807         /* allocation not in the IO path, cqueue thread context */
1808         new_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
1809         if (!new_conf) {
1810                 retcode = ERR_NOMEM;
1811                 goto fail;
1812         }
1813
1814         *new_conf = (struct net_conf) {
1815                 {}, 0, /* my_addr */
1816                 {}, 0, /* peer_addr */
1817                 {}, 0, /* shared_secret */
1818                 {}, 0, /* cram_hmac_alg */
1819                 {}, 0, /* integrity_alg */
1820                 {}, 0, /* verify_alg */
1821                 {}, 0, /* csums_alg */
1822                 DRBD_PROTOCOL_DEF, /* wire_protocol */
1823                 DRBD_CONNECT_INT_DEF, /* try_connect_int */
1824                 DRBD_TIMEOUT_DEF, /* timeout */
1825                 DRBD_PING_INT_DEF, /* ping_int */
1826                 DRBD_PING_TIMEO_DEF, /* ping_timeo */
1827                 DRBD_SNDBUF_SIZE_DEF, /* sndbuf_size */
1828                 DRBD_RCVBUF_SIZE_DEF, /* rcvbuf_size */
1829                 DRBD_KO_COUNT_DEF, /* ko_count */
1830                 DRBD_MAX_BUFFERS_DEF, /* max_buffers */
1831                 DRBD_MAX_EPOCH_SIZE_DEF, /* max_epoch_size */
1832                 DRBD_UNPLUG_WATERMARK_DEF, /* unplug_watermark */
1833                 DRBD_AFTER_SB_0P_DEF, /* after_sb_0p */
1834                 DRBD_AFTER_SB_1P_DEF, /* after_sb_1p */
1835                 DRBD_AFTER_SB_2P_DEF, /* after_sb_2p */
1836                 DRBD_RR_CONFLICT_DEF, /* rr_conflict */
1837                 DRBD_ON_CONGESTION_DEF, /* on_congestion */
1838                 DRBD_CONG_FILL_DEF, /* cong_fill */
1839                 DRBD_CONG_EXTENTS_DEF, /* cong_extents */
1840                 0, /* two_primaries */
1841                 0, /* want_lose */
1842                 0, /* no_cork */
1843                 0, /* always_asbp */
1844                 0, /* dry_run */
1845                 0, /* use_rle */
1846         };
1847
1848         err = net_conf_from_attrs(new_conf, info);
1849         if (err) {
1850                 retcode = ERR_MANDATORY_TAG;
1851                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1852                 goto fail;
1853         }
1854
1855         if (new_conf->two_primaries
1856             && (new_conf->wire_protocol != DRBD_PROT_C)) {
1857                 retcode = ERR_NOT_PROTO_C;
1858                 goto fail;
1859         }
1860
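        /* Per volume sanity checks: fencing policy FP_STONITH is not allowed
         * together with protocol A, a current Primary must not be told to
         * discard its data, and every volume needs an allocated bitmap. */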
1861         idr_for_each_entry(&tconn->volumes, mdev, i) {
1862                 if (get_ldev(mdev)) {
1863                         enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
1864                         put_ldev(mdev);
1865                         if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
1866                                 retcode = ERR_STONITH_AND_PROT_A;
1867                                 goto fail;
1868                         }
1869                 }
1870                 if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
1871                         retcode = ERR_DISCARD;
1872                         goto fail;
1873                 }
1874                 if (!mdev->bitmap) {
1875                         if (drbd_bm_init(mdev)) {
1876                                 retcode = ERR_NOMEM;
1877                                 goto fail;
1878                         }
1879                 }
1880         }
1881
1882         if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) {
1883                 retcode = ERR_CONG_NOT_PROTO_A;
1884                 goto fail;
1885         }
1886
1887         retcode = NO_ERROR;
1888
1889         new_my_addr = (struct sockaddr *)&new_conf->my_addr;
1890         new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
1891
1892         /* No need to take drbd_cfg_mutex here.  All reconfiguration is
1893          * strictly serialized on genl_lock(). We are protected against
1894          * concurrent reconfiguration/addition/deletion */
1895         list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
1896                 if (oconn == tconn)
1897                         continue;
1898                 if (get_net_conf(oconn)) {
1899                         taken_addr = (struct sockaddr *)&oconn->net_conf->my_addr;
1900                         if (new_conf->my_addr_len == oconn->net_conf->my_addr_len &&
1901                             !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
1902                                 retcode = ERR_LOCAL_ADDR;
1903
1904                         taken_addr = (struct sockaddr *)&oconn->net_conf->peer_addr;
1905                         if (new_conf->peer_addr_len == oconn->net_conf->peer_addr_len &&
1906                             !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
1907                                 retcode = ERR_PEER_ADDR;
1908
1909                         put_net_conf(oconn);
1910                         if (retcode != NO_ERROR)
1911                                 goto fail;
1912                 }
1913         }
1914
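        /* Set up the HMAC transform used for CRAM based peer authentication,
         * if a cram-hmac-alg is configured. */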
1915         if (new_conf->cram_hmac_alg[0] != 0) {
1916                 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
1917                         new_conf->cram_hmac_alg);
1918                 tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
1919                 if (IS_ERR(tfm)) {
1920                         tfm = NULL;
1921                         retcode = ERR_AUTH_ALG;
1922                         goto fail;
1923                 }
1924
1925                 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
1926                         retcode = ERR_AUTH_ALG_ND;
1927                         goto fail;
1928                 }
1929         }
1930
1931         if (new_conf->integrity_alg[0]) {
1932                 integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
1933                 if (IS_ERR(integrity_w_tfm)) {
1934                         integrity_w_tfm = NULL;
1935                         retcode = ERR_INTEGRITY_ALG;
1936                         goto fail;
1937                 }
1938
1939                 if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
1940                         retcode = ERR_INTEGRITY_ALG_ND;
1941                         goto fail;
1942                 }
1943
1944                 integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
1945                 if (IS_ERR(integrity_r_tfm)) {
1946                         integrity_r_tfm = NULL;
1947                         retcode = ERR_INTEGRITY_ALG;
1948                         goto fail;
1949                 }
1950         }
1951
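        /* Make sure the shared secret is NUL terminated. */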
1952         ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
1953
1954         /* allocation not in the IO path, cqueue thread context */
1955         if (integrity_w_tfm) {
1956                 i = crypto_hash_digestsize(integrity_w_tfm);
1957                 int_dig_in = kmalloc(i, GFP_KERNEL);
1958                 if (!int_dig_in) {
1959                         retcode = ERR_NOMEM;
1960                         goto fail;
1961                 }
1962                 int_dig_vv = kmalloc(i, GFP_KERNEL);
1963                 if (!int_dig_vv) {
1964                         retcode = ERR_NOMEM;
1965                         goto fail;
1966                 }
1967         }
1968
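        /* Commit the new configuration under req_lock: install net_conf and
         * the crypto transforms, then request the C_UNCONNECTED state. */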
1969         conn_flush_workqueue(tconn);
1970         spin_lock_irq(&tconn->req_lock);
1971         if (tconn->net_conf != NULL) {
1972                 retcode = ERR_NET_CONFIGURED;
1973                 spin_unlock_irq(&tconn->req_lock);
1974                 goto fail;
1975         }
1976         tconn->net_conf = new_conf;
1977
1978         crypto_free_hash(tconn->cram_hmac_tfm);
1979         tconn->cram_hmac_tfm = tfm;
1980
1981         crypto_free_hash(tconn->integrity_w_tfm);
1982         tconn->integrity_w_tfm = integrity_w_tfm;
1983
1984         crypto_free_hash(tconn->integrity_r_tfm);
1985         tconn->integrity_r_tfm = integrity_r_tfm;
1986
1987         kfree(tconn->int_dig_in);
1988         kfree(tconn->int_dig_vv);
1989         tconn->int_dig_in = int_dig_in;
1990         tconn->int_dig_vv = int_dig_vv;
1991         retcode = _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
1992         spin_unlock_irq(&tconn->req_lock);
1993
1994         idr_for_each_entry(&tconn->volumes, mdev, i) {
1995                 mdev->send_cnt = 0;
1996                 mdev->recv_cnt = 0;
1997                 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1998         }
1999         conn_reconfig_done(tconn);
2000         drbd_adm_finish(info, retcode);
2001         return 0;
2002
2003 fail:
2004         kfree(int_dig_in);
2005         kfree(int_dig_vv);
2006         crypto_free_hash(tfm);
2007         crypto_free_hash(integrity_w_tfm);
2008         crypto_free_hash(integrity_r_tfm);
2009         kfree(new_conf);
2010
2011         conn_reconfig_done(tconn);
2012 out:
2013         drbd_adm_finish(info, retcode);
2014         return 0;
2015 }
2016
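/* With force, drop the connection unconditionally; otherwise negotiate a
 * graceful disconnect and handle the refusals the state engine may return. */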
2017 static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
2018 {
2019         enum drbd_state_rv rv;
2020         if (force) {
2021                 spin_lock_irq(&tconn->req_lock);
2022                 if (tconn->cstate >= C_WF_CONNECTION)
2023                         _conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
2024                 spin_unlock_irq(&tconn->req_lock);
2025                 return SS_SUCCESS;
2026         }
2027
2028         rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), 0);
2029
2030         switch (rv) {
2031         case SS_NOTHING_TO_DO:
2032         case SS_ALREADY_STANDALONE:
2033                 return SS_SUCCESS;
2034         case SS_PRIMARY_NOP:
2035                 /* Our state checking code wants to see the peer outdated. */
2036                 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
2037                                                         pdsk, D_OUTDATED), CS_VERBOSE);
2038                 break;
2039         case SS_CW_FAILED_BY_PEER:
2040                 /* The peer probably wants to see us outdated. */
2041                 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
2042                                                         disk, D_OUTDATED), 0);
2043                 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
2044                         conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
2045                         rv = SS_SUCCESS;
2046                 }
2047                 break;
2048         default:;
2049                 /* no special handling necessary */
2050         }
2051
2052         return rv;
2053 }
2054
2055 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
2056 {
2057         struct disconnect_parms parms;
2058         struct drbd_tconn *tconn;
2059         enum drbd_state_rv rv;
2060         enum drbd_ret_code retcode;
2061         int err;
2062
2063         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2064         if (!adm_ctx.reply_skb)
2065                 return retcode;
2066         if (retcode != NO_ERROR)
2067                 goto fail;
2068
2069         tconn = adm_ctx.tconn;
2070         memset(&parms, 0, sizeof(parms));
2071         if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
2072                 err = disconnect_parms_from_attrs(&parms, info);
2073                 if (err) {
2074                         retcode = ERR_MANDATORY_TAG;
2075                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2076                         goto fail;
2077                 }
2078         }
2079
2080         rv = conn_try_disconnect(tconn, parms.force_disconnect);
2081         if (rv < SS_SUCCESS)
2082                 goto fail;
2083
2084         if (wait_event_interruptible(tconn->ping_wait,
2085                                      tconn->cstate != C_DISCONNECTING)) {
2086                 /* Do not test for mdev->state.conn == C_STANDALONE, since
2087                    someone else might connect us in the meantime! */
2088                 retcode = ERR_INTR;
2089                 goto fail;
2090         }
2091
2092         retcode = NO_ERROR;
2093  fail:
2094         drbd_adm_finish(info, retcode);
2095         return 0;
2096 }
2097
2098 void resync_after_online_grow(struct drbd_conf *mdev)
2099 {
2100         int iass; /* I am sync source */
2101
2102         dev_info(DEV, "Resync of new storage after online grow\n");
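        /* If the roles differ, the Primary becomes sync source; between two
         * Secondaries, the DISCARD_CONCURRENT flag breaks the tie. */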
2103         if (mdev->state.role != mdev->state.peer)
2104                 iass = (mdev->state.role == R_PRIMARY);
2105         else
2106                 iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
2107
2108         if (iass)
2109                 drbd_start_resync(mdev, C_SYNC_SOURCE);
2110         else
2111                 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
2112 }
2113
2114 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2115 {
2116         struct resize_parms rs;
2117         struct drbd_conf *mdev;
2118         enum drbd_ret_code retcode;
2119         enum determine_dev_size dd;
2120         enum dds_flags ddsf;
2121         int err;
2122
2123         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2124         if (!adm_ctx.reply_skb)
2125                 return retcode;
2126         if (retcode != NO_ERROR)
2127                 goto fail;
2128
2129         memset(&rs, 0, sizeof(struct resize_parms));
2130         if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
2131                 err = resize_parms_from_attrs(&rs, info);
2132                 if (err) {
2133                         retcode = ERR_MANDATORY_TAG;
2134                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2135                         goto fail;
2136                 }
2137         }
2138
2139         mdev = adm_ctx.mdev;
2140         if (mdev->state.conn > C_CONNECTED) {
2141                 retcode = ERR_RESIZE_RESYNC;
2142                 goto fail;
2143         }
2144
2145         if (mdev->state.role == R_SECONDARY &&
2146             mdev->state.peer == R_SECONDARY) {
2147                 retcode = ERR_NO_PRIMARY;
2148                 goto fail;
2149         }
2150
2151         if (!get_ldev(mdev)) {
2152                 retcode = ERR_NO_DISK;
2153                 goto fail;
2154         }
2155
2156         if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
2157                 retcode = ERR_NEED_APV_93;
2158                 goto fail;
2159         }
2160
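        /* Pick up a possibly changed size of the backing device. */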
2161         if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
2162                 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
2163
2164         mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
2165         ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
2166         dd = drbd_determine_dev_size(mdev, ddsf);
2167         drbd_md_sync(mdev);
2168         put_ldev(mdev);
2169         if (dd == dev_size_error) {
2170                 retcode = ERR_NOMEM_BITMAP;
2171                 goto fail;
2172         }
2173
2174         if (mdev->state.conn == C_CONNECTED) {
2175                 if (dd == grew)
2176                         set_bit(RESIZE_PENDING, &mdev->flags);
2177
2178                 drbd_send_uuids(mdev);
2179                 drbd_send_sizes(mdev, 1, ddsf);
2180         }
2181
2182  fail:
2183         drbd_adm_finish(info, retcode);
2184         return 0;
2185 }
2186
2187 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
2188 {
2189         enum drbd_ret_code retcode;
2190         cpumask_var_t new_cpu_mask;
2191         struct drbd_tconn *tconn;
2192         int *rs_plan_s = NULL;
2193         struct res_opts sc;
2194         int err;
2195
2196         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2197         if (!adm_ctx.reply_skb)
2198                 return retcode;
2199         if (retcode != NO_ERROR)
2200                 goto fail;
2201         tconn = adm_ctx.tconn;
2202
2203         if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
2204                 retcode = ERR_NOMEM;
2205                 drbd_msg_put_info("unable to allocate cpumask");
2206                 goto fail;
2207         }
2208
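        /* Start from defaults if requested, otherwise from the current resource options. */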
2209         if (((struct drbd_genlmsghdr *)info->userhdr)->flags
2210                         & DRBD_GENL_F_SET_DEFAULTS) {
2211                 memset(&sc, 0, sizeof(struct res_opts));
2212                 sc.on_no_data  = DRBD_ON_NO_DATA_DEF;
2213         } else
2214                 sc = tconn->res_opts;
2215
2216         err = res_opts_from_attrs(&sc, info);
2217         if (err) {
2218                 retcode = ERR_MANDATORY_TAG;
2219                 drbd_msg_put_info(from_attrs_err_to_txt(err));
2220                 goto fail;
2221         }
2222
2223         /* silently ignore cpu mask on UP kernel */
2224         if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
2225                 err = __bitmap_parse(sc.cpu_mask, 32, 0,
2226                                 cpumask_bits(new_cpu_mask), nr_cpu_ids);
2227                 if (err) {
2228                         conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
2229                         retcode = ERR_CPU_MASK_PARSE;
2230                         goto fail;
2231                 }
2232         }
2233
2234
2235         tconn->res_opts = sc;
2236
2237         if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
2238                 cpumask_copy(tconn->cpu_mask, new_cpu_mask);
2239                 drbd_calc_cpu_mask(tconn);
2240                 tconn->receiver.reset_cpu_mask = 1;
2241                 tconn->asender.reset_cpu_mask = 1;
2242                 tconn->worker.reset_cpu_mask = 1;
2243         }
2244
2245 fail:
2246         kfree(rs_plan_s);
2247         free_cpumask_var(new_cpu_mask);
2248
2249         drbd_adm_finish(info, retcode);
2250         return 0;
2251 }
2252
2253 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
2254 {
2255         struct drbd_conf *mdev;
2256         int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
2257
2258         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2259         if (!adm_ctx.reply_skb)
2260                 return retcode;
2261         if (retcode != NO_ERROR)
2262                 goto out;
2263
2264         mdev = adm_ctx.mdev;
2265
2266         /* If there is still bitmap IO pending, probably because a previous
2267          * resync has just finished, wait for it before requesting a new resync. */
2268         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2269
2270         retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
2271
2272         if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
2273                 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2274
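        /* Without a connection we cannot become sync target; fall back to
         * marking the local disk inconsistent, and retry the resync request
         * in case a connection was established in the meantime. */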
2275         while (retcode == SS_NEED_CONNECTION) {
2276                 spin_lock_irq(&mdev->tconn->req_lock);
2277                 if (mdev->state.conn < C_CONNECTED)
2278                         retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
2279                 spin_unlock_irq(&mdev->tconn->req_lock);
2280
2281                 if (retcode != SS_NEED_CONNECTION)
2282                         break;
2283
2284                 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2285         }
2286
2287 out:
2288         drbd_adm_finish(info, retcode);
2289         return 0;
2290 }
2291
2292 static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
2293 {
2294         int rv;
2295
2296         rv = drbd_bmio_set_n_write(mdev);
2297         drbd_suspend_al(mdev);
2298         return rv;
2299 }
2300
2301 static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2302                 union drbd_state mask, union drbd_state val)
2303 {
2304         enum drbd_ret_code retcode;
2305
2306         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2307         if (!adm_ctx.reply_skb)
2308                 return retcode;
2309         if (retcode != NO_ERROR)
2310                 goto out;
2311
2312         retcode = drbd_request_state(adm_ctx.mdev, mask, val);
2313 out:
2314         drbd_adm_finish(info, retcode);
2315         return 0;
2316 }
2317
2318 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
2319 {
2320         return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
2321 }
2322
2323 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
2324 {
2325         enum drbd_ret_code retcode;
2326
2327         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2328         if (!adm_ctx.reply_skb)
2329                 return retcode;
2330         if (retcode != NO_ERROR)
2331                 goto out;
2332
2333         if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
2334                 retcode = ERR_PAUSE_IS_SET;
2335 out:
2336         drbd_adm_finish(info, retcode);
2337         return 0;
2338 }
2339
2340 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
2341 {
2342         union drbd_dev_state s;
2343         enum drbd_ret_code retcode;
2344
2345         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2346         if (!adm_ctx.reply_skb)
2347                 return retcode;
2348         if (retcode != NO_ERROR)
2349                 goto out;
2350
2351         if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
2352                 s = adm_ctx.mdev->state;
2353                 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
2354                         retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
2355                                   s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
2356                 } else {
2357                         retcode = ERR_PAUSE_IS_CLEAR;
2358                 }
2359         }
2360
2361 out:
2362         drbd_adm_finish(info, retcode);
2363         return 0;
2364 }
2365
2366 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
2367 {
2368         return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
2369 }
2370
2371 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
2372 {
2373         struct drbd_conf *mdev;
2374         int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
2375
2376         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2377         if (!adm_ctx.reply_skb)
2378                 return retcode;
2379         if (retcode != NO_ERROR)
2380                 goto out;
2381
2382         mdev = adm_ctx.mdev;
2383         if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
2384                 drbd_uuid_new_current(mdev);
2385                 clear_bit(NEW_CUR_UUID, &mdev->flags);
2386         }
2387         drbd_suspend_io(mdev);
2388         retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
2389         if (retcode == SS_SUCCESS) {
2390                 if (mdev->state.conn < C_CONNECTED)
2391                         tl_clear(mdev->tconn);
2392                 if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
2393                         tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
2394         }
2395         drbd_resume_io(mdev);
2396
2397 out:
2398         drbd_adm_finish(info, retcode);
2399         return 0;
2400 }
2401
2402 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
2403 {
2404         return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
2405 }
2406
2407 int nla_put_drbd_cfg_context(struct sk_buff *skb, const char *conn_name, unsigned vnr)
2408 {
2409         struct nlattr *nla;
2410         nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
2411         if (!nla)
2412                 goto nla_put_failure;
2413         if (vnr != VOLUME_UNSPECIFIED)
2414                 NLA_PUT_U32(skb, T_ctx_volume, vnr);
2415         NLA_PUT_STRING(skb, T_ctx_conn_name, conn_name);
2416         nla_nest_end(skb, nla);
2417         return 0;
2418
2419 nla_put_failure:
2420         if (nla)
2421                 nla_nest_cancel(skb, nla);
2422         return -EMSGSIZE;
2423 }
2424
2425 int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
2426                 const struct sib_info *sib)
2427 {
2428         struct state_info *si = NULL; /* for sizeof(si->member); */
2429         struct nlattr *nla;
2430         int got_ldev;
2431         int got_net;
2432         int err = 0;
2433         int exclude_sensitive;
2434
2435         /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
2436          * to.  So we had better exclude sensitive information.
2437          *
2438          * If sib == NULL, this is drbd_adm_get_status, executed synchronously
2439          * in the context of the requesting user process. Exclude sensitive
2440          * information, unless current has CAP_SYS_ADMIN.
2441          *
2442          * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
2443          * relies on the current implementation of netlink_dump(), which
2444          * executes the dump callback successively from netlink_recvmsg(),
2445          * always in the context of the receiving process */
2446         exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
2447
2448         got_ldev = get_ldev(mdev);
2449         got_net = get_net_conf(mdev->tconn);
2450
2451         /* We still need to add connection name and volume number information.
2452          * Minor number is in drbd_genlmsghdr. */
2453         if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
2454                 goto nla_put_failure;
2455
2456         if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
2457                 goto nla_put_failure;
2458
2459         if (got_ldev)
2460                 if (disk_conf_to_skb(skb, &mdev->ldev->dc, exclude_sensitive))
2461                         goto nla_put_failure;
2462         if (got_net)
2463                 if (net_conf_to_skb(skb, mdev->tconn->net_conf, exclude_sensitive))
2464                         goto nla_put_failure;
2465
2466         nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
2467         if (!nla)
2468                 goto nla_put_failure;
2469         NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
2470         NLA_PUT_U32(skb, T_current_state, mdev->state.i);
2471         NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
2472         NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));
2473
2474         if (got_ldev) {
2475                 NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
2476                 NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
2477                 NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
2478                 NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
2479                 if (C_SYNC_SOURCE <= mdev->state.conn &&
2480                     C_PAUSED_SYNC_T >= mdev->state.conn) {
2481                         NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
2482                         NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
2483                 }
2484         }
2485
2486         if (sib) {
2487                 switch (sib->sib_reason) {
2488                 case SIB_SYNC_PROGRESS:
2489                 case SIB_GET_STATUS_REPLY:
2490                         break;
2491                 case SIB_STATE_CHANGE:
2492                         NLA_PUT_U32(skb, T_prev_state, sib->os.i);
2493                         NLA_PUT_U32(skb, T_new_state, sib->ns.i);
2494                         break;
2495                 case SIB_HELPER_POST:
2496                         NLA_PUT_U32(skb,
2497                                 T_helper_exit_code, sib->helper_exit_code);
2498                         /* fall through */
2499                 case SIB_HELPER_PRE:
2500                         NLA_PUT_STRING(skb, T_helper, sib->helper_name);
2501                         break;
2502                 }
2503         }
2504         nla_nest_end(skb, nla);
2505
2506         if (0)
2507 nla_put_failure:
2508                 err = -EMSGSIZE;
2509         if (got_ldev)
2510                 put_ldev(mdev);
2511         if (got_net)
2512                 put_net_conf(mdev->tconn);
2513         return err;
2514 }
2515
2516 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
2517 {
2518         enum drbd_ret_code retcode;
2519         int err;
2520
2521         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2522         if (!adm_ctx.reply_skb)
2523                 return retcode;
2524         if (retcode != NO_ERROR)
2525                 goto out;
2526
2527         err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
2528         if (err) {
2529                 nlmsg_free(adm_ctx.reply_skb);
2530                 return err;
2531         }
2532 out:
2533         drbd_adm_finish(info, retcode);
2534         return 0;
2535 }
2536
2537 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
2538 {
2539         struct drbd_conf *mdev;
2540         struct drbd_genlmsghdr *dh;
2541         struct drbd_tconn *pos = (struct drbd_tconn *)cb->args[0];
2542         struct drbd_tconn *tconn = NULL;
2543         struct drbd_tconn *tmp;
2544         unsigned volume = cb->args[1];
2545
2546         /* Open coded, deferred, iteration:
2547          * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
2548          *      idr_for_each_entry(&tconn->volumes, mdev, i) {
2549          *        ...
2550          *      }
2551          * }
2552          * where tconn is cb->args[0];
2553          * and i is cb->args[1];
2554          *
2555          * This may miss entries inserted after this dump started,
2556          * or entries deleted before they are reached.
2557          *
2558          * We need to make sure the mdev won't disappear while
2559          * we are looking at it, and revalidate our iterators
2560          * on each iteration.
2561          */
2562
2563         /* synchronize with drbd_new_tconn/drbd_free_tconn */
2564         mutex_lock(&drbd_cfg_mutex);
2565         /* synchronize with drbd_delete_device */
2566         rcu_read_lock();
2567 next_tconn:
2568         /* revalidate iterator position */
2569         list_for_each_entry(tmp, &drbd_tconns, all_tconn) {
2570                 if (pos == NULL) {
2571                         /* first iteration */
2572                         pos = tmp;
2573                         tconn = pos;
2574                         break;
2575                 }
2576                 if (tmp == pos) {
2577                         tconn = pos;
2578                         break;
2579                 }
2580         }
2581         if (tconn) {
2582                 mdev = idr_get_next(&tconn->volumes, &volume);
2583                 if (!mdev) {
2584                         /* No more volumes to dump on this tconn.
2585                          * Advance tconn iterator. */
2586                         pos = list_entry(tconn->all_tconn.next,
2587                                         struct drbd_tconn, all_tconn);
2588                         /* But, did we dump any volume on this tconn yet? */
2589                         if (volume != 0) {
2590                                 tconn = NULL;
2591                                 volume = 0;
2592                                 goto next_tconn;
2593                         }
2594                 }
2595
2596                 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
2597                                 cb->nlh->nlmsg_seq, &drbd_genl_family,
2598                                 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
2599                 if (!dh)
2600                         goto out;
2601
2602                 if (!mdev) {
2603                         /* this is a tconn without a single volume */
2604                         dh->minor = -1U;
2605                         dh->ret_code = NO_ERROR;
2606                         if (nla_put_drbd_cfg_context(skb, tconn->name, VOLUME_UNSPECIFIED))
2607                                 genlmsg_cancel(skb, dh);
2608                         else
2609                                 genlmsg_end(skb, dh);
2610                         goto out;
2611                 }
2612
2613                 D_ASSERT(mdev->vnr == volume);
2614                 D_ASSERT(mdev->tconn == tconn);
2615
2616                 dh->minor = mdev_to_minor(mdev);
2617                 dh->ret_code = NO_ERROR;
2618
2619                 if (nla_put_status_info(skb, mdev, NULL)) {
2620                         genlmsg_cancel(skb, dh);
2621                         goto out;
2622                 }
2623                 genlmsg_end(skb, dh);
2624         }
2625
2626 out:
2627         rcu_read_unlock();
2628         mutex_unlock(&drbd_cfg_mutex);
2629         /* where to start the next iteration */
2630         cb->args[0] = (long)pos;
2631         cb->args[1] = (pos == tconn) ? volume + 1 : 0;
2632
2633         /* Finding no more tconns/volumes/minors results in an empty skb,
2634          * which terminates the dump. */
2635         return skb->len;
2636 }
2637
2638 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
2639 {
2640         enum drbd_ret_code retcode;
2641         struct timeout_parms tp;
2642         int err;
2643
2644         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2645         if (!adm_ctx.reply_skb)
2646                 return retcode;
2647         if (retcode != NO_ERROR)
2648                 goto out;
2649
2650         tp.timeout_type =
2651                 adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
2652                 test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
2653                 UT_DEFAULT;
2654
2655         err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
2656         if (err) {
2657                 nlmsg_free(adm_ctx.reply_skb);
2658                 return err;
2659         }
2660 out:
2661         drbd_adm_finish(info, retcode);
2662         return 0;
2663 }
2664
2665 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
2666 {
2667         struct drbd_conf *mdev;
2668         enum drbd_ret_code retcode;
2669
2670         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2671         if (!adm_ctx.reply_skb)
2672                 return retcode;
2673         if (retcode != NO_ERROR)
2674                 goto out;
2675
2676         mdev = adm_ctx.mdev;
2677         if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
2678                 /* resume from last known position, if possible */
2679                 struct start_ov_parms parms =
2680                         { .ov_start_sector = mdev->ov_start_sector };
2681                 int err = start_ov_parms_from_attrs(&parms, info);
2682                 if (err) {
2683                         retcode = ERR_MANDATORY_TAG;
2684                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2685                         goto out;
2686                 }
2687                 /* w_make_ov_request expects position to be aligned */
2688                 mdev->ov_start_sector = parms.ov_start_sector & ~BM_SECT_PER_BIT;
2689         }
2690         /* If there is still bitmap IO pending, e.g. a previous resync or verify
2691          * that has just finished, wait for it before requesting a new resync. */
2692         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2693         retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
2694 out:
2695         drbd_adm_finish(info, retcode);
2696         return 0;
2697 }
2698
2699
2700 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
2701 {
2702         struct drbd_conf *mdev;
2703         enum drbd_ret_code retcode;
2704         int skip_initial_sync = 0;
2705         int err;
2706         struct new_c_uuid_parms args;
2707
2708         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2709         if (!adm_ctx.reply_skb)
2710                 return retcode;
2711         if (retcode != NO_ERROR)
2712                 goto out_nolock;
2713
2714         mdev = adm_ctx.mdev;
2715         memset(&args, 0, sizeof(args));
2716         if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
2717                 err = new_c_uuid_parms_from_attrs(&args, info);
2718                 if (err) {
2719                         retcode = ERR_MANDATORY_TAG;
2720                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2721                         goto out_nolock;
2722                 }
2723         }
2724
2725         mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
2726
2727         if (!get_ldev(mdev)) {
2728                 retcode = ERR_NO_DISK;
2729                 goto out;
2730         }
2731
2732         /* this is "skip initial sync", assumed to be clean */
2733         if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
2734             mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
2735                 dev_info(DEV, "Preparing to skip initial sync\n");
2736                 skip_initial_sync = 1;
2737         } else if (mdev->state.conn != C_STANDALONE) {
2738                 retcode = ERR_CONNECTED;
2739                 goto out_dec;
2740         }
2741
2742         drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
2743         drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
2744
2745         if (args.clear_bm) {
2746                 err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
2747                         "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
2748                 if (err) {
2749                         dev_err(DEV, "Writing bitmap failed with %d\n", err);
2750                         retcode = ERR_IO_MD_DISK;
2751                 }
2752                 if (skip_initial_sync) {
2753                         drbd_send_uuids_skip_initial_sync(mdev);
2754                         _drbd_uuid_set(mdev, UI_BITMAP, 0);
2755                         drbd_print_uuids(mdev, "cleared bitmap UUID");
2756                         spin_lock_irq(&mdev->tconn->req_lock);
2757                         _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
2758                                         CS_VERBOSE, NULL);
2759                         spin_unlock_irq(&mdev->tconn->req_lock);
2760                 }
2761         }
2762
2763         drbd_md_sync(mdev);
2764 out_dec:
2765         put_ldev(mdev);
2766 out:
2767         mutex_unlock(mdev->state_mutex);
2768 out_nolock:
2769         drbd_adm_finish(info, retcode);
2770         return 0;
2771 }
2772
2773 static enum drbd_ret_code
2774 drbd_check_conn_name(const char *name)
2775 {
2776         if (!name || !name[0]) {
2777                 drbd_msg_put_info("connection name missing");
2778                 return ERR_MANDATORY_TAG;
2779         }
2780         /* if we want to use these in sysfs/configfs/debugfs some day,
2781          * we must not allow slashes */
2782         if (strchr(name, '/')) {
2783                 drbd_msg_put_info("invalid connection name");
2784                 return ERR_INVALID_REQUEST;
2785         }
2786         return NO_ERROR;
2787 }
2788
2789 int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info)
2790 {
2791         enum drbd_ret_code retcode;
2792
2793         retcode = drbd_adm_prepare(skb, info, 0);
2794         if (!adm_ctx.reply_skb)
2795                 return retcode;
2796         if (retcode != NO_ERROR)
2797                 goto out;
2798
2799         retcode = drbd_check_conn_name(adm_ctx.conn_name);
2800         if (retcode != NO_ERROR)
2801                 goto out;
2802
2803         if (adm_ctx.tconn) {
2804                 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
2805                         retcode = ERR_INVALID_REQUEST;
2806                         drbd_msg_put_info("connection exists");
2807                 }
2808                 /* else: still NO_ERROR */
2809                 goto out;
2810         }
2811
2812         if (!drbd_new_tconn(adm_ctx.conn_name))
2813                 retcode = ERR_NOMEM;
2814 out:
2815         drbd_adm_finish(info, retcode);
2816         return 0;
2817 }
2818
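     /* Netlink handler: create a new minor (volume) within an existing
      * connection.  The requested minor number and volume id are range
      * checked first; an already existing minor is only an error if
      * NLM_F_EXCL was requested. */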
2819 int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
2820 {
2821         struct drbd_genlmsghdr *dh = info->userhdr;
2822         enum drbd_ret_code retcode;
2823
2824         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2825         if (!adm_ctx.reply_skb)
2826                 return retcode;
2827         if (retcode != NO_ERROR)
2828                 goto out;
2829
2830         /* FIXME drop minor_count parameter, limit to MINORMASK */
2831         if (dh->minor >= minor_count) {
2832                 drbd_msg_put_info("requested minor out of range");
2833                 retcode = ERR_INVALID_REQUEST;
2834                 goto out;
2835         }
2836         /* FIXME we need a define here */
2837         if (adm_ctx.volume >= 256) {
2838                 drbd_msg_put_info("requested volume id out of range");
2839                 retcode = ERR_INVALID_REQUEST;
2840                 goto out;
2841         }
2842
2843         /* drbd_adm_prepare made sure already
2844          * that mdev->tconn and mdev->vnr match the request. */
2845         if (adm_ctx.mdev) {
2846                 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
2847                         retcode = ERR_MINOR_EXISTS;
2848                 /* else: still NO_ERROR */
2849                 goto out;
2850         }
2851
2852         retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
2853 out:
2854         drbd_adm_finish(info, retcode);
2855         return 0;
2856 }
2857
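     /* Deleting a minor is only allowed while it is Diskless and Secondary. */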
2858 static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
2859 {
2860         if (mdev->state.disk == D_DISKLESS &&
2861             /* no need to also require mdev->state.conn == C_STANDALONE;
2862              * we may want to delete a minor from a live replication group.
2863              */
2864             mdev->state.role == R_SECONDARY) {
2865                 drbd_delete_device(mdev_to_minor(mdev));
2866                 return NO_ERROR;
2867         } else
2868                 return ERR_MINOR_CONFIGURED;
2869 }
2870
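     /* Netlink handler: delete a single minor, serialized by drbd_cfg_mutex. */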
2871 int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
2872 {
2873         enum drbd_ret_code retcode;
2874
2875         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2876         if (!adm_ctx.reply_skb)
2877                 return retcode;
2878         if (retcode != NO_ERROR)
2879                 goto out;
2880
2881         mutex_lock(&drbd_cfg_mutex);
2882         retcode = adm_delete_minor(adm_ctx.mdev);
2883         mutex_unlock(&drbd_cfg_mutex);
2884         /* if this was the last volume of this connection,
2885          * this will terminate all threads */
2886         if (retcode == NO_ERROR)
2887                 conn_reconfig_done(adm_ctx.tconn);
2888 out:
2889         drbd_adm_finish(info, retcode);
2890         return 0;
2891 }
2892
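     /* Netlink handler: take a whole connection down in one request:
      * demote all volumes to Secondary, disconnect, detach and delete
      * every volume, stop the threads, and free the connection itself. */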
2893 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
2894 {
2895         enum drbd_ret_code retcode;
2896         enum drbd_state_rv rv;
2897         struct drbd_conf *mdev;
2898         unsigned i;
2899
2900         retcode = drbd_adm_prepare(skb, info, 0);
2901         if (!adm_ctx.reply_skb)
2902                 return retcode;
2903         if (retcode != NO_ERROR)
2904                 goto out;
2905
2906         if (!adm_ctx.tconn) {
2907                 retcode = ERR_CONN_NOT_KNOWN;
2908                 goto out;
2909         }
2910
2911         mutex_lock(&drbd_cfg_mutex);
2912         /* demote */
2913         idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
2914                 retcode = drbd_set_role(mdev, R_SECONDARY, 0);
2915                 if (retcode < SS_SUCCESS) {
2916                         drbd_msg_put_info("failed to demote");
2917                         goto out_unlock;
2918                 }
2919         }
2920
2921         /* disconnect */
2922         rv = conn_try_disconnect(adm_ctx.tconn, 0);
2923         if (rv < SS_SUCCESS) {
2924                 retcode = rv; /* enum type mismatch! */
2925                 drbd_msg_put_info("failed to disconnect");
2926                 goto out_unlock;
2927         }
2928
2929         /* detach */
2930         idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
2931                 rv = adm_detach(mdev);
2932                 if (rv < SS_SUCCESS) {
2933                         retcode = rv; /* enum type mismatch! */
2934                         drbd_msg_put_info("failed to detach");
2935                         goto out_unlock;
2936                 }
2937         }
2938
2939         /* delete volumes */
2940         idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
2941                 retcode = adm_delete_minor(mdev);
2942                 if (retcode != NO_ERROR) {
2943                         /* "cannot happen" */
2944                         drbd_msg_put_info("failed to delete volume");
2945                         goto out_unlock;
2946                 }
2947         }
2948
2949         /* stop all threads */
2950         conn_reconfig_done(adm_ctx.tconn);
2951
2952         /* delete connection */
2953         if (conn_lowest_minor(adm_ctx.tconn) < 0) {
2954                 drbd_free_tconn(adm_ctx.tconn);
2955                 retcode = NO_ERROR;
2956         } else {
2957                 /* "cannot happen" */
2958                 retcode = ERR_CONN_IN_USE;
2959                 drbd_msg_put_info("failed to delete connection");
2960                 goto out_unlock;
2961         }
2962 out_unlock:
2963         mutex_unlock(&drbd_cfg_mutex);
2964 out:
2965         drbd_adm_finish(info, retcode);
2966         return 0;
2967 }
2968
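     /* Netlink handler: delete a connection; this only succeeds if no
      * minors are assigned to it anymore. */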
2969 int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info)
2970 {
2971         enum drbd_ret_code retcode;
2972
2973         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2974         if (!adm_ctx.reply_skb)
2975                 return retcode;
2976         if (retcode != NO_ERROR)
2977                 goto out;
2978
2979         mutex_lock(&drbd_cfg_mutex);
2980         if (conn_lowest_minor(adm_ctx.tconn) < 0) {
2981                 drbd_free_tconn(adm_ctx.tconn);
2982                 retcode = NO_ERROR;
2983         } else {
2984                 retcode = ERR_CONN_IN_USE;
2985         }
2986         mutex_unlock(&drbd_cfg_mutex);
2987
2988 out:
2989         drbd_adm_finish(info, retcode);
2990         return 0;
2991 }
2992
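     /* Broadcast a state info block (sib) for this device to userspace
      * listeners via the drbd generic netlink events multicast group.
      * Failures are only logged; -ESRCH (no listeners) is not treated
      * as an error. */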
2993 void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
2994 {
2995         static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
2996         struct sk_buff *msg;
2997         struct drbd_genlmsghdr *d_out;
2998         unsigned seq;
2999         int err = -ENOMEM;
3000
3001         seq = atomic_inc_return(&drbd_genl_seq);
3002         msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
3003         if (!msg)
3004                 goto failed;
3005
3006         err = -EMSGSIZE;
3007         d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
3008         if (!d_out) /* cannot happen, but anyway. */
3009                 goto nla_put_failure;
3010         d_out->minor = mdev_to_minor(mdev);
3011         d_out->ret_code = 0;
3012
3013         if (nla_put_status_info(msg, mdev, sib))
3014                 goto nla_put_failure;
3015         genlmsg_end(msg, d_out);
3016         err = drbd_genl_multicast_events(msg, 0);
3017         /* msg has been consumed or freed in netlink_broadcast() */
3018         if (err && err != -ESRCH)
3019                 goto failed;
3020
3021         return;
3022
3023 nla_put_failure:
3024         nlmsg_free(msg);
3025 failed:
3026         dev_err(DEV, "Error %d while broadcasting event. "
3027                         "Event seq:%u sib_reason:%u\n",
3028                         err, seq, sib->sib_reason);
3029 }