drbd: Backport the "events2" command
drivers/block/drbd/drbd_nl.c (cascardo/linux.git)
/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_state_change.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>

/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include "drbd_nla.h"
#include <linux/genl_magic_func.h>

static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
static atomic_t notify_genl_seq = ATOMIC_INIT(2); /* two. */

DEFINE_MUTEX(notification_mutex);

/* used by blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
        genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
        if (genlmsg_reply(skb, info))
                pr_err("error sending genl reply\n");
}

/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail was no space in skb, and there are 4k available. */
static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
{
        struct nlattr *nla;
        int err = -EMSGSIZE;

        if (!info || !info[0])
                return 0;

        nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
        if (!nla)
                return err;

        err = nla_put_string(skb, T_info_text, info);
        if (err) {
                nla_nest_cancel(skb, nla);
                return err;
        } else
                nla_nest_end(skb, nla);
        return 0;
}

/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 *
 * At this point, we still rely on the global genl_lock().
 * If we want to avoid that, and allow "genl_family.parallel_ops", we may need
 * to add additional synchronization against object destruction/modification.
 */
#define DRBD_ADM_NEED_MINOR     1
#define DRBD_ADM_NEED_RESOURCE  2
#define DRBD_ADM_NEED_CONNECTION 4
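/* Note: the DRBD_ADM_NEED_* values are flag bits; callers pass them OR'ed
 * together in "flags" below.  On success this returns NO_ERROR and holds
 * references on the objects it looked up; on failure it returns either a
 * negative errno (the reply skb could not even be set up) or a DRBD ERR_*
 * code, with an explanation already queued in the reply skb. */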
static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
        struct sk_buff *skb, struct genl_info *info, unsigned flags)
{
        struct drbd_genlmsghdr *d_in = info->userhdr;
        const u8 cmd = info->genlhdr->cmd;
        int err;

        memset(adm_ctx, 0, sizeof(*adm_ctx));

        /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
        if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
                return -EPERM;

        adm_ctx->reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!adm_ctx->reply_skb) {
                err = -ENOMEM;
                goto fail;
        }

        adm_ctx->reply_dh = genlmsg_put_reply(adm_ctx->reply_skb,
                                        info, &drbd_genl_family, 0, cmd);
        /* put of a few bytes into a fresh skb of >= 4k will always succeed.
         * but anyways */
        if (!adm_ctx->reply_dh) {
                err = -ENOMEM;
                goto fail;
        }

        adm_ctx->reply_dh->minor = d_in->minor;
        adm_ctx->reply_dh->ret_code = NO_ERROR;

        adm_ctx->volume = VOLUME_UNSPECIFIED;
        if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
                struct nlattr *nla;
                /* parse and validate only */
                err = drbd_cfg_context_from_attrs(NULL, info);
                if (err)
                        goto fail;

                /* It was present, and valid,
                 * copy it over to the reply skb. */
                err = nla_put_nohdr(adm_ctx->reply_skb,
                                info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
                                info->attrs[DRBD_NLA_CFG_CONTEXT]);
                if (err)
                        goto fail;

                /* and assign stuff to the adm_ctx */
                nla = nested_attr_tb[__nla_type(T_ctx_volume)];
                if (nla)
                        adm_ctx->volume = nla_get_u32(nla);
                nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
                if (nla)
                        adm_ctx->resource_name = nla_data(nla);
                adm_ctx->my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
                adm_ctx->peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
                if ((adm_ctx->my_addr &&
                     nla_len(adm_ctx->my_addr) > sizeof(adm_ctx->connection->my_addr)) ||
                    (adm_ctx->peer_addr &&
                     nla_len(adm_ctx->peer_addr) > sizeof(adm_ctx->connection->peer_addr))) {
                        err = -EINVAL;
                        goto fail;
                }
        }

        adm_ctx->minor = d_in->minor;
        adm_ctx->device = minor_to_device(d_in->minor);

        /* We are protected by the global genl_lock().
         * But we may explicitly drop it/retake it in drbd_adm_set_role(),
         * so make sure this object stays around. */
        if (adm_ctx->device)
                kref_get(&adm_ctx->device->kref);

        if (adm_ctx->resource_name) {
                adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name);
        }

        if (!adm_ctx->device && (flags & DRBD_ADM_NEED_MINOR)) {
                drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor");
                return ERR_MINOR_INVALID;
        }
        if (!adm_ctx->resource && (flags & DRBD_ADM_NEED_RESOURCE)) {
                drbd_msg_put_info(adm_ctx->reply_skb, "unknown resource");
                if (adm_ctx->resource_name)
                        return ERR_RES_NOT_KNOWN;
                return ERR_INVALID_REQUEST;
        }

        if (flags & DRBD_ADM_NEED_CONNECTION) {
                if (adm_ctx->resource) {
                        drbd_msg_put_info(adm_ctx->reply_skb, "no resource name expected");
                        return ERR_INVALID_REQUEST;
                }
                if (adm_ctx->device) {
                        drbd_msg_put_info(adm_ctx->reply_skb, "no minor number expected");
                        return ERR_INVALID_REQUEST;
                }
                if (adm_ctx->my_addr && adm_ctx->peer_addr)
                        adm_ctx->connection = conn_get_by_addrs(nla_data(adm_ctx->my_addr),
                                                          nla_len(adm_ctx->my_addr),
                                                          nla_data(adm_ctx->peer_addr),
                                                          nla_len(adm_ctx->peer_addr));
                if (!adm_ctx->connection) {
                        drbd_msg_put_info(adm_ctx->reply_skb, "unknown connection");
                        return ERR_INVALID_REQUEST;
                }
        }

        /* some more paranoia, if the request was over-determined */
        if (adm_ctx->device && adm_ctx->resource &&
            adm_ctx->device->resource != adm_ctx->resource) {
                pr_warning("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
                                adm_ctx->minor, adm_ctx->resource->name,
                                adm_ctx->device->resource->name);
                drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource");
                return ERR_INVALID_REQUEST;
        }
        if (adm_ctx->device &&
            adm_ctx->volume != VOLUME_UNSPECIFIED &&
            adm_ctx->volume != adm_ctx->device->vnr) {
                pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
                                adm_ctx->minor, adm_ctx->volume,
                                adm_ctx->device->vnr,
                                adm_ctx->device->resource->name);
                drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume");
                return ERR_INVALID_REQUEST;
        }

        /* still, provide adm_ctx->resource always, if possible. */
        if (!adm_ctx->resource) {
                adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource
                        : adm_ctx->connection ? adm_ctx->connection->resource : NULL;
                if (adm_ctx->resource)
                        kref_get(&adm_ctx->resource->kref);
        }

        return NO_ERROR;

fail:
        nlmsg_free(adm_ctx->reply_skb);
        adm_ctx->reply_skb = NULL;
        return err;
}

static int drbd_adm_finish(struct drbd_config_context *adm_ctx,
        struct genl_info *info, int retcode)
{
        if (adm_ctx->device) {
                kref_put(&adm_ctx->device->kref, drbd_destroy_device);
                adm_ctx->device = NULL;
        }
        if (adm_ctx->connection) {
                kref_put(&adm_ctx->connection->kref, &drbd_destroy_connection);
                adm_ctx->connection = NULL;
        }
        if (adm_ctx->resource) {
                kref_put(&adm_ctx->resource->kref, drbd_destroy_resource);
                adm_ctx->resource = NULL;
        }

        if (!adm_ctx->reply_skb)
                return -ENOMEM;

        adm_ctx->reply_dh->ret_code = retcode;
        drbd_adm_send_reply(adm_ctx->reply_skb, info);
        return 0;
}

static void setup_khelper_env(struct drbd_connection *connection, char **envp)
{
        char *afs;

        /* FIXME: A future version will not allow this case. */
        if (connection->my_addr_len == 0 || connection->peer_addr_len == 0)
                return;

        switch (((struct sockaddr *)&connection->peer_addr)->sa_family) {
        case AF_INET6:
                afs = "ipv6";
                snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
                         &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr);
                break;
        case AF_INET:
                afs = "ipv4";
                snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
                         &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
                break;
        default:
                afs = "ssocks";
                snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
                         &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
        }
        snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}

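/* The user mode helper is invoked as "<usermode_helper> <cmd> minor-<n>"
 * with a minimal environment: HOME, TERM and PATH, plus the DRBD_PEER_AF
 * and DRBD_PEER_ADDRESS variables filled in by setup_khelper_env() above
 * (e.g. DRBD_PEER_AF=ipv4, DRBD_PEER_ADDRESS=10.0.0.2 for an IPv4 peer;
 * the address shown here is only an illustration). */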
int drbd_khelper(struct drbd_device *device, char *cmd)
{
        char *envp[] = { "HOME=/",
                        "TERM=linux",
                        "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
                         (char[20]) { }, /* address family */
                         (char[60]) { }, /* address */
                        NULL };
        char mb[12];
        char *argv[] = {usermode_helper, cmd, mb, NULL };
        struct drbd_connection *connection = first_peer_device(device)->connection;
        struct sib_info sib;
        int ret;

        if (current == connection->worker.task)
                set_bit(CALLBACK_PENDING, &connection->flags);

        snprintf(mb, 12, "minor-%d", device_to_minor(device));
        setup_khelper_env(connection, envp);

        /* The helper may take some time.
         * write out any unsynced meta data changes now */
        drbd_md_sync(device);

        drbd_info(device, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
        sib.sib_reason = SIB_HELPER_PRE;
        sib.helper_name = cmd;
        drbd_bcast_event(device, &sib);
        notify_helper(NOTIFY_CALL, device, connection, cmd, 0);
        ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
        if (ret)
                drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
                                usermode_helper, cmd, mb,
                                (ret >> 8) & 0xff, ret);
        else
                drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
                                usermode_helper, cmd, mb,
                                (ret >> 8) & 0xff, ret);
        sib.sib_reason = SIB_HELPER_POST;
        sib.helper_exit_code = ret;
        drbd_bcast_event(device, &sib);
        notify_helper(NOTIFY_RESPONSE, device, connection, cmd, ret);

        if (current == connection->worker.task)
                clear_bit(CALLBACK_PENDING, &connection->flags);

        if (ret < 0) /* Ignore any ERRNOs we got. */
                ret = 0;

        return ret;
}

static int conn_khelper(struct drbd_connection *connection, char *cmd)
{
        char *envp[] = { "HOME=/",
                        "TERM=linux",
                        "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
                         (char[20]) { }, /* address family */
                         (char[60]) { }, /* address */
                        NULL };
        char *resource_name = connection->resource->name;
        char *argv[] = {usermode_helper, cmd, resource_name, NULL };
        int ret;

        setup_khelper_env(connection, envp);
        conn_md_sync(connection);

        drbd_info(connection, "helper command: %s %s %s\n", usermode_helper, cmd, resource_name);
        /* TODO: conn_bcast_event() ?? */
        notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0);

        ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
        if (ret)
                drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
                          usermode_helper, cmd, resource_name,
                          (ret >> 8) & 0xff, ret);
        else
                drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
                          usermode_helper, cmd, resource_name,
                          (ret >> 8) & 0xff, ret);
        /* TODO: conn_bcast_event() ?? */
        notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret);

        if (ret < 0) /* Ignore any ERRNOs we got. */
                ret = 0;

        return ret;
}

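/* Returns the strongest fencing policy configured on any volume of this
 * connection, or FP_NOT_AVAIL if no volume has a local disk that is at
 * least Consistent.  (This assumes the usual ordering of the fencing
 * policies, FP_DONT_CARE < FP_RESOURCE < FP_STONITH.) */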
static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection)
{
        enum drbd_fencing_p fp = FP_NOT_AVAIL;
        struct drbd_peer_device *peer_device;
        int vnr;

        rcu_read_lock();
        idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
                struct drbd_device *device = peer_device->device;
                if (get_ldev_if_state(device, D_CONSISTENT)) {
                        struct disk_conf *disk_conf =
                                rcu_dereference(peer_device->device->ldev->disk_conf);
                        fp = max_t(enum drbd_fencing_p, fp, disk_conf->fencing);
                        put_ldev(device);
                }
        }
        rcu_read_unlock();

        if (fp == FP_NOT_AVAIL) {
                /* IO Suspending works on the whole resource.
                   Do it only for one device. */
                vnr = 0;
                peer_device = idr_get_next(&connection->peer_devices, &vnr);
                drbd_change_state(peer_device->device, CS_VERBOSE | CS_HARD, NS(susp_fen, 0));
        }

        return fp;
}

bool conn_try_outdate_peer(struct drbd_connection *connection)
{
        unsigned int connect_cnt;
        union drbd_state mask = { };
        union drbd_state val = { };
        enum drbd_fencing_p fp;
        char *ex_to_string;
        int r;

        spin_lock_irq(&connection->resource->req_lock);
        if (connection->cstate >= C_WF_REPORT_PARAMS) {
                drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
                spin_unlock_irq(&connection->resource->req_lock);
                return false;
        }

        connect_cnt = connection->connect_cnt;
        spin_unlock_irq(&connection->resource->req_lock);

        fp = highest_fencing_policy(connection);
        switch (fp) {
        case FP_NOT_AVAIL:
                drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
                goto out;
        case FP_DONT_CARE:
                return true;
        default: ;
        }

        r = conn_khelper(connection, "fence-peer");

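        /* The fence-peer helper reports its verdict through its exit status
         * ((r >> 8) & 0xff).  Each exit code below maps to a new assumption
         * about the peer's disk state (mask/val), which is then committed
         * as a state change further down. */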
        switch ((r>>8) & 0xff) {
        case 3: /* peer is inconsistent */
                ex_to_string = "peer is inconsistent or worse";
                mask.pdsk = D_MASK;
                val.pdsk = D_INCONSISTENT;
                break;
        case 4: /* peer got outdated, or was already outdated */
                ex_to_string = "peer was fenced";
                mask.pdsk = D_MASK;
                val.pdsk = D_OUTDATED;
                break;
        case 5: /* peer was down */
                if (conn_highest_disk(connection) == D_UP_TO_DATE) {
                        /* we will(have) create(d) a new UUID anyways... */
                        ex_to_string = "peer is unreachable, assumed to be dead";
                        mask.pdsk = D_MASK;
                        val.pdsk = D_OUTDATED;
                } else {
                        ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
                }
                break;
        case 6: /* Peer is primary, voluntarily outdate myself.
                 * This is useful when an unconnected R_SECONDARY is asked to
                 * become R_PRIMARY, but finds the other peer being active. */
                ex_to_string = "peer is active";
                drbd_warn(connection, "Peer is primary, outdating myself.\n");
                mask.disk = D_MASK;
                val.disk = D_OUTDATED;
                break;
        case 7:
                if (fp != FP_STONITH)
                        drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
                ex_to_string = "peer was stonithed";
                mask.pdsk = D_MASK;
                val.pdsk = D_OUTDATED;
                break;
        default:
                /* The script is broken ... */
                drbd_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
                return false; /* Eventually leave IO frozen */
        }

        drbd_info(connection, "fence-peer helper returned %d (%s)\n",
                  (r>>8) & 0xff, ex_to_string);

 out:

        /* Not using
           conn_request_state(connection, mask, val, CS_VERBOSE);
           here, because we might have been able to re-establish the
           connection in the meantime. */
        spin_lock_irq(&connection->resource->req_lock);
        if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
                if (connection->connect_cnt != connect_cnt)
                        /* In case the connection was established and dropped
                           while the fence-peer handler was running, ignore it */
                        drbd_info(connection, "Ignoring fence-peer exit code\n");
                else
                        _conn_request_state(connection, mask, val, CS_VERBOSE);
        }
        spin_unlock_irq(&connection->resource->req_lock);

        return conn_highest_pdsk(connection) <= D_OUTDATED;
}

static int _try_outdate_peer_async(void *data)
{
        struct drbd_connection *connection = (struct drbd_connection *)data;

        conn_try_outdate_peer(connection);

        kref_put(&connection->kref, drbd_destroy_connection);
        return 0;
}

void conn_try_outdate_peer_async(struct drbd_connection *connection)
{
        struct task_struct *opa;

        kref_get(&connection->kref);
        /* We may just have force_sig()'ed this thread
         * to get it out of some blocking network function.
         * Clear signals; otherwise kthread_run(), which internally uses
         * wait_on_completion_killable(), will mistake our pending signal
         * for a new fatal signal and fail. */
        flush_signals(current);
        opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
        if (IS_ERR(opa)) {
                drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
                kref_put(&connection->kref, drbd_destroy_connection);
        }
}

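/* Try to promote/demote this device.  The state change is retried up to
 * max_tries times; between attempts, mask/val are adjusted to reflect what
 * was learned from the previous failure (e.g. outdate the peer first, or,
 * with force, consider the local data UpToDate). */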
enum drbd_state_rv
drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force)
{
        struct drbd_peer_device *const peer_device = first_peer_device(device);
        struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
        const int max_tries = 4;
        enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
        struct net_conf *nc;
        int try = 0;
        int forced = 0;
        union drbd_state mask, val;

        if (new_role == R_PRIMARY) {
                struct drbd_connection *connection;

                /* Detect dead peers as soon as possible.  */

                rcu_read_lock();
                for_each_connection(connection, device->resource)
                        request_ping(connection);
                rcu_read_unlock();
        }

        mutex_lock(device->state_mutex);

        mask.i = 0; mask.role = R_MASK;
        val.i  = 0; val.role  = new_role;

        while (try++ < max_tries) {
                rv = _drbd_request_state_holding_state_mutex(device, mask, val, CS_WAIT_COMPLETE);

                /* in case we first succeeded to outdate,
                 * but now suddenly could establish a connection */
                if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
                        val.pdsk = 0;
                        mask.pdsk = 0;
                        continue;
                }

                if (rv == SS_NO_UP_TO_DATE_DISK && force &&
                    (device->state.disk < D_UP_TO_DATE &&
                     device->state.disk >= D_INCONSISTENT)) {
                        mask.disk = D_MASK;
                        val.disk  = D_UP_TO_DATE;
                        forced = 1;
                        continue;
                }

                if (rv == SS_NO_UP_TO_DATE_DISK &&
                    device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
                        D_ASSERT(device, device->state.pdsk == D_UNKNOWN);

                        if (conn_try_outdate_peer(connection)) {
                                val.disk = D_UP_TO_DATE;
                                mask.disk = D_MASK;
                        }
                        continue;
                }

                if (rv == SS_NOTHING_TO_DO)
                        goto out;
                if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
                        if (!conn_try_outdate_peer(connection) && force) {
                                drbd_warn(device, "Forced into split brain situation!\n");
                                mask.pdsk = D_MASK;
                                val.pdsk  = D_OUTDATED;

                        }
                        continue;
                }
                if (rv == SS_TWO_PRIMARIES) {
                        /* Maybe the peer is detected as dead very soon...
                           retry at most once more in this case. */
                        int timeo;
                        rcu_read_lock();
                        nc = rcu_dereference(connection->net_conf);
                        timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
                        rcu_read_unlock();
                        schedule_timeout_interruptible(timeo);
                        if (try < max_tries)
                                try = max_tries - 1;
                        continue;
                }
                if (rv < SS_SUCCESS) {
                        rv = _drbd_request_state(device, mask, val,
                                                CS_VERBOSE + CS_WAIT_COMPLETE);
                        if (rv < SS_SUCCESS)
                                goto out;
                }
                break;
        }

        if (rv < SS_SUCCESS)
                goto out;

        if (forced)
                drbd_warn(device, "Forced to consider local data as UpToDate!\n");

        /* Wait until nothing is on the fly :) */
        wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0);

        /* FIXME also wait for all pending P_BARRIER_ACK? */

        if (new_role == R_SECONDARY) {
                if (get_ldev(device)) {
                        device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
                        put_ldev(device);
                }
        } else {
                mutex_lock(&device->resource->conf_update);
                nc = connection->net_conf;
                if (nc)
                        nc->discard_my_data = 0; /* without copy; single bit op is atomic */
                mutex_unlock(&device->resource->conf_update);

                if (get_ldev(device)) {
                        if (((device->state.conn < C_CONNECTED ||
                               device->state.pdsk <= D_FAILED)
                              && device->ldev->md.uuid[UI_BITMAP] == 0) || forced)
                                drbd_uuid_new_current(device);

                        device->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
                        put_ldev(device);
                }
        }

        /* writeout of activity log covered areas of the bitmap
         * to stable storage is done in the after-state-change work already */

        if (device->state.conn >= C_WF_REPORT_PARAMS) {
                /* if this was forced, we should consider sync */
                if (forced)
                        drbd_send_uuids(peer_device);
                drbd_send_current_state(peer_device);
        }

        drbd_md_sync(device);
        set_disk_ro(device->vdisk, new_role == R_SECONDARY);
        kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
out:
        mutex_unlock(device->state_mutex);
        return rv;
}

static const char *from_attrs_err_to_txt(int err)
{
        return  err == -ENOMSG ? "required attribute missing" :
                err == -EOPNOTSUPP ? "unknown mandatory attribute" :
                err == -EEXIST ? "can not change invariant setting" :
                "invalid attribute value";
}

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
        struct drbd_config_context adm_ctx;
        struct set_role_parms parms;
        int err;
        enum drbd_ret_code retcode;

        retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;

        memset(&parms, 0, sizeof(parms));
        if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
                err = set_role_parms_from_attrs(&parms, info);
                if (err) {
                        retcode = ERR_MANDATORY_TAG;
                        drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
                        goto out;
                }
        }
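        /* drbd_set_role() may block for a long time (fence-peer handler,
         * waiting for the peer).  adm_ctx.device was kref_get()'ed in
         * drbd_adm_prepare(), so it is safe to drop the global genl lock
         * here and retake it below. */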
        genl_unlock();
        mutex_lock(&adm_ctx.resource->adm_mutex);

        if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
                retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
        else
                retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);

        mutex_unlock(&adm_ctx.resource->adm_mutex);
        genl_lock();
out:
        drbd_adm_finish(&adm_ctx, info, retcode);
        return 0;
}

/* Initializes the md.*_offset members, so we are able to find
 * the on disk meta data.
 *
 * We currently have two possible layouts:
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | al_offset == 8 |
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 * internal:
 *            |----------- md_size_sect ------------------|
 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ]
 *                        | al_offset < 0 |
 *            | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 *  Activity log size used to be fixed 32kB,
 *  but is about to become configurable.
 */
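/* Rough rule of thumb for the internal layout below (assuming the usual
 * 4 KiB bitmap granularity, i.e. one 512-byte bitmap sector covers 16 MiB
 * of data): the bitmap costs about 32 KiB per 1 GiB of backing storage. */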
static void drbd_md_set_sector_offsets(struct drbd_device *device,
                                       struct drbd_backing_dev *bdev)
{
        sector_t md_size_sect = 0;
        unsigned int al_size_sect = bdev->md.al_size_4k * 8;

        bdev->md.md_offset = drbd_md_ss(bdev);

        switch (bdev->md.meta_dev_idx) {
        default:
                /* v07 style fixed size indexed meta data */
                bdev->md.md_size_sect = MD_128MB_SECT;
                bdev->md.al_offset = MD_4kB_SECT;
                bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
                break;
        case DRBD_MD_INDEX_FLEX_EXT:
                /* just occupy the full device; unit: sectors */
                bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
                bdev->md.al_offset = MD_4kB_SECT;
                bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
                break;
        case DRBD_MD_INDEX_INTERNAL:
        case DRBD_MD_INDEX_FLEX_INT:
                /* al size is still fixed */
                bdev->md.al_offset = -al_size_sect;
                /* we need (slightly less than) ~ this many bitmap sectors: */
                md_size_sect = drbd_get_capacity(bdev->backing_bdev);
                md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
                md_size_sect = BM_SECT_TO_EXT(md_size_sect);
                md_size_sect = ALIGN(md_size_sect, 8);

                /* plus the "drbd meta data super block",
                 * and the activity log; */
                md_size_sect += MD_4kB_SECT + al_size_sect;

                bdev->md.md_size_sect = md_size_sect;
                /* bitmap offset is adjusted by 'super' block size */
                bdev->md.bm_offset   = -md_size_sect + MD_4kB_SECT;
                break;
        }
}

/* input size is expected to be in KB */
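/* e.g. ppsize(buf, 3)       -> "3 KB"
 *      ppsize(buf, 1048576) -> "1024 MB" */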
char *ppsize(char *buf, unsigned long long size)
{
        /* Needs 9 bytes at max including trailing NUL:
         * -1ULL ==> "16384 EB" */
        static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
        int base = 0;
        while (size >= 10000 && base < sizeof(units)-1) {
                /* shift + round */
                size = (size >> 10) + !!(size & (1<<9));
                base++;
        }
        sprintf(buf, "%u %cB", (unsigned)size, units[base]);

        return buf;
}

/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 *  remote READ does inc_ap_bio, receiver would need to receive answer
 *  packet from remote to dec_ap_bio again.
 *  receiver receive_sizes(), comes here,
 *  waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 *  (not connected, or bad/no disk on peer):
 *  see drbd_fail_request_early, ap_bio_cnt is zero.
 *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 *  peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes a device->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_device *device)
{
        set_bit(SUSPEND_IO, &device->flags);
        if (drbd_suspended(device))
                return;
        wait_event(device->misc_wait, !atomic_read(&device->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_device *device)
{
        clear_bit(SUSPEND_IO, &device->flags);
        wake_up(&device->misc_wait);
}

/**
 * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
 * @device:     DRBD device.
 *
 * Returns an enum determine_dev_size; negative values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size
drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
{
        sector_t prev_first_sect, prev_size; /* previous meta location */
        sector_t la_size_sect, u_size;
        struct drbd_md *md = &device->ldev->md;
        u32 prev_al_stripe_size_4k;
        u32 prev_al_stripes;
        sector_t size;
        char ppb[10];
        void *buffer;

        int md_moved, la_size_changed;
        enum determine_dev_size rv = DS_UNCHANGED;

        /* race:
         * application request passes inc_ap_bio,
         * but then cannot get an AL-reference.
         * this function later may wait on ap_bio_cnt == 0. -> deadlock.
         *
         * to avoid that:
         * Suspend IO right here.
         * still lock the act_log to not trigger ASSERTs there.
         */
        drbd_suspend_io(device);
        buffer = drbd_md_get_buffer(device, __func__); /* Lock meta-data IO */
        if (!buffer) {
                drbd_resume_io(device);
                return DS_ERROR;
        }

        /* no wait necessary anymore, actually we could assert that */
        wait_event(device->al_wait, lc_try_lock(device->act_log));

        prev_first_sect = drbd_md_first_sector(device->ldev);
        prev_size = device->ldev->md.md_size_sect;
        la_size_sect = device->ldev->md.la_size_sect;

        if (rs) {
                /* rs is non NULL if we should change the AL layout only */

                prev_al_stripes = md->al_stripes;
                prev_al_stripe_size_4k = md->al_stripe_size_4k;

                md->al_stripes = rs->al_stripes;
                md->al_stripe_size_4k = rs->al_stripe_size / 4;
                md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
        }

        drbd_md_set_sector_offsets(device, device->ldev);

        rcu_read_lock();
        u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
        rcu_read_unlock();
        size = drbd_new_dev_size(device, device->ldev, u_size, flags & DDSF_FORCED);

        if (size < la_size_sect) {
                if (rs && u_size == 0) {
                        /* Remove "rs &&" later. This check should always be active, but
                           right now the receiver expects the permissive behavior */
                        drbd_warn(device, "Implicit shrink not allowed. "
                                 "Use --size=%llus for explicit shrink.\n",
                                 (unsigned long long)size);
                        rv = DS_ERROR_SHRINK;
                }
                if (u_size > size)
                        rv = DS_ERROR_SPACE_MD;
                if (rv != DS_UNCHANGED)
                        goto err_out;
        }

        if (drbd_get_capacity(device->this_bdev) != size ||
            drbd_bm_capacity(device) != size) {
                int err;
                err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
                if (unlikely(err)) {
                        /* currently there is only one error: ENOMEM! */
                        size = drbd_bm_capacity(device)>>1;
                        if (size == 0) {
                                drbd_err(device, "OUT OF MEMORY! "
                                    "Could not allocate bitmap!\n");
                        } else {
                                drbd_err(device, "BM resizing failed. "
                                    "Leaving size unchanged at size = %lu KB\n",
                                    (unsigned long)size);
                        }
                        rv = DS_ERROR;
                }
                /* racy, see comments above. */
                drbd_set_my_capacity(device, size);
                device->ldev->md.la_size_sect = size;
                drbd_info(device, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
                     (unsigned long long)size>>1);
        }
        if (rv <= DS_ERROR)
                goto err_out;

        la_size_changed = (la_size_sect != device->ldev->md.la_size_sect);

        md_moved = prev_first_sect != drbd_md_first_sector(device->ldev)
                || prev_size       != device->ldev->md.md_size_sect;

        if (la_size_changed || md_moved || rs) {
                u32 prev_flags;

                /* We do some synchronous IO below, which may take some time.
                 * Clear the timer, to avoid scary "timer expired!" messages,
                 * "Superblock" is written out at least twice below, anyways. */
                del_timer(&device->md_sync_timer);
                drbd_al_shrink(device); /* All extents inactive. */

                prev_flags = md->flags;
                md->flags &= ~MDF_PRIMARY_IND;
                drbd_md_write(device, buffer);

                drbd_info(device, "Writing the whole bitmap, %s\n",
                         la_size_changed && md_moved ? "size changed and md moved" :
                         la_size_changed ? "size changed" : "md moved");
                /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
                drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
                               "size changed", BM_LOCKED_MASK);
                drbd_initialize_al(device, buffer);

                md->flags = prev_flags;
                drbd_md_write(device, buffer);

                if (rs)
                        drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
                                  md->al_stripes, md->al_stripe_size_4k * 4);
        }

        if (size > la_size_sect)
                rv = la_size_sect ? DS_GREW : DS_GREW_FROM_ZERO;
        if (size < la_size_sect)
                rv = DS_SHRUNK;

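        /* The if (0) block below is only reachable via "goto err_out" above;
         * it undoes a requested AL layout change before we unlock and
         * resume IO. */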
        if (0) {
        err_out:
                if (rs) {
                        md->al_stripes = prev_al_stripes;
                        md->al_stripe_size_4k = prev_al_stripe_size_4k;
                        md->al_size_4k = (u64)prev_al_stripes * prev_al_stripe_size_4k;

                        drbd_md_set_sector_offsets(device, device->ldev);
                }
        }
        lc_unlock(device->act_log);
        wake_up(&device->al_wait);
        drbd_md_put_buffer(device);
        drbd_resume_io(device);

        return rv;
}

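/* Size policy: if both the local and the peer size are known, use the
 * smaller of the two; otherwise fall back to the last agreed size (further
 * clamped by whatever is known); a nonzero user-requested size wins if it
 * fits. */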
sector_t
drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
                  sector_t u_size, int assume_peer_has_space)
{
        sector_t p_size = device->p_size;   /* partner's disk size. */
        sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
        sector_t m_size; /* my size */
        sector_t size = 0;

        m_size = drbd_get_max_capacity(bdev);

        if (device->state.conn < C_CONNECTED && assume_peer_has_space) {
                drbd_warn(device, "Resize while not connected was forced by the user!\n");
                p_size = m_size;
        }

        if (p_size && m_size) {
                size = min_t(sector_t, p_size, m_size);
        } else {
                if (la_size_sect) {
                        size = la_size_sect;
                        if (m_size && m_size < size)
                                size = m_size;
                        if (p_size && p_size < size)
                                size = p_size;
                } else {
                        if (m_size)
                                size = m_size;
                        if (p_size)
                                size = p_size;
                }
        }

        if (size == 0)
                drbd_err(device, "Both nodes diskless!\n");

        if (u_size) {
                if (u_size > size)
                        drbd_err(device, "Requested disk size is too big (%lu > %lu)\n",
                            (unsigned long)u_size>>1, (unsigned long)size>>1);
                else
                        size = u_size;
        }

        return size;
}

/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @device:     DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
{
        struct lru_cache *n, *t;
        struct lc_element *e;
        unsigned int in_use;
        int i;

        if (device->act_log &&
            device->act_log->nr_elements == dc->al_extents)
                return 0;

        in_use = 0;
        t = device->act_log;
        n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
                dc->al_extents, sizeof(struct lc_element), 0);

        if (n == NULL) {
                drbd_err(device, "Cannot allocate act_log lru!\n");
                return -ENOMEM;
        }
        spin_lock_irq(&device->al_lock);
        if (t) {
                for (i = 0; i < t->nr_elements; i++) {
                        e = lc_element_by_index(t, i);
                        if (e->refcnt)
                                drbd_err(device, "refcnt(%d)==%d\n",
                                    e->lc_number, e->refcnt);
                        in_use += e->refcnt;
                }
        }
        if (!in_use)
                device->act_log = n;
        spin_unlock_irq(&device->al_lock);
        if (in_use) {
                drbd_err(device, "Activity log still in use!\n");
                lc_destroy(n);
                return -EBUSY;
        } else {
                if (t)
                        lc_destroy(t);
        }
        drbd_md_mark_dirty(device); /* we changed device->act_log->nr_elements */
        return 0;
}

static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
                                   unsigned int max_bio_size)
{
        struct request_queue * const q = device->rq_queue;
        unsigned int max_hw_sectors = max_bio_size >> 9;
        unsigned int max_segments = 0;
        struct request_queue *b = NULL;

        if (bdev) {
                b = bdev->backing_bdev->bd_disk->queue;

                max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
                rcu_read_lock();
                max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs;
                rcu_read_unlock();

                blk_set_stacking_limits(&q->limits);
                blk_queue_max_write_same_sectors(q, 0);
        }

        blk_queue_logical_block_size(q, 512);
        blk_queue_max_hw_sectors(q, max_hw_sectors);
        /* This is the workaround for "bio would need to, but cannot, be split" */
        blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
        blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

        if (b) {
                struct drbd_connection *connection = first_peer_device(device)->connection;

                if (blk_queue_discard(b) &&
                    (connection->cstate < C_CONNECTED || connection->agreed_features & FF_TRIM)) {
                        /* For now, don't allow more than one activity log extent worth of data
                         * to be discarded in one go. We may need to rework drbd_al_begin_io()
                         * to allow for even larger discard ranges */
                        blk_queue_max_discard_sectors(q, DRBD_MAX_DISCARD_SECTORS);

                        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
                        /* REALLY? Is stacking secdiscard "legal"? */
                        if (blk_queue_secdiscard(b))
                                queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
                } else {
                        blk_queue_max_discard_sectors(q, 0);
                        queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
                        queue_flag_clear_unlocked(QUEUE_FLAG_SECDISCARD, q);
                }

                blk_queue_stack_limits(q, b);

                if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
                        drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
                                 q->backing_dev_info.ra_pages,
                                 b->backing_dev_info.ra_pages);
                        q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
                }
        }
}

void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev)
{
        unsigned int now, new, local, peer;

        now = queue_max_hw_sectors(device->rq_queue) << 9;
        local = device->local_max_bio_size; /* Eventually last known value, from volatile memory */
        peer = device->peer_max_bio_size; /* Eventually last known value, from meta data */

        if (bdev) {
                local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9;
                device->local_max_bio_size = local;
        }
        local = min(local, DRBD_MAX_BIO_SIZE);

1208            Because new from 8.3.8 onwards the peer can use multiple
1209            BIOs for a single peer_request */
1210         if (device->state.conn >= C_WF_REPORT_PARAMS) {
1211                 if (first_peer_device(device)->connection->agreed_pro_version < 94)
1212                         peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
1213                         /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
1214                 else if (first_peer_device(device)->connection->agreed_pro_version == 94)
1215                         peer = DRBD_MAX_SIZE_H80_PACKET;
1216                 else if (first_peer_device(device)->connection->agreed_pro_version < 100)
1217                         peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
1218                 else
1219                         peer = DRBD_MAX_BIO_SIZE;
1220
1221                 /* We may later detach and re-attach on a disconnected Primary.
1222                  * Avoid this setting to jump back in that case.
1223                  * We want to store what we know the peer DRBD can handle,
1224                  * not what the peer IO backend can handle. */
1225                 if (peer > device->peer_max_bio_size)
1226                         device->peer_max_bio_size = peer;
1227         }
1228         new = min(local, peer);
1229
1230         if (device->state.role == R_PRIMARY && new < now)
1231                 drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
1232
1233         if (new != now)
1234                 drbd_info(device, "max BIO size = %u\n", new);
1235
1236         drbd_setup_queue_param(device, bdev, new);
1237 }
1238
/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_connection *connection)
{
        drbd_thread_start(&connection->worker);
        drbd_flush_workqueue(&connection->sender_work);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_connection *connection)
{
        bool stop_threads;
        spin_lock_irq(&connection->resource->req_lock);
        stop_threads = conn_all_vols_unconf(connection) &&
                connection->cstate == C_STANDALONE;
        spin_unlock_irq(&connection->resource->req_lock);
        if (stop_threads) {
                /* asender is implicitly stopped by receiver
                 * in conn_disconnect() */
                drbd_thread_stop(&connection->receiver);
                drbd_thread_stop(&connection->worker);
        }
}

/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_device *device)
{
        int s = 0;

        if (!lc_try_lock(device->act_log)) {
                drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n");
                return;
        }

        drbd_al_shrink(device);
        spin_lock_irq(&device->resource->req_lock);
        if (device->state.conn < C_CONNECTED)
                s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
        spin_unlock_irq(&device->resource->req_lock);
        lc_unlock(device->act_log);

        if (s)
                drbd_info(device, "Suspended AL updates\n");
}


static bool should_set_defaults(struct genl_info *info)
{
        unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
        return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}

1290 static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
1291 {
1292         /* This is limited by 16 bit "slot" numbers,
1293          * and by available on-disk context storage.
1294          *
1295          * Also (u16)~0 is special (denotes a "free" extent).
1296          *
1297          * One transaction occupies one 4kB on-disk block,
1298          * we have n such blocks in the on-disk ring buffer,
1299          * the "current" transaction may fail, leaving only (n-1) usable,
1300          * and there are 919 context slot numbers per transaction.
1301          *
1302          * 72 transaction blocks amounts to more than 2**16 context slots,
1303          * so cap there first.
1304          */
1305         const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
1306         const unsigned int sufficient_on_disk =
1307                 (max_al_nr + AL_CONTEXT_PER_TRANSACTION - 1)
1308                 / AL_CONTEXT_PER_TRANSACTION;
1309
1310         unsigned int al_size_4k = bdev->md.al_size_4k;
1311
1312         if (al_size_4k > sufficient_on_disk)
1313                 return max_al_nr;
1314
1315         return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
1316 }
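/*
 * Worked example for the cap above, assuming AL_CONTEXT_PER_TRANSACTION
 * == 919 and DRBD_AL_EXTENTS_MAX == 65534 (the values in current drbd
 * headers; not guaranteed here):
 *
 *   sufficient_on_disk = (65534 + 918) / 919 = 72 transaction blocks
 *
 * A metadata layout with al_size_4k >= 72 can therefore hold the full
 * 65534 extents, while e.g. al_size_4k == 32 is limited to
 * (32 - 1) * 919 = 28489 extents.
 */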
1317
1318 static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
1319 {
1320         return  a->disk_barrier != b->disk_barrier ||
1321                 a->disk_flushes != b->disk_flushes ||
1322                 a->disk_drain != b->disk_drain;
1323 }
1324
1325 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1326 {
1327         struct drbd_config_context adm_ctx;
1328         enum drbd_ret_code retcode;
1329         struct drbd_device *device;
1330         struct disk_conf *new_disk_conf, *old_disk_conf;
1331         struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
1332         int err, fifo_size;
1333
1334         retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
1335         if (!adm_ctx.reply_skb)
1336                 return retcode;
1337         if (retcode != NO_ERROR)
1338                 goto finish;
1339
1340         device = adm_ctx.device;
1341         mutex_lock(&adm_ctx.resource->adm_mutex);
1342
1343         /* we also need a disk
1344          * to change the options on */
1345         if (!get_ldev(device)) {
1346                 retcode = ERR_NO_DISK;
1347                 goto out;
1348         }
1349
1350         new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
1351         if (!new_disk_conf) {
1352                 retcode = ERR_NOMEM;
1353                 goto fail;
1354         }
1355
1356         mutex_lock(&device->resource->conf_update);
1357         old_disk_conf = device->ldev->disk_conf;
1358         *new_disk_conf = *old_disk_conf;
1359         if (should_set_defaults(info))
1360                 set_disk_conf_defaults(new_disk_conf);
1361
1362         err = disk_conf_from_attrs_for_change(new_disk_conf, info);
1363         if (err && err != -ENOMSG) {
1364                 retcode = ERR_MANDATORY_TAG;
1365                 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
1366                 goto fail_unlock;
1367         }
1368
1369         if (!expect(new_disk_conf->resync_rate >= 1))
1370                 new_disk_conf->resync_rate = 1;
1371
1372         if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
1373                 new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
1374         if (new_disk_conf->al_extents > drbd_al_extents_max(device->ldev))
1375                 new_disk_conf->al_extents = drbd_al_extents_max(device->ldev);
1376
1377         if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1378                 new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
1379
1380         fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
1381         if (fifo_size != device->rs_plan_s->size) {
1382                 new_plan = fifo_alloc(fifo_size);
1383                 if (!new_plan) {
1384                         drbd_err(device, "kmalloc of fifo_buffer failed\n");
1385                         retcode = ERR_NOMEM;
1386                         goto fail_unlock;
1387                 }
1388         }
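        /*
         * Sizing sketch: c_plan_ahead is configured in 0.1s units and,
         * assuming SLEEP_TIME == HZ/10 (one resync-controller step every
         * 100ms), the expression above reduces to fifo_size ==
         * c_plan_ahead, i.e. one fifo slot per controller step within the
         * plan-ahead window.  A plan-ahead of 20 (2 seconds) thus
         * allocates a 20-entry fifo.
         */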
1389
1390         drbd_suspend_io(device);
1391         wait_event(device->al_wait, lc_try_lock(device->act_log));
1392         drbd_al_shrink(device);
1393         err = drbd_check_al_size(device, new_disk_conf);
1394         lc_unlock(device->act_log);
1395         wake_up(&device->al_wait);
1396         drbd_resume_io(device);
1397
1398         if (err) {
1399                 retcode = ERR_NOMEM;
1400                 goto fail_unlock;
1401         }
1402
1403         lock_all_resources();
1404         retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
1405         if (retcode == NO_ERROR) {
1406                 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
1407                 drbd_resync_after_changed(device);
1408         }
1409         unlock_all_resources();
1410
1411         if (retcode != NO_ERROR)
1412                 goto fail_unlock;
1413
1414         if (new_plan) {
1415                 old_plan = device->rs_plan_s;
1416                 rcu_assign_pointer(device->rs_plan_s, new_plan);
1417         }
1418
1419         mutex_unlock(&device->resource->conf_update);
1420
1421         if (new_disk_conf->al_updates)
1422                 device->ldev->md.flags &= ~MDF_AL_DISABLED;
1423         else
1424                 device->ldev->md.flags |= MDF_AL_DISABLED;
1425
1426         if (new_disk_conf->md_flushes)
1427                 clear_bit(MD_NO_FUA, &device->flags);
1428         else
1429                 set_bit(MD_NO_FUA, &device->flags);
1430
1431         if (write_ordering_changed(old_disk_conf, new_disk_conf))
1432                 drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);
1433
1434         drbd_md_sync(device);
1435
1436         if (device->state.conn >= C_CONNECTED) {
1437                 struct drbd_peer_device *peer_device;
1438
1439                 for_each_peer_device(peer_device, device)
1440                         drbd_send_sync_param(peer_device);
1441         }
1442
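        /*
         * Classic RCU publish/retire: readers access disk_conf and
         * rs_plan_s under rcu_read_lock(), the new objects were published
         * with rcu_assign_pointer() above, so after synchronize_rcu() no
         * reader can still hold a reference to the old objects and they
         * may be freed.
         */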
1443         synchronize_rcu();
1444         kfree(old_disk_conf);
1445         kfree(old_plan);
1446         mod_timer(&device->request_timer, jiffies + HZ);
1447         goto success;
1448
1449 fail_unlock:
1450         mutex_unlock(&device->resource->conf_update);
1451  fail:
1452         kfree(new_disk_conf);
1453         kfree(new_plan);
1454 success:
1455         put_ldev(device);
1456  out:
1457         mutex_unlock(&adm_ctx.resource->adm_mutex);
1458  finish:
1459         drbd_adm_finish(&adm_ctx, info, retcode);
1460         return 0;
1461 }
1462
1463 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1464 {
1465         struct drbd_config_context adm_ctx;
1466         struct drbd_device *device;
1467         struct drbd_peer_device *peer_device;
1468         struct drbd_connection *connection;
1469         int err;
1470         enum drbd_ret_code retcode;
1471         enum determine_dev_size dd;
1472         sector_t max_possible_sectors;
1473         sector_t min_md_device_sectors;
1474         struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
1475         struct disk_conf *new_disk_conf = NULL;
1476         struct block_device *bdev;
1477         struct lru_cache *resync_lru = NULL;
1478         struct fifo_buffer *new_plan = NULL;
1479         union drbd_state ns, os;
1480         enum drbd_state_rv rv;
1481         struct net_conf *nc;
1482
1483         retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
1484         if (!adm_ctx.reply_skb)
1485                 return retcode;
1486         if (retcode != NO_ERROR)
1487                 goto finish;
1488
1489         device = adm_ctx.device;
1490         mutex_lock(&adm_ctx.resource->adm_mutex);
1491         peer_device = first_peer_device(device);
1492         connection = peer_device->connection;
1493         conn_reconfig_start(connection);
1494
1495         /* if you want to reconfigure, please tear down first */
1496         if (device->state.disk > D_DISKLESS) {
1497                 retcode = ERR_DISK_CONFIGURED;
1498                 goto fail;
1499         }
1500         /* It may just now have detached because of IO error.  Make sure
1501          * drbd_ldev_destroy is done already, we may end up here very fast,
1502          * e.g. if someone calls attach from the on-io-error handler,
1503          * to realize a "hot spare" feature (not that I'd recommend that) */
1504         wait_event(device->misc_wait, !test_bit(GOING_DISKLESS, &device->flags));
1505
1506         /* make sure there is no leftover from previous force-detach attempts */
1507         clear_bit(FORCE_DETACH, &device->flags);
1508         clear_bit(WAS_IO_ERROR, &device->flags);
1509         clear_bit(WAS_READ_ERROR, &device->flags);
1510
1511         /* and no leftover from previously aborted resync or verify, either */
1512         device->rs_total = 0;
1513         device->rs_failed = 0;
1514         atomic_set(&device->rs_pending_cnt, 0);
1515
1516         /* allocation not in the IO path, drbdsetup context */
1517         nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1518         if (!nbc) {
1519                 retcode = ERR_NOMEM;
1520                 goto fail;
1521         }
1522         spin_lock_init(&nbc->md.uuid_lock);
1523
1524         new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
1525         if (!new_disk_conf) {
1526                 retcode = ERR_NOMEM;
1527                 goto fail;
1528         }
1529         nbc->disk_conf = new_disk_conf;
1530
1531         set_disk_conf_defaults(new_disk_conf);
1532         err = disk_conf_from_attrs(new_disk_conf, info);
1533         if (err) {
1534                 retcode = ERR_MANDATORY_TAG;
1535                 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
1536                 goto fail;
1537         }
1538
1539         if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1540                 new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
1541
1542         new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
1543         if (!new_plan) {
1544                 retcode = ERR_NOMEM;
1545                 goto fail;
1546         }
1547
1548         if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
1549                 retcode = ERR_MD_IDX_INVALID;
1550                 goto fail;
1551         }
1552
1553         rcu_read_lock();
1554         nc = rcu_dereference(connection->net_conf);
1555         if (nc) {
1556                 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
1557                         rcu_read_unlock();
1558                         retcode = ERR_STONITH_AND_PROT_A;
1559                         goto fail;
1560                 }
1561         }
1562         rcu_read_unlock();
1563
1564         bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
1565                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL, device);
1566         if (IS_ERR(bdev)) {
1567                 drbd_err(device, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
1568                         PTR_ERR(bdev));
1569                 retcode = ERR_OPEN_DISK;
1570                 goto fail;
1571         }
1572         nbc->backing_bdev = bdev;
1573
1574         /*
1575          * meta_dev_idx >= 0: external fixed size, possibly multiple
1576          * drbd sharing one meta device.  TODO in that case, paranoia
1577          * check that [md_bdev, meta_dev_idx] is not yet used by some
1578          * other drbd minor!  (if you use drbd.conf + drbdadm, that
1579          * should check it for you already; but if you don't, or
1580          * someone fooled it, we need to double check here)
1581          */
1582         bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
1583                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL,
1584                                   (new_disk_conf->meta_dev_idx < 0) ?
1585                                   (void *)device : (void *)drbd_m_holder);
1586         if (IS_ERR(bdev)) {
1587                 drbd_err(device, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
1588                         PTR_ERR(bdev));
1589                 retcode = ERR_OPEN_MD_DISK;
1590                 goto fail;
1591         }
1592         nbc->md_bdev = bdev;
1593
1594         if ((nbc->backing_bdev == nbc->md_bdev) !=
1595             (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1596              new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
1597                 retcode = ERR_MD_IDX_INVALID;
1598                 goto fail;
1599         }
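        /*
         * The check above is an equivalence test: backing device and
         * meta-data device must be one and the same block device exactly
         * when an internal (or flexible-internal) meta-data index was
         * requested, and must differ for any external index.
         */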
1600
1601         resync_lru = lc_create("resync", drbd_bm_ext_cache,
1602                         1, 61, sizeof(struct bm_extent),
1603                         offsetof(struct bm_extent, lce));
1604         if (!resync_lru) {
1605                 retcode = ERR_NOMEM;
1606                 goto fail;
1607         }
1608
1609         /* Read our meta data super block early.
1610          * This also sets other on-disk offsets. */
1611         retcode = drbd_md_read(device, nbc);
1612         if (retcode != NO_ERROR)
1613                 goto fail;
1614
1615         if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
1616                 new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
1617         if (new_disk_conf->al_extents > drbd_al_extents_max(nbc))
1618                 new_disk_conf->al_extents = drbd_al_extents_max(nbc);
1619
1620         if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
1621                 drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
1622                         (unsigned long long) drbd_get_max_capacity(nbc),
1623                         (unsigned long long) new_disk_conf->disk_size);
1624                 retcode = ERR_DISK_TOO_SMALL;
1625                 goto fail;
1626         }
1627
1628         if (new_disk_conf->meta_dev_idx < 0) {
1629                 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1630                 /* at least 1 MiB (2048 sectors), otherwise it does not make sense */
1631                 min_md_device_sectors = (2<<10);
1632         } else {
1633                 max_possible_sectors = DRBD_MAX_SECTORS;
1634                 min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
1635         }
1636
1637         if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
1638                 retcode = ERR_MD_DISK_TOO_SMALL;
1639                 drbd_warn(device, "refusing attach: md-device too small, "
1640                      "at least %llu sectors needed for this meta-disk type\n",
1641                      (unsigned long long) min_md_device_sectors);
1642                 goto fail;
1643         }
1644
1645         /* Make sure the new disk is big enough
1646          * (we may currently be R_PRIMARY with no local disk...) */
1647         if (drbd_get_max_capacity(nbc) <
1648             drbd_get_capacity(device->this_bdev)) {
1649                 retcode = ERR_DISK_TOO_SMALL;
1650                 goto fail;
1651         }
1652
1653         nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1654
1655         if (nbc->known_size > max_possible_sectors) {
1656                 drbd_warn(device, "==> truncating very big lower level device "
1657                         "to currently maximum possible %llu sectors <==\n",
1658                         (unsigned long long) max_possible_sectors);
1659                 if (new_disk_conf->meta_dev_idx >= 0)
1660                         drbd_warn(device, "==>> using internal or flexible "
1661                                       "meta data may help <<==\n");
1662         }
1663
1664         drbd_suspend_io(device);
1665         /* also wait for the last barrier ack. */
1666         /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
1667          * We need a way to either ignore barrier acks for barriers sent before a device
1668          * was attached, or a way to wait for all pending barrier acks to come in.
1669          * As barriers are counted per resource,
1670          * we'd need to suspend io on all devices of a resource.
1671          */
1672         wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device));
1673         /* and for any other previously queued work */
1674         drbd_flush_workqueue(&connection->sender_work);
1675
1676         rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
1677         retcode = rv;  /* FIXME: Type mismatch. */
1678         drbd_resume_io(device);
1679         if (rv < SS_SUCCESS)
1680                 goto fail;
1681
1682         if (!get_ldev_if_state(device, D_ATTACHING))
1683                 goto force_diskless;
1684
1685         if (!device->bitmap) {
1686                 if (drbd_bm_init(device)) {
1687                         retcode = ERR_NOMEM;
1688                         goto force_diskless_dec;
1689                 }
1690         }
1691
1692         if (device->state.conn < C_CONNECTED &&
1693             device->state.role == R_PRIMARY && device->ed_uuid &&
1694             (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1695                 drbd_err(device, "Can only attach to data with current UUID=%016llX\n",
1696                     (unsigned long long)device->ed_uuid);
1697                 retcode = ERR_DATA_NOT_CURRENT;
1698                 goto force_diskless_dec;
1699         }
1700
1701         /* Since we are diskless, fix the activity log first... */
1702         if (drbd_check_al_size(device, new_disk_conf)) {
1703                 retcode = ERR_NOMEM;
1704                 goto force_diskless_dec;
1705         }
1706
1707         /* Prevent shrinking of consistent devices ! */
1708         if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
1709             drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
1710                 drbd_warn(device, "refusing to truncate a consistent device\n");
1711                 retcode = ERR_DISK_TOO_SMALL;
1712                 goto force_diskless_dec;
1713         }
1714
1715         lock_all_resources();
1716         retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
1717         if (retcode != NO_ERROR) {
1718                 unlock_all_resources();
1719                 goto force_diskless_dec;
1720         }
1721
1722         /* Reset the "barriers don't work" bits here, then force meta data to
1723          * be written, to ensure we determine if barriers are supported. */
1724         if (new_disk_conf->md_flushes)
1725                 clear_bit(MD_NO_FUA, &device->flags);
1726         else
1727                 set_bit(MD_NO_FUA, &device->flags);
1728
1729         /* Point of no return reached.
1730          * Devices and memory are no longer released by error cleanup below.
1731          * Now the device takes over responsibility, and the state engine should
1732          * clean it up somewhere.  */
1733         D_ASSERT(device, device->ldev == NULL);
1734         device->ldev = nbc;
1735         device->resync = resync_lru;
1736         device->rs_plan_s = new_plan;
1737         nbc = NULL;
1738         resync_lru = NULL;
1739         new_disk_conf = NULL;
1740         new_plan = NULL;
1741
1742         drbd_resync_after_changed(device);
1743         drbd_bump_write_ordering(device->resource, device->ldev, WO_BDEV_FLUSH);
1744         unlock_all_resources();
1745
1746         if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
1747                 set_bit(CRASHED_PRIMARY, &device->flags);
1748         else
1749                 clear_bit(CRASHED_PRIMARY, &device->flags);
1750
1751         if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
1752             !(device->state.role == R_PRIMARY && device->resource->susp_nod))
1753                 set_bit(CRASHED_PRIMARY, &device->flags);
1754
1755         device->send_cnt = 0;
1756         device->recv_cnt = 0;
1757         device->read_cnt = 0;
1758         device->writ_cnt = 0;
1759
1760         drbd_reconsider_max_bio_size(device, device->ldev);
1761
1762         /* If I am currently not R_PRIMARY,
1763          * but meta data primary indicator is set,
1764          * I just now recover from a hard crash,
1765          * and have been R_PRIMARY before that crash.
1766          *
1767          * Now, if I had no connection before that crash
1768          * (have been degraded R_PRIMARY), chances are that
1769          * I won't find my peer now either.
1770          *
1771          * In that case, and _only_ in that case,
1772          * we use the degr-wfc-timeout instead of the default,
1773          * so we can automatically recover from a crash of a
1774          * degraded but active "cluster" after a certain timeout.
1775          */
1776         clear_bit(USE_DEGR_WFC_T, &device->flags);
1777         if (device->state.role != R_PRIMARY &&
1778              drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
1779             !drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND))
1780                 set_bit(USE_DEGR_WFC_T, &device->flags);
1781
1782         dd = drbd_determine_dev_size(device, 0, NULL);
1783         if (dd <= DS_ERROR) {
1784                 retcode = ERR_NOMEM_BITMAP;
1785                 goto force_diskless_dec;
1786         } else if (dd == DS_GREW)
1787                 set_bit(RESYNC_AFTER_NEG, &device->flags);
1788
1789         if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ||
1790             (test_bit(CRASHED_PRIMARY, &device->flags) &&
1791              drbd_md_test_flag(device->ldev, MDF_AL_DISABLED))) {
1792                 drbd_info(device, "Assuming that all blocks are out of sync "
1793                      "(aka FullSync)\n");
1794                 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
1795                         "set_n_write from attaching", BM_LOCKED_MASK)) {
1796                         retcode = ERR_IO_MD_DISK;
1797                         goto force_diskless_dec;
1798                 }
1799         } else {
1800                 if (drbd_bitmap_io(device, &drbd_bm_read,
1801                         "read from attaching", BM_LOCKED_MASK)) {
1802                         retcode = ERR_IO_MD_DISK;
1803                         goto force_diskless_dec;
1804                 }
1805         }
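        /*
         * Rationale: with MDF_FULL_SYNC set, or after a primary crash
         * while AL updates were disabled (MDF_AL_DISABLED), there is no
         * trustworthy activity log to tell us which extents might be
         * dirty, so every block must be assumed out of sync; otherwise
         * the on-disk bitmap is authoritative and is simply read back in.
         */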
1806
1807         if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
1808                 drbd_suspend_al(device); /* IO is still suspended here... */
1809
1810         spin_lock_irq(&device->resource->req_lock);
1811         os = drbd_read_state(device);
1812         ns = os;
1813         /* If MDF_CONSISTENT is not set go into inconsistent state,
1814            otherwise investigate MDF_WAS_UP_TO_DATE...
1815            If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
1816            otherwise into D_CONSISTENT state.
1817         */
1818         if (drbd_md_test_flag(device->ldev, MDF_CONSISTENT)) {
1819                 if (drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE))
1820                         ns.disk = D_CONSISTENT;
1821                 else
1822                         ns.disk = D_OUTDATED;
1823         } else {
1824                 ns.disk = D_INCONSISTENT;
1825         }
1826
1827         if (drbd_md_test_flag(device->ldev, MDF_PEER_OUT_DATED))
1828                 ns.pdsk = D_OUTDATED;
1829
1830         rcu_read_lock();
1831         if (ns.disk == D_CONSISTENT &&
1832             (ns.pdsk == D_OUTDATED || rcu_dereference(device->ldev->disk_conf)->fencing == FP_DONT_CARE))
1833                 ns.disk = D_UP_TO_DATE;
1834
1835         /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
1836            MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
1837            this point, because drbd_request_state() modifies these
1838            flags. */
1839
1840         if (rcu_dereference(device->ldev->disk_conf)->al_updates)
1841                 device->ldev->md.flags &= ~MDF_AL_DISABLED;
1842         else
1843                 device->ldev->md.flags |= MDF_AL_DISABLED;
1844
1845         rcu_read_unlock();
1846
1847         /* In case we are C_CONNECTED, postpone any decision on the new disk
1848            state until after the negotiation phase. */
1849         if (device->state.conn == C_CONNECTED) {
1850                 device->new_state_tmp.i = ns.i;
1851                 ns.i = os.i;
1852                 ns.disk = D_NEGOTIATING;
1853
1854                 /* We expect to receive up-to-date UUIDs soon.
1855                    To avoid a race in receive_state, free p_uuid while
1856                    holding req_lock. I.e. atomic with the state change */
1857                 kfree(device->p_uuid);
1858                 device->p_uuid = NULL;
1859         }
1860
1861         rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
1862         spin_unlock_irq(&device->resource->req_lock);
1863
1864         if (rv < SS_SUCCESS)
1865                 goto force_diskless_dec;
1866
1867         mod_timer(&device->request_timer, jiffies + HZ);
1868
1869         if (device->state.role == R_PRIMARY)
1870                 device->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
1871         else
1872                 device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
1873
1874         drbd_md_mark_dirty(device);
1875         drbd_md_sync(device);
1876
1877         kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
1878         put_ldev(device);
1879         conn_reconfig_done(connection);
1880         mutex_unlock(&adm_ctx.resource->adm_mutex);
1881         drbd_adm_finish(&adm_ctx, info, retcode);
1882         return 0;
1883
1884  force_diskless_dec:
1885         put_ldev(device);
1886  force_diskless:
1887         drbd_force_state(device, NS(disk, D_DISKLESS));
1888         drbd_md_sync(device);
1889  fail:
1890         conn_reconfig_done(connection);
1891         if (nbc) {
1892                 if (nbc->backing_bdev)
1893                         blkdev_put(nbc->backing_bdev,
1894                                    FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1895                 if (nbc->md_bdev)
1896                         blkdev_put(nbc->md_bdev,
1897                                    FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1898                 kfree(nbc);
1899         }
1900         kfree(new_disk_conf);
1901         lc_destroy(resync_lru);
1902         kfree(new_plan);
1903         mutex_unlock(&adm_ctx.resource->adm_mutex);
1904  finish:
1905         drbd_adm_finish(&adm_ctx, info, retcode);
1906         return 0;
1907 }
1908
1909 static int adm_detach(struct drbd_device *device, int force)
1910 {
1911         enum drbd_state_rv retcode;
1912         int ret;
1913
1914         if (force) {
1915                 set_bit(FORCE_DETACH, &device->flags);
1916                 drbd_force_state(device, NS(disk, D_FAILED));
1917                 retcode = SS_SUCCESS;
1918                 goto out;
1919         }
1920
1921         drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */
1922         drbd_md_get_buffer(device, __func__); /* make sure there is no in-flight meta-data IO */
1923         retcode = drbd_request_state(device, NS(disk, D_FAILED));
1924         drbd_md_put_buffer(device);
1925         /* D_FAILED will transition to DISKLESS. */
1926         ret = wait_event_interruptible(device->misc_wait,
1927                         device->state.disk != D_FAILED);
1928         drbd_resume_io(device);
1929         if ((int)retcode == (int)SS_IS_DISKLESS)
1930                 retcode = SS_NOTHING_TO_DO;
1931         if (ret)
1932                 retcode = ERR_INTR;
1933 out:
1934         return retcode;
1935 }
1936
1937 /* Detaching the disk is a process in multiple stages.  First we need to lock
1938  * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
1939  * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1940  * internal references as well.
1941  * Only then have we finally detached. */
1942 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
1943 {
1944         struct drbd_config_context adm_ctx;
1945         enum drbd_ret_code retcode;
1946         struct detach_parms parms = { };
1947         int err;
1948
1949         retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
1950         if (!adm_ctx.reply_skb)
1951                 return retcode;
1952         if (retcode != NO_ERROR)
1953                 goto out;
1954
1955         if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
1956                 err = detach_parms_from_attrs(&parms, info);
1957                 if (err) {
1958                         retcode = ERR_MANDATORY_TAG;
1959                         drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
1960                         goto out;
1961                 }
1962         }
1963
1964         mutex_lock(&adm_ctx.resource->adm_mutex);
1965         retcode = adm_detach(adm_ctx.device, parms.force_detach);
1966         mutex_unlock(&adm_ctx.resource->adm_mutex);
1967 out:
1968         drbd_adm_finish(&adm_ctx, info, retcode);
1969         return 0;
1970 }
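/*
 * Typical trigger (sketch; exact CLI spelling depends on the drbd-utils
 * version):
 *
 *   drbdsetup detach <minor>           # graceful: suspends IO, waits
 *                                      # for the transition via D_FAILED
 *   drbdsetup detach <minor> --force   # sets FORCE_DETACH, forces D_FAILED
 *
 * Both end up in adm_detach() above via this netlink handler.
 */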
1971
1972 static bool conn_resync_running(struct drbd_connection *connection)
1973 {
1974         struct drbd_peer_device *peer_device;
1975         bool rv = false;
1976         int vnr;
1977
1978         rcu_read_lock();
1979         idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
1980                 struct drbd_device *device = peer_device->device;
1981                 if (device->state.conn == C_SYNC_SOURCE ||
1982                     device->state.conn == C_SYNC_TARGET ||
1983                     device->state.conn == C_PAUSED_SYNC_S ||
1984                     device->state.conn == C_PAUSED_SYNC_T) {
1985                         rv = true;
1986                         break;
1987                 }
1988         }
1989         rcu_read_unlock();
1990
1991         return rv;
1992 }
1993
1994 static bool conn_ov_running(struct drbd_connection *connection)
1995 {
1996         struct drbd_peer_device *peer_device;
1997         bool rv = false;
1998         int vnr;
1999
2000         rcu_read_lock();
2001         idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2002                 struct drbd_device *device = peer_device->device;
2003                 if (device->state.conn == C_VERIFY_S ||
2004                     device->state.conn == C_VERIFY_T) {
2005                         rv = true;
2006                         break;
2007                 }
2008         }
2009         rcu_read_unlock();
2010
2011         return rv;
2012 }
2013
2014 static enum drbd_ret_code
2015 _check_net_options(struct drbd_connection *connection, struct net_conf *old_net_conf, struct net_conf *new_net_conf)
2016 {
2017         struct drbd_peer_device *peer_device;
2018         int i;
2019
2020         if (old_net_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) {
2021                 if (new_net_conf->wire_protocol != old_net_conf->wire_protocol)
2022                         return ERR_NEED_APV_100;
2023
2024                 if (new_net_conf->two_primaries != old_net_conf->two_primaries)
2025                         return ERR_NEED_APV_100;
2026
2027                 if (strcmp(new_net_conf->integrity_alg, old_net_conf->integrity_alg))
2028                         return ERR_NEED_APV_100;
2029         }
2030
2031         if (!new_net_conf->two_primaries &&
2032             conn_highest_role(connection) == R_PRIMARY &&
2033             conn_highest_peer(connection) == R_PRIMARY)
2034                 return ERR_NEED_ALLOW_TWO_PRI;
2035
2036         if (new_net_conf->two_primaries &&
2037             (new_net_conf->wire_protocol != DRBD_PROT_C))
2038                 return ERR_NOT_PROTO_C;
2039
2040         idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2041                 struct drbd_device *device = peer_device->device;
2042                 if (get_ldev(device)) {
2043                         enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing;
2044                         put_ldev(device);
2045                         if (new_net_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
2046                                 return ERR_STONITH_AND_PROT_A;
2047                 }
2048                 if (device->state.role == R_PRIMARY && new_net_conf->discard_my_data)
2049                         return ERR_DISCARD_IMPOSSIBLE;
2050         }
2051
2052         if (new_net_conf->on_congestion != OC_BLOCK && new_net_conf->wire_protocol != DRBD_PROT_A)
2053                 return ERR_CONG_NOT_PROTO_A;
2054
2055         return NO_ERROR;
2056 }
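/*
 * Summary of the option interdependencies enforced above:
 *   - protocol / two-primaries / integrity-alg changes on a live
 *     connection need agreed_pro_version >= 100,
 *   - dual-primary requires allow-two-primaries and protocol C,
 *   - fencing resource-and-stonith (FP_STONITH) excludes protocol A,
 *   - on-congestion other than "block" requires protocol A,
 *   - discard-my-data is refused while this node is primary.
 */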
2057
2058 static enum drbd_ret_code
2059 check_net_options(struct drbd_connection *connection, struct net_conf *new_net_conf)
2060 {
2061         enum drbd_ret_code rv;
2062         struct drbd_peer_device *peer_device;
2063         int i;
2064
2065         rcu_read_lock();
2066         rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf);
2067         rcu_read_unlock();
2068
2069         /* connection->peer_devices protected by genl_lock() here */
2070         idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2071                 struct drbd_device *device = peer_device->device;
2072                 if (!device->bitmap) {
2073                         if (drbd_bm_init(device))
2074                                 return ERR_NOMEM;
2075                 }
2076         }
2077
2078         return rv;
2079 }
2080
2081 struct crypto {
2082         struct crypto_hash *verify_tfm;
2083         struct crypto_hash *csums_tfm;
2084         struct crypto_hash *cram_hmac_tfm;
2085         struct crypto_hash *integrity_tfm;
2086 };
2087
2088 static int
2089 alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
2090 {
2091         if (!tfm_name[0])
2092                 return NO_ERROR;
2093
2094         *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
2095         if (IS_ERR(*tfm)) {
2096                 *tfm = NULL;
2097                 return err_alg;
2098         }
2099
2100         return NO_ERROR;
2101 }
2102
2103 static enum drbd_ret_code
2104 alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
2105 {
2106         char hmac_name[CRYPTO_MAX_ALG_NAME];
2107         enum drbd_ret_code rv;
2108
2109         rv = alloc_hash(&crypto->csums_tfm, new_net_conf->csums_alg,
2110                        ERR_CSUMS_ALG);
2111         if (rv != NO_ERROR)
2112                 return rv;
2113         rv = alloc_hash(&crypto->verify_tfm, new_net_conf->verify_alg,
2114                        ERR_VERIFY_ALG);
2115         if (rv != NO_ERROR)
2116                 return rv;
2117         rv = alloc_hash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
2118                        ERR_INTEGRITY_ALG);
2119         if (rv != NO_ERROR)
2120                 return rv;
2121         if (new_net_conf->cram_hmac_alg[0] != 0) {
2122                 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
2123                          new_net_conf->cram_hmac_alg);
2124
2125                 rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
2126                                ERR_AUTH_ALG);
2127         }
2128
2129         return rv;
2130 }
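/*
 * Example: with cram_hmac_alg "sha1" the snprintf() above builds the
 * crypto API template name "hmac(sha1)", which crypto_alloc_hash()
 * resolves just like the plain digest names used for the csums, verify
 * and integrity transforms.
 */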
2131
2132 static void free_crypto(struct crypto *crypto)
2133 {
2134         crypto_free_hash(crypto->cram_hmac_tfm);
2135         crypto_free_hash(crypto->integrity_tfm);
2136         crypto_free_hash(crypto->csums_tfm);
2137         crypto_free_hash(crypto->verify_tfm);
2138 }
2139
2140 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
2141 {
2142         struct drbd_config_context adm_ctx;
2143         enum drbd_ret_code retcode;
2144         struct drbd_connection *connection;
2145         struct net_conf *old_net_conf, *new_net_conf = NULL;
2146         int err;
2147         int ovr; /* online verify running */
2148         int rsr; /* re-sync running */
2149         struct crypto crypto = { };
2150
2151         retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
2152         if (!adm_ctx.reply_skb)
2153                 return retcode;
2154         if (retcode != NO_ERROR)
2155                 goto finish;
2156
2157         connection = adm_ctx.connection;
2158         mutex_lock(&adm_ctx.resource->adm_mutex);
2159
2160         new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
2161         if (!new_net_conf) {
2162                 retcode = ERR_NOMEM;
2163                 goto out;
2164         }
2165
2166         conn_reconfig_start(connection);
2167
2168         mutex_lock(&connection->data.mutex);
2169         mutex_lock(&connection->resource->conf_update);
2170         old_net_conf = connection->net_conf;
2171
2172         if (!old_net_conf) {
2173                 drbd_msg_put_info(adm_ctx.reply_skb, "net conf missing, try connect");
2174                 retcode = ERR_INVALID_REQUEST;
2175                 goto fail;
2176         }
2177
2178         *new_net_conf = *old_net_conf;
2179         if (should_set_defaults(info))
2180                 set_net_conf_defaults(new_net_conf);
2181
2182         err = net_conf_from_attrs_for_change(new_net_conf, info);
2183         if (err && err != -ENOMSG) {
2184                 retcode = ERR_MANDATORY_TAG;
2185                 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2186                 goto fail;
2187         }
2188
2189         retcode = check_net_options(connection, new_net_conf);
2190         if (retcode != NO_ERROR)
2191                 goto fail;
2192
2193         /* re-sync running */
2194         rsr = conn_resync_running(connection);
2195         if (rsr && strcmp(new_net_conf->csums_alg, old_net_conf->csums_alg)) {
2196                 retcode = ERR_CSUMS_RESYNC_RUNNING;
2197                 goto fail;
2198         }
2199
2200         /* online verify running */
2201         ovr = conn_ov_running(connection);
2202         if (ovr && strcmp(new_net_conf->verify_alg, old_net_conf->verify_alg)) {
2203                 retcode = ERR_VERIFY_RUNNING;
2204                 goto fail;
2205         }
2206
2207         retcode = alloc_crypto(&crypto, new_net_conf);
2208         if (retcode != NO_ERROR)
2209                 goto fail;
2210
2211         rcu_assign_pointer(connection->net_conf, new_net_conf);
2212
2213         if (!rsr) {
2214                 crypto_free_hash(connection->csums_tfm);
2215                 connection->csums_tfm = crypto.csums_tfm;
2216                 crypto.csums_tfm = NULL;
2217         }
2218         if (!ovr) {
2219                 crypto_free_hash(connection->verify_tfm);
2220                 connection->verify_tfm = crypto.verify_tfm;
2221                 crypto.verify_tfm = NULL;
2222         }
2223
2224         crypto_free_hash(connection->integrity_tfm);
2225         connection->integrity_tfm = crypto.integrity_tfm;
2226         if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
2227                 /* Do this without trying to take connection->data.mutex again.  */
2228                 __drbd_send_protocol(connection, P_PROTOCOL_UPDATE);
2229
2230         crypto_free_hash(connection->cram_hmac_tfm);
2231         connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
2232
2233         mutex_unlock(&connection->resource->conf_update);
2234         mutex_unlock(&connection->data.mutex);
2235         synchronize_rcu();
2236         kfree(old_net_conf);
2237
2238         if (connection->cstate >= C_WF_REPORT_PARAMS) {
2239                 struct drbd_peer_device *peer_device;
2240                 int vnr;
2241
2242                 idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
2243                         drbd_send_sync_param(peer_device);
2244         }
2245
2246         goto done;
2247
2248  fail:
2249         mutex_unlock(&connection->resource->conf_update);
2250         mutex_unlock(&connection->data.mutex);
2251         free_crypto(&crypto);
2252         kfree(new_net_conf);
2253  done:
2254         conn_reconfig_done(connection);
2255  out:
2256         mutex_unlock(&adm_ctx.resource->adm_mutex);
2257  finish:
2258         drbd_adm_finish(&adm_ctx, info, retcode);
2259         return 0;
2260 }
2261
2262 static void connection_to_info(struct connection_info *info,
2263                                struct drbd_connection *connection)
2264 {
2265         info->conn_connection_state = connection->cstate;
2266         info->conn_role = conn_highest_peer(connection);
2267 }
2268
2269 static void peer_device_to_info(struct peer_device_info *info,
2270                                 struct drbd_peer_device *peer_device)
2271 {
2272         struct drbd_device *device = peer_device->device;
2273
2274         info->peer_repl_state =
2275                 max_t(enum drbd_conns, C_WF_REPORT_PARAMS, device->state.conn);
2276         info->peer_disk_state = device->state.pdsk;
2277         info->peer_resync_susp_user = device->state.user_isp;
2278         info->peer_resync_susp_peer = device->state.peer_isp;
2279         info->peer_resync_susp_dependency = device->state.aftr_isp;
2280 }
2281
2282 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
2283 {
2284         struct connection_info connection_info;
2285         enum drbd_notification_type flags;
2286         unsigned int peer_devices = 0;
2287         struct drbd_config_context adm_ctx;
2288         struct drbd_peer_device *peer_device;
2289         struct net_conf *old_net_conf, *new_net_conf = NULL;
2290         struct crypto crypto = { };
2291         struct drbd_resource *resource;
2292         struct drbd_connection *connection;
2293         enum drbd_ret_code retcode;
2294         int i;
2295         int err;
2296
2297         retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
2298
2299         if (!adm_ctx.reply_skb)
2300                 return retcode;
2301         if (retcode != NO_ERROR)
2302                 goto out;
2303         if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
2304                 drbd_msg_put_info(adm_ctx.reply_skb, "connection endpoint(s) missing");
2305                 retcode = ERR_INVALID_REQUEST;
2306                 goto out;
2307         }
2308
2309         /* No need for _rcu here. All reconfiguration is
2310          * strictly serialized on genl_lock(). We are protected against
2311          * concurrent reconfiguration/addition/deletion */
2312         for_each_resource(resource, &drbd_resources) {
2313                 for_each_connection(connection, resource) {
2314                         if (nla_len(adm_ctx.my_addr) == connection->my_addr_len &&
2315                             !memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr,
2316                                     connection->my_addr_len)) {
2317                                 retcode = ERR_LOCAL_ADDR;
2318                                 goto out;
2319                         }
2320
2321                         if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len &&
2322                             !memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr,
2323                                     connection->peer_addr_len)) {
2324                                 retcode = ERR_PEER_ADDR;
2325                                 goto out;
2326                         }
2327                 }
2328         }
2329
2330         mutex_lock(&adm_ctx.resource->adm_mutex);
2331         connection = first_connection(adm_ctx.resource);
2332         conn_reconfig_start(connection);
2333
2334         if (connection->cstate > C_STANDALONE) {
2335                 retcode = ERR_NET_CONFIGURED;
2336                 goto fail;
2337         }
2338
2339         /* allocation not in the IO path, drbdsetup / netlink process context */
2340         new_net_conf = kzalloc(sizeof(*new_net_conf), GFP_KERNEL);
2341         if (!new_net_conf) {
2342                 retcode = ERR_NOMEM;
2343                 goto fail;
2344         }
2345
2346         set_net_conf_defaults(new_net_conf);
2347
2348         err = net_conf_from_attrs(new_net_conf, info);
2349         if (err && err != -ENOMSG) {
2350                 retcode = ERR_MANDATORY_TAG;
2351                 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2352                 goto fail;
2353         }
2354
2355         retcode = check_net_options(connection, new_net_conf);
2356         if (retcode != NO_ERROR)
2357                 goto fail;
2358
2359         retcode = alloc_crypto(&crypto, new_net_conf);
2360         if (retcode != NO_ERROR)
2361                 goto fail;
2362
2363         ((char *)new_net_conf->shared_secret)[SHARED_SECRET_MAX - 1] = 0;
2364
2365         drbd_flush_workqueue(&connection->sender_work);
2366
2367         mutex_lock(&adm_ctx.resource->conf_update);
2368         old_net_conf = connection->net_conf;
2369         if (old_net_conf) {
2370                 retcode = ERR_NET_CONFIGURED;
2371                 mutex_unlock(&adm_ctx.resource->conf_update);
2372                 goto fail;
2373         }
2374         rcu_assign_pointer(connection->net_conf, new_net_conf);
2375
2376         conn_free_crypto(connection);
2377         connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
2378         connection->integrity_tfm = crypto.integrity_tfm;
2379         connection->csums_tfm = crypto.csums_tfm;
2380         connection->verify_tfm = crypto.verify_tfm;
2381
2382         connection->my_addr_len = nla_len(adm_ctx.my_addr);
2383         memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len);
2384         connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
2385         memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);
2386
2387         idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2388                 peer_devices++;
2389         }
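        /*
         * The NOTIFY_CONTINUES handling below strings the CREATE
         * notifications for the connection and all of its peer devices
         * into one event chain: every message except the last carries
         * NOTIFY_CONTINUES, and peer_devices-- reaches zero exactly on
         * the final peer device, which is therefore sent with flags == 0,
         * terminating the chain.
         */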
2390
2391         connection_to_info(&connection_info, connection);
2392         flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
2393         mutex_lock(&notification_mutex);
2394         notify_connection_state(NULL, 0, connection, &connection_info, NOTIFY_CREATE | flags);
2395         idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2396                 struct peer_device_info peer_device_info;
2397
2398                 peer_device_to_info(&peer_device_info, peer_device);
2399                 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
2400                 notify_peer_device_state(NULL, 0, peer_device, &peer_device_info, NOTIFY_CREATE | flags);
2401         }
2402         mutex_unlock(&notification_mutex);
2403         mutex_unlock(&adm_ctx.resource->conf_update);
2404
2405         rcu_read_lock();
2406         idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2407                 struct drbd_device *device = peer_device->device;
2408                 device->send_cnt = 0;
2409                 device->recv_cnt = 0;
2410         }
2411         rcu_read_unlock();
2412
2413         retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
2414
2415         conn_reconfig_done(connection);
2416         mutex_unlock(&adm_ctx.resource->adm_mutex);
2417         drbd_adm_finish(&adm_ctx, info, retcode);
2418         return 0;
2419
2420 fail:
2421         free_crypto(&crypto);
2422         kfree(new_net_conf);
2423
2424         conn_reconfig_done(connection);
2425         mutex_unlock(&adm_ctx.resource->adm_mutex);
2426 out:
2427         drbd_adm_finish(&adm_ctx, info, retcode);
2428         return 0;
2429 }
2430
2431 static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection, bool force)
2432 {
2433         enum drbd_state_rv rv;
2434
2435         rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
2436                         force ? CS_HARD : 0);
2437
2438         switch (rv) {
2439         case SS_NOTHING_TO_DO:
2440                 break;
2441         case SS_ALREADY_STANDALONE:
2442                 return SS_SUCCESS;
2443         case SS_PRIMARY_NOP:
2444                 /* Our state checking code wants to see the peer outdated. */
2445                 rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
2446
2447                 if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
2448                         rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE);
2449
2450                 break;
2451         case SS_CW_FAILED_BY_PEER:
2452                 /* The peer probably wants to see us outdated. */
2453                 rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING,
2454                                                         disk, D_OUTDATED), 0);
2455                 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
2456                         rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
2457                                         CS_HARD);
2458                 }
2459                 break;
2460         default:
2461                 break; /* no special handling necessary */
2462         }
2463
2464         if (rv >= SS_SUCCESS) {
2465                 enum drbd_state_rv rv2;
2466                 /* No one else can reconfigure the network while I am here.
2467                  * The state handling only uses drbd_thread_stop_nowait(),
2468                  * we want to really wait here until the receiver is no more.
2469                  */
2470                 drbd_thread_stop(&connection->receiver);
2471
2472                 /* Race breaker.  This additional state change request may be
2473                  * necessary, if this was a forced disconnect during a receiver
2474                  * restart.  We may have "killed" the receiver thread just
2475                  * after drbd_receiver() returned.  Typically, we should be
2476                  * C_STANDALONE already, now, and this becomes a no-op.
2477                  */
2478                 rv2 = conn_request_state(connection, NS(conn, C_STANDALONE),
2479                                 CS_VERBOSE | CS_HARD);
2480                 if (rv2 < SS_SUCCESS)
2481                         drbd_err(connection,
2482                                 "unexpected rv2=%d in conn_try_disconnect()\n",
2483                                 rv2);
2484                 /* Unlike in DRBD 9, the state engine has generated
2485                  * NOTIFY_DESTROY events before clearing connection->net_conf. */
2486         }
2487         return rv;
2488 }
2489
2490 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
2491 {
2492         struct drbd_config_context adm_ctx;
2493         struct disconnect_parms parms;
2494         struct drbd_connection *connection;
2495         enum drbd_state_rv rv;
2496         enum drbd_ret_code retcode;
2497         int err;
2498
2499         retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
2500         if (!adm_ctx.reply_skb)
2501                 return retcode;
2502         if (retcode != NO_ERROR)
2503                 goto fail;
2504
2505         connection = adm_ctx.connection;
2506         memset(&parms, 0, sizeof(parms));
2507         if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
2508                 err = disconnect_parms_from_attrs(&parms, info);
2509                 if (err) {
2510                         retcode = ERR_MANDATORY_TAG;
2511                         drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2512                         goto fail;
2513                 }
2514         }
2515
2516         mutex_lock(&adm_ctx.resource->adm_mutex);
2517         rv = conn_try_disconnect(connection, parms.force_disconnect);
2518         if (rv < SS_SUCCESS)
2519                 retcode = rv;  /* FIXME: Type mismatch. */
2520         else
2521                 retcode = NO_ERROR;
2522         mutex_unlock(&adm_ctx.resource->adm_mutex);
2523  fail:
2524         drbd_adm_finish(&adm_ctx, info, retcode);
2525         return 0;
2526 }
2527
2528 void resync_after_online_grow(struct drbd_device *device)
2529 {
2530         int iass; /* I am sync source */
2531
2532         drbd_info(device, "Resync of new storage after online grow\n");
2533         if (device->state.role != device->state.peer)
2534                 iass = (device->state.role == R_PRIMARY);
2535         else
2536                 iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);
2537
2538         if (iass)
2539                 drbd_start_resync(device, C_SYNC_SOURCE);
2540         else
2541                 _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
2542 }
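/*
 * Sync-source election: if the roles differ, the primary resyncs the new
 * space to the secondary; if both sides have the same role, the
 * RESOLVE_CONFLICTS bit (established during the connection handshake)
 * breaks the tie, and the non-source side waits in C_WF_SYNC_UUID for
 * the peer to start.
 */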
2543
2544 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2545 {
2546         struct drbd_config_context adm_ctx;
2547         struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
2548         struct resize_parms rs;
2549         struct drbd_device *device;
2550         enum drbd_ret_code retcode;
2551         enum determine_dev_size dd;
2552         bool change_al_layout = false;
2553         enum dds_flags ddsf;
2554         sector_t u_size;
2555         int err;
2556
2557         retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2558         if (!adm_ctx.reply_skb)
2559                 return retcode;
2560         if (retcode != NO_ERROR)
2561                 goto finish;
2562
2563         mutex_lock(&adm_ctx.resource->adm_mutex);
2564         device = adm_ctx.device;
2565         if (!get_ldev(device)) {
2566                 retcode = ERR_NO_DISK;
2567                 goto fail;
2568         }
2569
2570         memset(&rs, 0, sizeof(struct resize_parms));
2571         rs.al_stripes = device->ldev->md.al_stripes;
2572         rs.al_stripe_size = device->ldev->md.al_stripe_size_4k * 4;
2573         if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
2574                 err = resize_parms_from_attrs(&rs, info);
2575                 if (err) {
2576                         retcode = ERR_MANDATORY_TAG;
2577                         drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2578                         goto fail_ldev;
2579                 }
2580         }
2581
2582         if (device->state.conn > C_CONNECTED) {
2583                 retcode = ERR_RESIZE_RESYNC;
2584                 goto fail_ldev;
2585         }
2586
2587         if (device->state.role == R_SECONDARY &&
2588             device->state.peer == R_SECONDARY) {
2589                 retcode = ERR_NO_PRIMARY;
2590                 goto fail_ldev;
2591         }
2592
2593         if (rs.no_resync && first_peer_device(device)->connection->agreed_pro_version < 93) {
2594                 retcode = ERR_NEED_APV_93;
2595                 goto fail_ldev;
2596         }
2597
2598         rcu_read_lock();
2599         u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
2600         rcu_read_unlock();
2601         if (u_size != (sector_t)rs.resize_size) {
2602                 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
2603                 if (!new_disk_conf) {
2604                         retcode = ERR_NOMEM;
2605                         goto fail_ldev;
2606                 }
2607         }
2608
2609         if (device->ldev->md.al_stripes != rs.al_stripes ||
2610             device->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
2611                 u32 al_size_k = rs.al_stripes * rs.al_stripe_size;
2612
2613                 if (al_size_k > (16 * 1024 * 1024)) {
2614                         retcode = ERR_MD_LAYOUT_TOO_BIG;
2615                         goto fail_ldev;
2616                 }
2617
2618                 if (al_size_k < MD_32kB_SECT/2) {
2619                         retcode = ERR_MD_LAYOUT_TOO_SMALL;
2620                         goto fail_ldev;
2621                 }
2622
2623                 if (device->state.conn != C_CONNECTED && !rs.resize_force) {
2624                         retcode = ERR_MD_LAYOUT_CONNECTED;
2625                         goto fail_ldev;
2626                 }
2627
2628                 change_al_layout = true;
2629         }
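        /*
         * Bounds sketch: al_size_k is in KiB (al_stripe_size is derived
         * from al_stripe_size_4k * 4 above), so the upper limit
         * 16 * 1024 * 1024 corresponds to a 16 GiB activity log; assuming
         * MD_32kB_SECT is 32 KiB expressed in 512-byte sectors (64), the
         * lower limit MD_32kB_SECT/2 is 32 KiB.
         */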
2630
2631         if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev))
2632                 device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
2633
2634         if (new_disk_conf) {
2635                 mutex_lock(&device->resource->conf_update);
2636                 old_disk_conf = device->ldev->disk_conf;
2637                 *new_disk_conf = *old_disk_conf;
2638                 new_disk_conf->disk_size = (sector_t)rs.resize_size;
2639                 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
2640                 mutex_unlock(&device->resource->conf_update);
2641                 synchronize_rcu();
2642                 kfree(old_disk_conf);
2643         }
2644
2645         ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
2646         dd = drbd_determine_dev_size(device, ddsf, change_al_layout ? &rs : NULL);
2647         drbd_md_sync(device);
2648         put_ldev(device);
2649         if (dd == DS_ERROR) {
2650                 retcode = ERR_NOMEM_BITMAP;
2651                 goto fail;
2652         } else if (dd == DS_ERROR_SPACE_MD) {
2653                 retcode = ERR_MD_LAYOUT_NO_FIT;
2654                 goto fail;
2655         } else if (dd == DS_ERROR_SHRINK) {
2656                 retcode = ERR_IMPLICIT_SHRINK;
2657                 goto fail;
2658         }
2659
2660         if (device->state.conn == C_CONNECTED) {
2661                 if (dd == DS_GREW)
2662                         set_bit(RESIZE_PENDING, &device->flags);
2663
2664                 drbd_send_uuids(first_peer_device(device));
2665                 drbd_send_sizes(first_peer_device(device), 1, ddsf);
2666         }
2667
2668  fail:
2669         mutex_unlock(&adm_ctx.resource->adm_mutex);
2670  finish:
2671         drbd_adm_finish(&adm_ctx, info, retcode);
2672         return 0;
2673
2674  fail_ldev:
2675         put_ldev(device);
2676         goto fail;
2677 }
2678
2679 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
2680 {
2681         struct drbd_config_context adm_ctx;
2682         enum drbd_ret_code retcode;
2683         struct res_opts res_opts;
2684         int err;
2685
2686         retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
2687         if (!adm_ctx.reply_skb)
2688                 return retcode;
2689         if (retcode != NO_ERROR)
2690                 goto fail;
2691
2692         res_opts = adm_ctx.resource->res_opts;
2693         if (should_set_defaults(info))
2694                 set_res_opts_defaults(&res_opts);
2695
2696         err = res_opts_from_attrs(&res_opts, info);
2697         if (err && err != -ENOMSG) {
2698                 retcode = ERR_MANDATORY_TAG;
2699                 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2700                 goto fail;
2701         }
2702
2703         mutex_lock(&adm_ctx.resource->adm_mutex);
2704         err = set_resource_options(adm_ctx.resource, &res_opts);
2705         if (err) {
2706                 retcode = ERR_INVALID_REQUEST;
2707                 if (err == -ENOMEM)
2708                         retcode = ERR_NOMEM;
2709         }
2710         mutex_unlock(&adm_ctx.resource->adm_mutex);
2711
2712 fail:
2713         drbd_adm_finish(&adm_ctx, info, retcode);
2714         return 0;
2715 }
2716
2717 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
2718 {
2719         struct drbd_config_context adm_ctx;
2720         struct drbd_device *device;
2721         int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2722
2723         retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2724         if (!adm_ctx.reply_skb)
2725                 return retcode;
2726         if (retcode != NO_ERROR)
2727                 goto out;
2728
2729         device = adm_ctx.device;
2730         if (!get_ldev(device)) {
2731                 retcode = ERR_NO_DISK;
2732                 goto out;
2733         }
2734
2735         mutex_lock(&adm_ctx.resource->adm_mutex);
2736
2737         /* If there is still bitmap IO pending, probably because of a previous
2738          * resync just being finished, wait for it before requesting a new resync.
2739          * Also wait for its after_state_ch(). */
2740         drbd_suspend_io(device);
2741         wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
2742         drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
2743
2744         /* If we happen to be C_STANDALONE R_SECONDARY, just change to
2745          * D_INCONSISTENT, and set all bits in the bitmap.  Otherwise,
2746          * try to start a resync handshake as sync target for full sync.
2747          */
2748         if (device->state.conn == C_STANDALONE && device->state.role == R_SECONDARY) {
2749                 retcode = drbd_request_state(device, NS(disk, D_INCONSISTENT));
2750                 if (retcode >= SS_SUCCESS) {
2751                         if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
2752                                 "set_n_write from invalidate", BM_LOCKED_MASK))
2753                                 retcode = ERR_IO_MD_DISK;
2754                 }
2755         } else
2756                 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
2757         drbd_resume_io(device);
2758         mutex_unlock(&adm_ctx.resource->adm_mutex);
2759         put_ldev(device);
2760 out:
2761         drbd_adm_finish(&adm_ctx, info, retcode);
2762         return 0;
2763 }
2764
2765 static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2766                 union drbd_state mask, union drbd_state val)
2767 {
2768         struct drbd_config_context adm_ctx;
2769         enum drbd_ret_code retcode;
2770
2771         retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2772         if (!adm_ctx.reply_skb)
2773                 return retcode;
2774         if (retcode != NO_ERROR)
2775                 goto out;
2776
2777         mutex_lock(&adm_ctx.resource->adm_mutex);
2778         retcode = drbd_request_state(adm_ctx.device, mask, val);
2779         mutex_unlock(&adm_ctx.resource->adm_mutex);
2780 out:
2781         drbd_adm_finish(&adm_ctx, info, retcode);
2782         return 0;
2783 }
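/* Usage note (illustrative): the NS(T, S) helper macro expands into a
 * (mask, val) pair of union drbd_state arguments with only the named
 * field set, so drbd_adm_suspend_io() below boils down to
 *
 *      drbd_request_state(device, NS(susp, 1));
 *
 * i.e. "set the susp field to 1, leave all other state fields alone". */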
2784
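/* Bitmap-IO worker used by invalidate-peer below: set (and write out) all
 * bits in the on-disk bitmap, then suspend activity log updates. */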
2785 static int drbd_bmio_set_susp_al(struct drbd_device *device) __must_hold(local)
2786 {
2787         int rv;
2788
2789         rv = drbd_bmio_set_n_write(device);
2790         drbd_suspend_al(device);
2791         return rv;
2792 }
2793
2794 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
2795 {
2796         struct drbd_config_context adm_ctx;
2797         int retcode; /* drbd_ret_code, drbd_state_rv */
2798         struct drbd_device *device;
2799
2800         retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2801         if (!adm_ctx.reply_skb)
2802                 return retcode;
2803         if (retcode != NO_ERROR)
2804                 goto out;
2805
2806         device = adm_ctx.device;
2807         if (!get_ldev(device)) {
2808                 retcode = ERR_NO_DISK;
2809                 goto out;
2810         }
2811
2812         mutex_lock(&adm_ctx.resource->adm_mutex);
2813
2814         /* If there is still bitmap IO pending, probably because of a previous
2815          * resync just being finished, wait for it before requesting a new resync.
2816          * Also wait for its after_state_ch(). */
2817         drbd_suspend_io(device);
2818         wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
2819         drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
2820
2821         /* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
2822          * in the bitmap.  Otherwise, try to start a resync handshake
2823          * as sync source for full sync.
2824          */
2825         if (device->state.conn == C_STANDALONE && device->state.role == R_PRIMARY) {
2826                 /* The peer will get a resync upon connect anyway. Just make that
2827                    into a full resync. */
2828                 retcode = drbd_request_state(device, NS(pdsk, D_INCONSISTENT));
2829                 if (retcode >= SS_SUCCESS) {
2830                         if (drbd_bitmap_io(device, &drbd_bmio_set_susp_al,
2831                                 "set_n_write from invalidate_peer",
2832                                 BM_LOCKED_SET_ALLOWED))
2833                                 retcode = ERR_IO_MD_DISK;
2834                 }
2835         } else
2836                 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
2837         drbd_resume_io(device);
2838         mutex_unlock(&adm_ctx.resource->adm_mutex);
2839         put_ldev(device);
2840 out:
2841         drbd_adm_finish(&adm_ctx, info, retcode);
2842         return 0;
2843 }
2844
2845 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
2846 {
2847         struct drbd_config_context adm_ctx;
2848         enum drbd_ret_code retcode;
2849
2850         retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2851         if (!adm_ctx.reply_skb)
2852                 return retcode;
2853         if (retcode != NO_ERROR)
2854                 goto out;
2855
2856         mutex_lock(&adm_ctx.resource->adm_mutex);
2857         if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
2858                 retcode = ERR_PAUSE_IS_SET;
2859         mutex_unlock(&adm_ctx.resource->adm_mutex);
2860 out:
2861         drbd_adm_finish(&adm_ctx, info, retcode);
2862         return 0;
2863 }
2864
2865 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
2866 {
2867         struct drbd_config_context adm_ctx;
2868         union drbd_dev_state s;
2869         enum drbd_ret_code retcode;
2870
2871         retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2872         if (!adm_ctx.reply_skb)
2873                 return retcode;
2874         if (retcode != NO_ERROR)
2875                 goto out;
2876
2877         mutex_lock(&adm_ctx.resource->adm_mutex);
2878         if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
2879                 s = adm_ctx.device->state;
2880                 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
2881                         retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
2882                                   s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
2883                 } else {
2884                         retcode = ERR_PAUSE_IS_CLEAR;
2885                 }
2886         }
2887         mutex_unlock(&adm_ctx.resource->adm_mutex);
2888 out:
2889         drbd_adm_finish(&adm_ctx, info, retcode);
2890         return 0;
2891 }
2892
2893 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
2894 {
2895         return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
2896 }
2897
2898 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
2899 {
2900         struct drbd_config_context adm_ctx;
2901         struct drbd_device *device;
2902         int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2903
2904         retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2905         if (!adm_ctx.reply_skb)
2906                 return retcode;
2907         if (retcode != NO_ERROR)
2908                 goto out;
2909
2910         mutex_lock(&adm_ctx.resource->adm_mutex);
2911         device = adm_ctx.device;
2912         if (test_bit(NEW_CUR_UUID, &device->flags)) {
2913                 drbd_uuid_new_current(device);
2914                 clear_bit(NEW_CUR_UUID, &device->flags);
2915         }
2916         drbd_suspend_io(device);
2917         retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
2918         if (retcode == SS_SUCCESS) {
2919                 if (device->state.conn < C_CONNECTED)
2920                         tl_clear(first_peer_device(device)->connection);
2921                 if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED)
2922                         tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
2923         }
2924         drbd_resume_io(device);
2925         mutex_unlock(&adm_ctx.resource->adm_mutex);
2926 out:
2927         drbd_adm_finish(&adm_ctx, info, retcode);
2928         return 0;
2929 }
2930
2931 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
2932 {
2933         return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
2934 }
2935
2936 static int nla_put_drbd_cfg_context(struct sk_buff *skb,
2937                                     struct drbd_resource *resource,
2938                                     struct drbd_connection *connection,
2939                                     struct drbd_device *device)
2940 {
2941         struct nlattr *nla;
2942         nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
2943         if (!nla)
2944                 goto nla_put_failure;
2945         if (device &&
2946             nla_put_u32(skb, T_ctx_volume, device->vnr))
2947                 goto nla_put_failure;
2948         if (nla_put_string(skb, T_ctx_resource_name, resource->name))
2949                 goto nla_put_failure;
2950         if (connection) {
2951                 if (connection->my_addr_len &&
2952                     nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr))
2953                         goto nla_put_failure;
2954                 if (connection->peer_addr_len &&
2955                     nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr))
2956                         goto nla_put_failure;
2957         }
2958         nla_nest_end(skb, nla);
2959         return 0;
2960
2961 nla_put_failure:
2962         if (nla)
2963                 nla_nest_cancel(skb, nla);
2964         return -EMSGSIZE;
2965 }
2966
2967 /*
2968  * Return the connection of @resource if @resource has exactly one connection.
2969  */
2970 static struct drbd_connection *the_only_connection(struct drbd_resource *resource)
2971 {
2972         struct list_head *connections = &resource->connections;
2973
2974         if (list_empty(connections) || connections->next->next != connections)
2975                 return NULL;
2976         return list_first_entry(&resource->connections, struct drbd_connection, connections);
2977 }
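/* Sketch only (not compiled in): the open-coded test above is the
 * "exactly one entry" check that list_is_singular() from <linux/list.h>
 * expresses directly, assuming an equivalent rewrite were wanted:
 *
 *      static struct drbd_connection *
 *      the_only_connection_sketch(struct drbd_resource *resource)
 *      {
 *              if (!list_is_singular(&resource->connections))
 *                      return NULL;
 *              return list_first_entry(&resource->connections,
 *                                      struct drbd_connection, connections);
 *      }
 */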
2978
2979 static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
2980                 const struct sib_info *sib)
2981 {
2982         struct drbd_resource *resource = device->resource;
2983         struct state_info *si = NULL; /* for sizeof(si->member); */
2984         struct nlattr *nla;
2985         int got_ldev;
2986         int err = 0;
2987         int exclude_sensitive;
2988
2989         /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
2990          * to.  So we better exclude_sensitive information.
2991          *
2992          * If sib == NULL, this is drbd_adm_get_status, executed synchronously
2993          * in the context of the requesting user process. Exclude sensitive
2994          * information, unless current has superuser.
2995          *
2996          * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
2997          * relies on the current implementation of netlink_dump(), which
2998          * executes the dump callback successively from netlink_recvmsg(),
2999          * always in the context of the receiving process */
3000         exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
3001
3002         got_ldev = get_ldev(device);
3003
3004         /* We still need to add the connection name and volume number information.
3005          * Minor number is in drbd_genlmsghdr. */
3006         if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device))
3007                 goto nla_put_failure;
3008
3009         if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive))
3010                 goto nla_put_failure;
3011
3012         rcu_read_lock();
3013         if (got_ldev) {
3014                 struct disk_conf *disk_conf;
3015
3016                 disk_conf = rcu_dereference(device->ldev->disk_conf);
3017                 err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
3018         }
3019         if (!err) {
3020                 struct net_conf *nc;
3021
3022                 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
3023                 if (nc)
3024                         err = net_conf_to_skb(skb, nc, exclude_sensitive);
3025         }
3026         rcu_read_unlock();
3027         if (err)
3028                 goto nla_put_failure;
3029
3030         nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
3031         if (!nla)
3032                 goto nla_put_failure;
3033         if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
3034             nla_put_u32(skb, T_current_state, device->state.i) ||
3035             nla_put_u64(skb, T_ed_uuid, device->ed_uuid) ||
3036             nla_put_u64(skb, T_capacity, drbd_get_capacity(device->this_bdev)) ||
3037             nla_put_u64(skb, T_send_cnt, device->send_cnt) ||
3038             nla_put_u64(skb, T_recv_cnt, device->recv_cnt) ||
3039             nla_put_u64(skb, T_read_cnt, device->read_cnt) ||
3040             nla_put_u64(skb, T_writ_cnt, device->writ_cnt) ||
3041             nla_put_u64(skb, T_al_writ_cnt, device->al_writ_cnt) ||
3042             nla_put_u64(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
3043             nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
3044             nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
3045             nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
3046                 goto nla_put_failure;
3047
3048         if (got_ldev) {
3049                 int err;
3050
3051                 spin_lock_irq(&device->ldev->md.uuid_lock);
3052                 err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid);
3053                 spin_unlock_irq(&device->ldev->md.uuid_lock);
3054
3055                 if (err)
3056                         goto nla_put_failure;
3057
3058                 if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
3059                     nla_put_u64(skb, T_bits_total, drbd_bm_bits(device)) ||
3060                     nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(device)))
3061                         goto nla_put_failure;
3062                 if (C_SYNC_SOURCE <= device->state.conn &&
3063                     C_PAUSED_SYNC_T >= device->state.conn) {
3064                         if (nla_put_u64(skb, T_bits_rs_total, device->rs_total) ||
3065                             nla_put_u64(skb, T_bits_rs_failed, device->rs_failed))
3066                                 goto nla_put_failure;
3067                 }
3068         }
3069
3070         if (sib) {
3071                 switch(sib->sib_reason) {
3072                 case SIB_SYNC_PROGRESS:
3073                 case SIB_GET_STATUS_REPLY:
3074                         break;
3075                 case SIB_STATE_CHANGE:
3076                         if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
3077                             nla_put_u32(skb, T_new_state, sib->ns.i))
3078                                 goto nla_put_failure;
3079                         break;
3080                 case SIB_HELPER_POST:
3081                         if (nla_put_u32(skb, T_helper_exit_code,
3082                                         sib->helper_exit_code))
3083                                 goto nla_put_failure;
3084                         /* fall through */
3085                 case SIB_HELPER_PRE:
3086                         if (nla_put_string(skb, T_helper, sib->helper_name))
3087                                 goto nla_put_failure;
3088                         break;
3089                 }
3090         }
3091         nla_nest_end(skb, nla);
3092
3093         if (0)
3094 nla_put_failure:
3095                 err = -EMSGSIZE;
3096         if (got_ldev)
3097                 put_ldev(device);
3098         return err;
3099 }
3100
3101 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
3102 {
3103         struct drbd_config_context adm_ctx;
3104         enum drbd_ret_code retcode;
3105         int err;
3106
3107         retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3108         if (!adm_ctx.reply_skb)
3109                 return retcode;
3110         if (retcode != NO_ERROR)
3111                 goto out;
3112
3113         err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL);
3114         if (err) {
3115                 nlmsg_free(adm_ctx.reply_skb);
3116                 return err;
3117         }
3118 out:
3119         drbd_adm_finish(&adm_ctx, info, retcode);
3120         return 0;
3121 }
3122
3123 static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
3124 {
3125         struct drbd_device *device;
3126         struct drbd_genlmsghdr *dh;
3127         struct drbd_resource *pos = (struct drbd_resource *)cb->args[0];
3128         struct drbd_resource *resource = NULL;
3129         struct drbd_resource *tmp;
3130         unsigned volume = cb->args[1];
3131
3132         /* Open-coded, deferred iteration:
3133          * for_each_resource_safe(resource, tmp, &drbd_resources) {
3134          *      connection = "first connection of resource or undefined";
3135          *      idr_for_each_entry(&resource->devices, device, i) {
3136          *        ...
3137          *      }
3138          * }
3139          * where resource is cb->args[0];
3140          * and i is cb->args[1];
3141          *
3142          * cb->args[2] indicates if we shall loop over all resources,
3143          * or just dump all volumes of a single resource.
3144          *
3145          * This may miss entries inserted after this dump started,
3146          * or entries deleted before they are reached.
3147          *
3148          * We need to make sure the device won't disappear while
3149          * we are looking at it, and revalidate our iterators
3150          * on each iteration.
3151          */
3152
3153         /* synchronize with conn_create()/drbd_destroy_connection() */
3154         rcu_read_lock();
3155         /* revalidate iterator position */
3156         for_each_resource_rcu(tmp, &drbd_resources) {
3157                 if (pos == NULL) {
3158                         /* first iteration */
3159                         pos = tmp;
3160                         resource = pos;
3161                         break;
3162                 }
3163                 if (tmp == pos) {
3164                         resource = pos;
3165                         break;
3166                 }
3167         }
3168         if (resource) {
3169 next_resource:
3170                 device = idr_get_next(&resource->devices, &volume);
3171                 if (!device) {
3172                         /* No more volumes to dump on this resource.
3173                          * Advance resource iterator. */
3174                         pos = list_entry_rcu(resource->resources.next,
3175                                              struct drbd_resource, resources);
3176                         /* Did we dump any volume of this resource yet? */
3177                         if (volume != 0) {
3178                                 /* If we reached the end of the list,
3179                                  * or only a single resource dump was requested,
3180                                  * we are done. */
3181                                 if (&pos->resources == &drbd_resources || cb->args[2])
3182                                         goto out;
3183                                 volume = 0;
3184                                 resource = pos;
3185                                 goto next_resource;
3186                         }
3187                 }
3188
3189                 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3190                                 cb->nlh->nlmsg_seq, &drbd_genl_family,
3191                                 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
3192                 if (!dh)
3193                         goto out;
3194
3195                 if (!device) {
3196                         /* This is a connection without a single volume.
3197                          * Surprisingly enough, it may have a network
3198                          * configuration. */
3199                         struct drbd_connection *connection;
3200
3201                         dh->minor = -1U;
3202                         dh->ret_code = NO_ERROR;
3203                         connection = the_only_connection(resource);
3204                         if (nla_put_drbd_cfg_context(skb, resource, connection, NULL))
3205                                 goto cancel;
3206                         if (connection) {
3207                                 struct net_conf *nc;
3208
3209                                 nc = rcu_dereference(connection->net_conf);
3210                                 if (nc && net_conf_to_skb(skb, nc, 1) != 0)
3211                                         goto cancel;
3212                         }
3213                         goto done;
3214                 }
3215
3216                 D_ASSERT(device, device->vnr == volume);
3217                 D_ASSERT(device, device->resource == resource);
3218
3219                 dh->minor = device_to_minor(device);
3220                 dh->ret_code = NO_ERROR;
3221
3222                 if (nla_put_status_info(skb, device, NULL)) {
3223 cancel:
3224                         genlmsg_cancel(skb, dh);
3225                         goto out;
3226                 }
3227 done:
3228                 genlmsg_end(skb, dh);
3229         }
3230
3231 out:
3232         rcu_read_unlock();
3233         /* where to start the next iteration */
3234         cb->args[0] = (long)pos;
3235         cb->args[1] = (pos == resource) ? volume + 1 : 0;
3236
3237         /* Finding no more resources/volumes/minors results in an empty skb,
3238          * which will terminate the dump. */
3239         return skb->len;
3240 }
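/* The cb->args[] cursor handling above follows the generic netlink dump
 * contract.  Stripped-down sketch of the shape (names invented for
 * illustration, not compiled in):
 *
 *      static int example_dump(struct sk_buff *skb,
 *                              struct netlink_callback *cb)
 *      {
 *              long cursor = cb->args[0];  // resume point, 0 on first call
 *
 *              // emit as many NLM_F_MULTI messages as fit into skb,
 *              // advancing cursor after each one
 *
 *              cb->args[0] = cursor;       // where to pick up next time
 *              return skb->len;            // an empty skb ends the dump
 *      }
 *
 * netlink_dump() keeps invoking the callback until it produces an empty
 * skb, which is exactly how get_one_status() terminates. */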
3241
3242 /*
3243  * Request status of all resources, or of all volumes within a single resource.
3244  *
3245  * This is a dump, as the answer may not fit in a single reply skb otherwise.
3246  * That means we cannot use the family->attrbuf or other such members, because
3247  * dump is NOT protected by the genl_lock().  During dump, we only have access
3248  * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
3249  *
3250  * Once things are setup properly, we call into get_one_status().
3251  */
3252 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
3253 {
3254         const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
3255         struct nlattr *nla;
3256         const char *resource_name;
3257         struct drbd_resource *resource;
3258         int maxtype;
3259
3260         /* Is this a followup call? */
3261         if (cb->args[0]) {
3262                 /* ... of a single resource dump,
3263                  * and the resource iterator has been advanced already? */
3264                 if (cb->args[2] && cb->args[2] != cb->args[0])
3265                         return 0; /* DONE. */
3266                 goto dump;
3267         }
3268
3269         /* First call (from netlink_dump_start).  We need to figure out
3270          * which resource(s) the user wants us to dump. */
3271         nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
3272                         nlmsg_attrlen(cb->nlh, hdrlen),
3273                         DRBD_NLA_CFG_CONTEXT);
3274
3275         /* No explicit context given.  Dump all. */
3276         if (!nla)
3277                 goto dump;
3278         maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
3279         nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
3280         if (IS_ERR(nla))
3281                 return PTR_ERR(nla);
3282         /* context given, but no name present? */
3283         if (!nla)
3284                 return -EINVAL;
3285         resource_name = nla_data(nla);
3286         if (!*resource_name)
3287                 return -ENODEV;
3288         resource = drbd_find_resource(resource_name);
3289         if (!resource)
3290                 return -ENODEV;
3291
3292         kref_put(&resource->kref, drbd_destroy_resource); /* get_one_status() revalidates the resource */
3293
3294         /* prime iterators, and set "filter" mode mark:
3295          * only dump this resource. */
3296         cb->args[0] = (long)resource;
3297         /* cb->args[1] = 0; passed in this way. */
3298         cb->args[2] = (long)resource;
3299
3300 dump:
3301         return get_one_status(skb, cb);
3302 }
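/* Usage note: user space requests this dump by sending DRBD_ADM_GET_STATUS
 * with NLM_F_DUMP set.  To restrict the dump to a single resource, it
 * nests a T_ctx_resource_name attribute inside DRBD_NLA_CFG_CONTEXT --
 * which is exactly what the opencoded nla_find()/drbd_nla_find_nested()
 * above digs out before priming cb->args[]. */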
3303
3304 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
3305 {
3306         struct drbd_config_context adm_ctx;
3307         enum drbd_ret_code retcode;
3308         struct timeout_parms tp;
3309         int err;
3310
3311         retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3312         if (!adm_ctx.reply_skb)
3313                 return retcode;
3314         if (retcode != NO_ERROR)
3315                 goto out;
3316
3317         tp.timeout_type =
3318                 adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
3319                 test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? UT_DEGRADED :
3320                 UT_DEFAULT;
3321
3322         err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
3323         if (err) {
3324                 nlmsg_free(adm_ctx.reply_skb);
3325                 return err;
3326         }
3327 out:
3328         drbd_adm_finish(&adm_ctx, info, retcode);
3329         return 0;
3330 }
3331
3332 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
3333 {
3334         struct drbd_config_context adm_ctx;
3335         struct drbd_device *device;
3336         enum drbd_ret_code retcode;
3337         struct start_ov_parms parms;
3338
3339         retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3340         if (!adm_ctx.reply_skb)
3341                 return retcode;
3342         if (retcode != NO_ERROR)
3343                 goto out;
3344
3345         device = adm_ctx.device;
3346
3347         /* resume from last known position, if possible */
3348         parms.ov_start_sector = device->ov_start_sector;
3349         parms.ov_stop_sector = ULLONG_MAX;
3350         if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
3351                 int err = start_ov_parms_from_attrs(&parms, info);
3352                 if (err) {
3353                         retcode = ERR_MANDATORY_TAG;
3354                         drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
3355                         goto out;
3356                 }
3357         }
3358         mutex_lock(&adm_ctx.resource->adm_mutex);
3359
3360         /* w_make_ov_request expects position to be aligned */
3361         device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
3362         device->ov_stop_sector = parms.ov_stop_sector;
3363
3364         /* If there is still bitmap IO pending, e.g. previous resync or verify
3365          * just being finished, wait for it before requesting a new resync. */
3366         drbd_suspend_io(device);
3367         wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
3368         retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
3369         drbd_resume_io(device);
3370
3371         mutex_unlock(&adm_ctx.resource->adm_mutex);
3372 out:
3373         drbd_adm_finish(&adm_ctx, info, retcode);
3374         return 0;
3375 }
3376
3377
3378 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
3379 {
3380         struct drbd_config_context adm_ctx;
3381         struct drbd_device *device;
3382         enum drbd_ret_code retcode;
3383         int skip_initial_sync = 0;
3384         int err;
3385         struct new_c_uuid_parms args;
3386
3387         retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3388         if (!adm_ctx.reply_skb)
3389                 return retcode;
3390         if (retcode != NO_ERROR)
3391                 goto out_nolock;
3392
3393         device = adm_ctx.device;
3394         memset(&args, 0, sizeof(args));
3395         if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
3396                 err = new_c_uuid_parms_from_attrs(&args, info);
3397                 if (err) {
3398                         retcode = ERR_MANDATORY_TAG;
3399                         drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
3400                         goto out_nolock;
3401                 }
3402         }
3403
3404         mutex_lock(&adm_ctx.resource->adm_mutex);
3405         mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */
3406
3407         if (!get_ldev(device)) {
3408                 retcode = ERR_NO_DISK;
3409                 goto out;
3410         }
3411
3412         /* this is "skip initial sync", assumed to be clean */
3413         if (device->state.conn == C_CONNECTED &&
3414             first_peer_device(device)->connection->agreed_pro_version >= 90 &&
3415             device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
3416                 drbd_info(device, "Preparing to skip initial sync\n");
3417                 skip_initial_sync = 1;
3418         } else if (device->state.conn != C_STANDALONE) {
3419                 retcode = ERR_CONNECTED;
3420                 goto out_dec;
3421         }
3422
3423         drbd_uuid_set(device, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
3424         drbd_uuid_new_current(device); /* New current, previous to UI_BITMAP */
3425
3426         if (args.clear_bm) {
3427                 err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
3428                         "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
3429                 if (err) {
3430                         drbd_err(device, "Writing bitmap failed with %d\n", err);
3431                         retcode = ERR_IO_MD_DISK;
3432                 }
3433                 if (skip_initial_sync) {
3434                         drbd_send_uuids_skip_initial_sync(first_peer_device(device));
3435                         _drbd_uuid_set(device, UI_BITMAP, 0);
3436                         drbd_print_uuids(device, "cleared bitmap UUID");
3437                         spin_lock_irq(&device->resource->req_lock);
3438                         _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3439                                         CS_VERBOSE, NULL);
3440                         spin_unlock_irq(&device->resource->req_lock);
3441                 }
3442         }
3443
3444         drbd_md_sync(device);
3445 out_dec:
3446         put_ldev(device);
3447 out:
3448         mutex_unlock(device->state_mutex);
3449         mutex_unlock(&adm_ctx.resource->adm_mutex);
3450 out_nolock:
3451         drbd_adm_finish(&adm_ctx, info, retcode);
3452         return 0;
3453 }
3454
3455 static enum drbd_ret_code
3456 drbd_check_resource_name(struct drbd_config_context *adm_ctx)
3457 {
3458         const char *name = adm_ctx->resource_name;
3459         if (!name || !name[0]) {
3460                 drbd_msg_put_info(adm_ctx->reply_skb, "resource name missing");
3461                 return ERR_MANDATORY_TAG;
3462         }
3463         /* if we want to use these in sysfs/configfs/debugfs some day,
3464          * we must not allow slashes */
3465         if (strchr(name, '/')) {
3466                 drbd_msg_put_info(adm_ctx->reply_skb, "invalid resource name");
3467                 return ERR_INVALID_REQUEST;
3468         }
3469         return NO_ERROR;
3470 }
3471
3472 static void resource_to_info(struct resource_info *info,
3473                              struct drbd_resource *resource)
3474 {
3475         info->res_role = conn_highest_role(first_connection(resource));
3476         info->res_susp = resource->susp;
3477         info->res_susp_nod = resource->susp_nod;
3478         info->res_susp_fen = resource->susp_fen;
3479 }
3480
3481 int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
3482 {
3483         struct drbd_connection *connection;
3484         struct drbd_config_context adm_ctx;
3485         enum drbd_ret_code retcode;
3486         struct res_opts res_opts;
3487         int err;
3488
3489         retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0);
3490         if (!adm_ctx.reply_skb)
3491                 return retcode;
3492         if (retcode != NO_ERROR)
3493                 goto out;
3494
3495         set_res_opts_defaults(&res_opts);
3496         err = res_opts_from_attrs(&res_opts, info);
3497         if (err && err != -ENOMSG) {
3498                 retcode = ERR_MANDATORY_TAG;
3499                 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
3500                 goto out;
3501         }
3502
3503         retcode = drbd_check_resource_name(&adm_ctx);
3504         if (retcode != NO_ERROR)
3505                 goto out;
3506
3507         if (adm_ctx.resource) {
3508                 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
3509                         retcode = ERR_INVALID_REQUEST;
3510                         drbd_msg_put_info(adm_ctx.reply_skb, "resource exists");
3511                 }
3512                 /* else: still NO_ERROR */
3513                 goto out;
3514         }
3515
3516         /* not yet safe for genl_family.parallel_ops */
3517         mutex_lock(&resources_mutex);
3518         connection = conn_create(adm_ctx.resource_name, &res_opts);
3519         mutex_unlock(&resources_mutex);
3520
3521         if (connection) {
3522                 struct resource_info resource_info;
3523
3524                 mutex_lock(&notification_mutex);
3525                 resource_to_info(&resource_info, connection->resource);
3526                 notify_resource_state(NULL, 0, connection->resource,
3527                                       &resource_info, NOTIFY_CREATE);
3528                 mutex_unlock(&notification_mutex);
3529         } else
3530                 retcode = ERR_NOMEM;
3531
3532 out:
3533         drbd_adm_finish(&adm_ctx, info, retcode);
3534         return 0;
3535 }
3536
3537 static void device_to_info(struct device_info *info,
3538                            struct drbd_device *device)
3539 {
3540         info->dev_disk_state = device->state.disk;
3541 }
3542
3543
3544 int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
3545 {
3546         struct drbd_config_context adm_ctx;
3547         struct drbd_genlmsghdr *dh = info->userhdr;
3548         enum drbd_ret_code retcode;
3549
3550         retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
3551         if (!adm_ctx.reply_skb)
3552                 return retcode;
3553         if (retcode != NO_ERROR)
3554                 goto out;
3555
3556         if (dh->minor > MINORMASK) {
3557                 drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range");
3558                 retcode = ERR_INVALID_REQUEST;
3559                 goto out;
3560         }
3561         if (adm_ctx.volume > DRBD_VOLUME_MAX) {
3562                 drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range");
3563                 retcode = ERR_INVALID_REQUEST;
3564                 goto out;
3565         }
3566
3567         /* drbd_adm_prepare made sure already
3568          * that first_peer_device(device)->connection and device->vnr match the request. */
3569         if (adm_ctx.device) {
3570                 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
3571                         retcode = ERR_MINOR_OR_VOLUME_EXISTS;
3572                 /* else: still NO_ERROR */
3573                 goto out;
3574         }
3575
3576         mutex_lock(&adm_ctx.resource->adm_mutex);
3577         retcode = drbd_create_device(&adm_ctx, dh->minor);
3578         if (retcode == NO_ERROR) {
3579                 struct drbd_device *device;
3580                 struct drbd_peer_device *peer_device;
3581                 struct device_info info;
3582                 unsigned int peer_devices = 0;
3583                 enum drbd_notification_type flags;
3584
3585                 device = minor_to_device(dh->minor);
3586                 for_each_peer_device(peer_device, device) {
3587                         if (!has_net_conf(peer_device->connection))
3588                                 continue;
3589                         peer_devices++;
3590                 }
3591
3592                 device_to_info(&info, device);
3593                 mutex_lock(&notification_mutex);
3594                 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
3595                 notify_device_state(NULL, 0, device, &info, NOTIFY_CREATE | flags);
3596                 for_each_peer_device(peer_device, device) {
3597                         struct peer_device_info peer_device_info;
3598
3599                         if (!has_net_conf(peer_device->connection))
3600                                 continue;
3601                         peer_device_to_info(&peer_device_info, peer_device);
3602                         flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
3603                         notify_peer_device_state(NULL, 0, peer_device, &peer_device_info,
3604                                                  NOTIFY_CREATE | flags);
3605                 }
3606                 mutex_unlock(&notification_mutex);
3607         }
3608         mutex_unlock(&adm_ctx.resource->adm_mutex);
3609 out:
3610         drbd_adm_finish(&adm_ctx, info, retcode);
3611         return 0;
3612 }
3613
3614 static enum drbd_ret_code adm_del_minor(struct drbd_device *device)
3615 {
3616         struct drbd_peer_device *peer_device;
3617
3618         if (device->state.disk == D_DISKLESS &&
3619             /* no need to also require device->state.conn == C_STANDALONE;
3620              * we may want to delete a minor from a live replication group.
3621              */
3622             device->state.role == R_SECONDARY) {
3623                 struct drbd_connection *connection =
3624                         first_connection(device->resource);
3625
3626                 _drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
3627                                     CS_VERBOSE + CS_WAIT_COMPLETE);
3628
3629                 /* If the state engine hasn't stopped the sender thread yet, we
3630                  * need to flush the sender work queue before generating the
3631                  * DESTROY events here. */
3632                 if (get_t_state(&connection->worker) == RUNNING)
3633                         drbd_flush_workqueue(&connection->sender_work);
3634
3635                 mutex_lock(&notification_mutex);
3636                 for_each_peer_device(peer_device, device) {
3637                         if (!has_net_conf(peer_device->connection))
3638                                 continue;
3639                         notify_peer_device_state(NULL, 0, peer_device, NULL,
3640                                                  NOTIFY_DESTROY | NOTIFY_CONTINUES);
3641                 }
3642                 notify_device_state(NULL, 0, device, NULL, NOTIFY_DESTROY);
3643                 mutex_unlock(&notification_mutex);
3644
3645                 drbd_delete_device(device);
3646                 return NO_ERROR;
3647         } else
3648                 return ERR_MINOR_CONFIGURED;
3649 }
3650
3651 int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
3652 {
3653         struct drbd_config_context adm_ctx;
3654         enum drbd_ret_code retcode;
3655
3656         retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3657         if (!adm_ctx.reply_skb)
3658                 return retcode;
3659         if (retcode != NO_ERROR)
3660                 goto out;
3661
3662         mutex_lock(&adm_ctx.resource->adm_mutex);
3663         retcode = adm_del_minor(adm_ctx.device);
3664         mutex_unlock(&adm_ctx.resource->adm_mutex);
3665 out:
3666         drbd_adm_finish(&adm_ctx, info, retcode);
3667         return 0;
3668 }
3669
3670 static int adm_del_resource(struct drbd_resource *resource)
3671 {
3672         struct drbd_connection *connection;
3673
3674         for_each_connection(connection, resource) {
3675                 if (connection->cstate > C_STANDALONE)
3676                         return ERR_NET_CONFIGURED;
3677         }
3678         if (!idr_is_empty(&resource->devices))
3679                 return ERR_RES_IN_USE;
3680
3681         /* The state engine has stopped the sender thread, so we don't
3682          * need to flush the sender work queue before generating the
3683          * DESTROY event here. */
3684         mutex_lock(&notification_mutex);
3685         notify_resource_state(NULL, 0, resource, NULL, NOTIFY_DESTROY);
3686         mutex_unlock(&notification_mutex);
3687
3688         mutex_lock(&resources_mutex);
3689         list_del_rcu(&resource->resources);
3690         mutex_unlock(&resources_mutex);
3691         /* Make sure all threads have actually stopped: state handling only
3692          * does drbd_thread_stop_nowait(). */
3693         list_for_each_entry(connection, &resource->connections, connections)
3694                 drbd_thread_stop(&connection->worker);
3695         synchronize_rcu();
3696         drbd_free_resource(resource);
3697         return NO_ERROR;
3698 }
3699
3700 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
3701 {
3702         struct drbd_config_context adm_ctx;
3703         struct drbd_resource *resource;
3704         struct drbd_connection *connection;
3705         struct drbd_device *device;
3706         int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
3707         unsigned i;
3708
3709         retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
3710         if (!adm_ctx.reply_skb)
3711                 return retcode;
3712         if (retcode != NO_ERROR)
3713                 goto finish;
3714
3715         resource = adm_ctx.resource;
3716         mutex_lock(&resource->adm_mutex);
3717         /* demote */
3718         for_each_connection(connection, resource) {
3719                 struct drbd_peer_device *peer_device;
3720
3721                 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
3722                         retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
3723                         if (retcode < SS_SUCCESS) {
3724                                 drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote");
3725                                 goto out;
3726                         }
3727                 }
3728
3729                 retcode = conn_try_disconnect(connection, 0);
3730                 if (retcode < SS_SUCCESS) {
3731                         drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect");
3732                         goto out;
3733                 }
3734         }
3735
3736         /* detach */
3737         idr_for_each_entry(&resource->devices, device, i) {
3738                 retcode = adm_detach(device, 0);
3739                 if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
3740                         drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach");
3741                         goto out;
3742                 }
3743         }
3744
3745         /* delete volumes */
3746         idr_for_each_entry(&resource->devices, device, i) {
3747                 retcode = adm_del_minor(device);
3748                 if (retcode != NO_ERROR) {
3749                         /* "can not happen" */
3750                         drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume");
3751                         goto out;
3752                 }
3753         }
3754
3755         retcode = adm_del_resource(resource);
3756 out:
3757         mutex_unlock(&resource->adm_mutex);
3758 finish:
3759         drbd_adm_finish(&adm_ctx, info, retcode);
3760         return 0;
3761 }
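/* The teardown order above -- demote every volume, disconnect every
 * connection, detach, delete the minors, then the resource itself -- is
 * what a single "down" request performs in one shot.  An illustrative
 * per-step equivalent, assuming the usual drbdsetup command set:
 *
 *      drbdsetup secondary <minor>       # demote
 *      drbdsetup disconnect ...          # per connection
 *      drbdsetup detach <minor>
 *      drbdsetup del-minor <minor>
 *      drbdsetup del-resource <name>
 */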
3762
3763 int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
3764 {
3765         struct drbd_config_context adm_ctx;
3766         struct drbd_resource *resource;
3767         enum drbd_ret_code retcode;
3768
3769         retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
3770         if (!adm_ctx.reply_skb)
3771                 return retcode;
3772         if (retcode != NO_ERROR)
3773                 goto finish;
3774         resource = adm_ctx.resource;
3775
3776         mutex_lock(&resource->adm_mutex);
3777         retcode = adm_del_resource(resource);
3778         mutex_unlock(&resource->adm_mutex);
3779 finish:
3780         drbd_adm_finish(&adm_ctx, info, retcode);
3781         return 0;
3782 }
3783
3784 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
3785 {
3786         struct sk_buff *msg;
3787         struct drbd_genlmsghdr *d_out;
3788         unsigned seq;
3789         int err = -ENOMEM;
3790
3791         seq = atomic_inc_return(&drbd_genl_seq);
3792         msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
3793         if (!msg)
3794                 goto failed;
3795
3796         err = -EMSGSIZE;
3797         d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
3798         if (!d_out) /* cannot happen, but anyway. */
3799                 goto nla_put_failure;
3800         d_out->minor = device_to_minor(device);
3801         d_out->ret_code = NO_ERROR;
3802
3803         if (nla_put_status_info(msg, device, sib))
3804                 goto nla_put_failure;
3805         genlmsg_end(msg, d_out);
3806         err = drbd_genl_multicast_events(msg, 0);
3807         /* msg has been consumed or freed in netlink_broadcast() */
3808         if (err && err != -ESRCH)
3809                 goto failed;
3810
3811         return;
3812
3813 nla_put_failure:
3814         nlmsg_free(msg);
3815 failed:
3816         drbd_err(device, "Error %d while broadcasting event. "
3817                         "Event seq:%u sib_reason:%u\n",
3818                         err, seq, sib->sib_reason);
3819 }
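/* Receiving these broadcasts from user space -- a minimal libnl-3 sketch
 * (assumption: this mirrors what the drbd-utils event commands do; the
 * generic netlink family is named "drbd", the multicast group "events"):
 *
 *      struct nl_sock *sk = nl_socket_alloc();
 *      genl_connect(sk);
 *      int grp = genl_ctrl_resolve_grp(sk, "drbd", "events");
 *      nl_socket_add_membership(sk, grp);
 *      nl_socket_disable_seq_check(sk);   // events are unsolicited
 *      for (;;)
 *              nl_recvmsgs_default(sk);   // parse DRBD_EVENT payloads
 */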
3820
3821 static void device_to_statistics(struct device_statistics *s,
3822                                  struct drbd_device *device)
3823 {
3824         memset(s, 0, sizeof(*s));
3825         s->dev_upper_blocked = !may_inc_ap_bio(device);
3826         if (get_ldev(device)) {
3827                 struct drbd_md *md = &device->ldev->md;
3828                 u64 *history_uuids = (u64 *)s->history_uuids;
3829                 struct request_queue *q;
3830                 int n;
3831
3832                 spin_lock_irq(&md->uuid_lock);
3833                 s->dev_current_uuid = md->uuid[UI_CURRENT];
3834                 BUILD_BUG_ON(sizeof(s->history_uuids) < UI_HISTORY_END - UI_HISTORY_START + 1);
3835                 for (n = 0; n < UI_HISTORY_END - UI_HISTORY_START + 1; n++)
3836                         history_uuids[n] = md->uuid[UI_HISTORY_START + n];
3837                 for (; n < HISTORY_UUIDS; n++)
3838                         history_uuids[n] = 0;
3839                 s->history_uuids_len = HISTORY_UUIDS;
3840                 spin_unlock_irq(&md->uuid_lock);
3841
3842                 s->dev_disk_flags = md->flags;
3843                 q = bdev_get_queue(device->ldev->backing_bdev);
3844                 s->dev_lower_blocked =
3845                         bdi_congested(&q->backing_dev_info,
3846                                       (1 << WB_async_congested) |
3847                                       (1 << WB_sync_congested));
3848                 put_ldev(device);
3849         }
3850         s->dev_size = drbd_get_capacity(device->this_bdev);
3851         s->dev_read = device->read_cnt;
3852         s->dev_write = device->writ_cnt;
3853         s->dev_al_writes = device->al_writ_cnt;
3854         s->dev_bm_writes = device->bm_writ_cnt;
3855         s->dev_upper_pending = atomic_read(&device->ap_bio_cnt);
3856         s->dev_lower_pending = atomic_read(&device->local_cnt);
3857         s->dev_al_suspended = test_bit(AL_SUSPENDED, &device->flags);
3858         s->dev_exposed_data_uuid = device->ed_uuid;
3859 }
3860
3861 enum mdf_peer_flag {
3862         MDF_PEER_CONNECTED =    1 << 0,
3863         MDF_PEER_OUTDATED =     1 << 1,
3864         MDF_PEER_FENCING =      1 << 2,
3865         MDF_PEER_FULL_SYNC =    1 << 3,
3866 };
3867
3868 static void peer_device_to_statistics(struct peer_device_statistics *s,
3869                                       struct drbd_peer_device *peer_device)
3870 {
3871         struct drbd_device *device = peer_device->device;
3872
3873         memset(s, 0, sizeof(*s));
3874         s->peer_dev_received = device->recv_cnt;
3875         s->peer_dev_sent = device->send_cnt;
3876         s->peer_dev_pending = atomic_read(&device->ap_pending_cnt) +
3877                               atomic_read(&device->rs_pending_cnt);
3878         s->peer_dev_unacked = atomic_read(&device->unacked_cnt);
3879         s->peer_dev_out_of_sync = drbd_bm_total_weight(device) << (BM_BLOCK_SHIFT - 9);
3880         s->peer_dev_resync_failed = device->rs_failed << (BM_BLOCK_SHIFT - 9);
3881         if (get_ldev(device)) {
3882                 struct drbd_md *md = &device->ldev->md;
3883
3884                 spin_lock_irq(&md->uuid_lock);
3885                 s->peer_dev_bitmap_uuid = md->uuid[UI_BITMAP];
3886                 spin_unlock_irq(&md->uuid_lock);
3887                 s->peer_dev_flags =
3888                         (drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND) ?
3889                                 MDF_PEER_CONNECTED : 0) +
3890                         (drbd_md_test_flag(device->ldev, MDF_CONSISTENT) &&
3891                          !drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE) ?
3892                                 MDF_PEER_OUTDATED : 0) +
3893                         /* FIXME: MDF_PEER_FENCING? */
3894                         (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ?
3895                                 MDF_PEER_FULL_SYNC : 0);
3896                 put_ldev(device);
3897         }
3898 }
3899
3900 static int nla_put_notification_header(struct sk_buff *msg,
3901                                        enum drbd_notification_type type)
3902 {
3903         struct drbd_notification_header nh = {
3904                 .nh_type = type,
3905         };
3906
3907         return drbd_notification_header_to_skb(msg, &nh, true);
3908 }
3909
3910 void notify_resource_state(struct sk_buff *skb,
3911                            unsigned int seq,
3912                            struct drbd_resource *resource,
3913                            struct resource_info *resource_info,
3914                            enum drbd_notification_type type)
3915 {
3916         struct resource_statistics resource_statistics;
3917         struct drbd_genlmsghdr *dh;
3918         bool multicast = false;
3919         int err;
3920
3921         if (!skb) {
3922                 seq = atomic_inc_return(&notify_genl_seq);
3923                 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
3924                 err = -ENOMEM;
3925                 if (!skb)
3926                         goto failed;
3927                 multicast = true;
3928         }
3929
3930         err = -EMSGSIZE;
3931         dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_RESOURCE_STATE);
3932         if (!dh)
3933                 goto nla_put_failure;
3934         dh->minor = -1U;
3935         dh->ret_code = NO_ERROR;
3936         if (nla_put_drbd_cfg_context(skb, resource, NULL, NULL) ||
3937             nla_put_notification_header(skb, type) ||
3938             ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
3939              resource_info_to_skb(skb, resource_info, true)))
3940                 goto nla_put_failure;
3941         resource_statistics.res_stat_write_ordering = resource->write_ordering;
3942         err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
3943         if (err)
3944                 goto nla_put_failure;
3945         genlmsg_end(skb, dh);
3946         if (multicast) {
3947                 err = drbd_genl_multicast_events(skb, 0);
3948                 /* skb has been consumed or freed in netlink_broadcast() */
3949                 if (err && err != -ESRCH)
3950                         goto failed;
3951         }
3952         return;
3953
3954 nla_put_failure:
3955         nlmsg_free(skb);
3956 failed:
3957         drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
3958                         err, seq);
3959 }
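/* notify_device_state(), notify_connection_state() and the other notify_*
 * helpers below follow the same two-mode pattern: called with skb == NULL
 * they allocate a message and broadcast it on the events multicast group;
 * called with the skb of an in-progress dump they append the same state
 * snapshot inline, so initial state and later change events share one
 * wire format. */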
3960
void notify_device_state(struct sk_buff *skb,
			 unsigned int seq,
			 struct drbd_device *device,
			 struct device_info *device_info,
			 enum drbd_notification_type type)
{
	struct device_statistics device_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_DEVICE_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = device->minor;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, device->resource, NULL, device) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     device_info_to_skb(skb, device_info, true)))
		goto nla_put_failure;
	device_to_statistics(&device_statistics, device);
	device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, 0);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}

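/* Like notify_resource_state(), but for @connection (DRBD_CONNECTION_STATE). */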
void notify_connection_state(struct sk_buff *skb,
			     unsigned int seq,
			     struct drbd_connection *connection,
			     struct connection_info *connection_info,
			     enum drbd_notification_type type)
{
	struct connection_statistics connection_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_CONNECTION_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, connection->resource, connection, NULL) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     connection_info_to_skb(skb, connection_info, true)))
		goto nla_put_failure;
	connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
	connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, 0);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}

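/* Like notify_resource_state(), but for @peer_device (DRBD_PEER_DEVICE_STATE). */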
void notify_peer_device_state(struct sk_buff *skb,
			      unsigned int seq,
			      struct drbd_peer_device *peer_device,
			      struct peer_device_info *peer_device_info,
			      enum drbd_notification_type type)
{
	struct peer_device_statistics peer_device_statistics;
	struct drbd_resource *resource = peer_device->device->resource;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_PEER_DEVICE_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, resource, peer_device->connection, peer_device->device) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     peer_device_info_to_skb(skb, peer_device_info, true)))
		goto nla_put_failure;
	peer_device_to_statistics(&peer_device_statistics, peer_device);
	peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, 0);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}

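/* Broadcast a DRBD_HELPER event for user-space helper @name with status
   @status, on behalf of @device or, if @device is NULL, of @connection.
   notification_mutex keeps helper events ordered with respect to other
   notifications. */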
void notify_helper(enum drbd_notification_type type,
		   struct drbd_device *device, struct drbd_connection *connection,
		   const char *name, int status)
{
	struct drbd_resource *resource = device ? device->resource : connection->resource;
	struct drbd_helper_info helper_info;
	unsigned int seq = atomic_inc_return(&notify_genl_seq);
	struct sk_buff *skb = NULL;
	struct drbd_genlmsghdr *dh;
	int err;

	strlcpy(helper_info.helper_name, name, sizeof(helper_info.helper_name));
	helper_info.helper_name_len = min(strlen(name), sizeof(helper_info.helper_name));
	helper_info.helper_status = status;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	err = -ENOMEM;
	if (!skb)
		goto fail;

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_HELPER);
	if (!dh)
		goto fail;
	dh->minor = device ? device->minor : -1U;
	dh->ret_code = NO_ERROR;
	mutex_lock(&notification_mutex);
	if (nla_put_drbd_cfg_context(skb, resource, connection, device) ||
	    nla_put_notification_header(skb, type) ||
	    drbd_helper_info_to_skb(skb, &helper_info, true))
		goto unlock_fail;
	genlmsg_end(skb, dh);
	err = drbd_genl_multicast_events(skb, 0);
	skb = NULL;
	/* skb has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto unlock_fail;
	mutex_unlock(&notification_mutex);
	return;

unlock_fail:
	mutex_unlock(&notification_mutex);
fail:
	nlmsg_free(skb);
	drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}

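/* Finish an initial state dump: send the DRBD_INITIAL_STATE_DONE message
   which tells user space that all NOTIFY_EXISTS events have been sent. */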
static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
{
	struct drbd_genlmsghdr *dh;
	int err;

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_INITIAL_STATE_DONE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_notification_header(skb, NOTIFY_EXISTS))
		goto nla_put_failure;
	genlmsg_end(skb, dh);
	return;

nla_put_failure:
	nlmsg_free(skb);
	pr_err("Error %d sending event. Event seq:%u\n", err, seq);
}

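/* Free all state change snapshots on @list. */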
static void free_state_changes(struct list_head *list)
{
	while (!list_empty(list)) {
		struct drbd_state_change *state_change =
			list_first_entry(list, struct drbd_state_change, list);
		list_del(&state_change->list);
		forget_state_change(state_change);
	}
}

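/* Each state change snapshot expands into one notification for the
   resource, one per connection, one per device, and one per peer device
   (device/connection pair). */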
static unsigned int notifications_for_state_change(struct drbd_state_change *state_change)
{
	return 1 +
	       state_change->n_connections +
	       state_change->n_devices +
	       state_change->n_devices * state_change->n_connections;
}

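/* One step of the initial state dump: emit the next NOTIFY_EXISTS
   notification from the snapshot chain set up by
   drbd_adm_get_initial_state(), using cb->args[] as the cursor. */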
static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_state_change *state_change = (struct drbd_state_change *)cb->args[0];
	unsigned int seq = cb->args[2];
	unsigned int n;
	enum drbd_notification_type flags = 0;

	/* There is no need for taking notification_mutex here: it doesn't
	   matter if the initial state events mix with later state change
	   events; we can always tell the events apart by the NOTIFY_EXISTS
	   flag. */

	cb->args[5]--;
	if (cb->args[5] == 1) {
		notify_initial_state_done(skb, seq);
		goto out;
	}
	n = cb->args[4]++;
	if (cb->args[4] < cb->args[3])
		flags |= NOTIFY_CONTINUES;
	if (n < 1) {
		notify_resource_state_change(skb, seq, state_change->resource,
					     NOTIFY_EXISTS | flags);
		goto next;
	}
	n--;
	if (n < state_change->n_connections) {
		notify_connection_state_change(skb, seq, &state_change->connections[n],
					       NOTIFY_EXISTS | flags);
		goto next;
	}
	n -= state_change->n_connections;
	if (n < state_change->n_devices) {
		notify_device_state_change(skb, seq, &state_change->devices[n],
					   NOTIFY_EXISTS | flags);
		goto next;
	}
	n -= state_change->n_devices;
	if (n < state_change->n_devices * state_change->n_connections) {
		notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
						NOTIFY_EXISTS | flags);
		goto next;
	}

next:
	if (cb->args[4] == cb->args[3]) {
		struct drbd_state_change *next_state_change =
			list_entry(state_change->list.next,
				   struct drbd_state_change, list);
		cb->args[0] = (long)next_state_change;
		cb->args[3] = notifications_for_state_change(next_state_change);
		cb->args[4] = 0;
	}
out:
	return skb->len;
}

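/* Dump entry point for the initial state of the "events2" interface.
   The first call snapshots the state of all resources under
   resources_mutex; subsequent calls replay those snapshots one
   notification at a time through get_initial_state(). */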
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_resource *resource;
	LIST_HEAD(head);

	if (cb->args[5] >= 1) {
		if (cb->args[5] > 1)
			return get_initial_state(skb, cb);
		if (cb->args[0]) {
			struct drbd_state_change *state_change =
				(struct drbd_state_change *)cb->args[0];

			/* connect list to head */
			list_add(&head, &state_change->list);
			free_state_changes(&head);
		}
		return 0;
	}

	cb->args[5] = 2;  /* number of iterations */
	mutex_lock(&resources_mutex);
	for_each_resource(resource, &drbd_resources) {
		struct drbd_state_change *state_change;

		state_change = remember_old_state(resource, GFP_KERNEL);
		if (!state_change) {
			if (!list_empty(&head))
				free_state_changes(&head);
			mutex_unlock(&resources_mutex);
			return -ENOMEM;
		}
		copy_old_to_new_state_change(state_change);
		list_add_tail(&state_change->list, &head);
		cb->args[5] += notifications_for_state_change(state_change);
	}
	mutex_unlock(&resources_mutex);

	if (!list_empty(&head)) {
		struct drbd_state_change *state_change =
			list_entry(head.next, struct drbd_state_change, list);
		cb->args[0] = (long)state_change;
		cb->args[3] = notifications_for_state_change(state_change);
		list_del(&head);  /* detach list from head */
	}

	cb->args[2] = cb->nlh->nlmsg_seq;
	return get_initial_state(skb, cb);
}