drbd: Do not call generic_make_request() while holding req_lock
drivers/block/drbd/drbd_req.c
1 /*
2    drbd_req.c
3
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10    drbd is free software; you can redistribute it and/or modify
11    it under the terms of the GNU General Public License as published by
12    the Free Software Foundation; either version 2, or (at your option)
13    any later version.
14
15    drbd is distributed in the hope that it will be useful,
16    but WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18    GNU General Public License for more details.
19
20    You should have received a copy of the GNU General Public License
21    along with drbd; see the file COPYING.  If not, write to
22    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23
24  */
25
26 #include <linux/module.h>
27
28 #include <linux/slab.h>
29 #include <linux/drbd.h>
30 #include "drbd_int.h"
31 #include "drbd_req.h"
32
33
34 static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size);
35
36 /* Update disk stats at start of I/O request */
37 static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req, struct bio *bio)
38 {
39         const int rw = bio_data_dir(bio);
40         int cpu;
41         cpu = part_stat_lock();
42         part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
43         part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
44         (void) cpu; /* The macro invocations above want the cpu argument; without this,
45                        the compiler warns that cpu is assigned but never used. */
46         part_inc_in_flight(&mdev->vdisk->part0, rw);
47         part_stat_unlock();
48 }
49
50 /* Update disk stats when completing request upwards */
51 static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
52 {
53         int rw = bio_data_dir(req->master_bio);
54         unsigned long duration = jiffies - req->start_time;
55         int cpu;
56         cpu = part_stat_lock();
57         part_stat_add(cpu, &mdev->vdisk->part0, ticks[rw], duration);
58         part_round_stats(cpu, &mdev->vdisk->part0);
59         part_dec_in_flight(&mdev->vdisk->part0, rw);
60         part_stat_unlock();
61 }
62
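/* Allocate a drbd_request from the mempool and initialize it for the given
 * master bio; the private bio used for local I/O is cloned from bio_src. */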
63 static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
64                                                struct bio *bio_src)
65 {
66         struct drbd_request *req;
67
68         req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
69         if (!req)
70                 return NULL;
71
72         drbd_req_make_private_bio(req, bio_src);
73         req->rq_state    = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
74         req->w.mdev      = mdev;
75         req->master_bio  = bio_src;
76         req->epoch       = 0;
77
78         drbd_clear_interval(&req->i);
79         req->i.sector     = bio_src->bi_sector;
80         req->i.size      = bio_src->bi_size;
81         req->i.local = true;
82         req->i.waiting = false;
83
84         INIT_LIST_HEAD(&req->tl_requests);
85         INIT_LIST_HEAD(&req->w.list);
86
87         return req;
88 }
89
90 static void drbd_req_free(struct drbd_request *req)
91 {
92         mempool_free(req, drbd_request_mempool);
93 }
94
95 /* rw is bio_data_dir(), only READ or WRITE */
96 static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const int rw)
97 {
98         const unsigned long s = req->rq_state;
99
100         /* remove it from the transfer log.
101          * well, only if it had been there in the first
102          * place... if it had not (local only or conflicting
103          * and never sent), it should still be "empty" as
104          * initialized in drbd_req_new(), so we can list_del() it
105          * here unconditionally */
106         list_del(&req->tl_requests);
107
108         /* if it was a write, we may have to set the corresponding
109          * bit(s) out-of-sync first. If it had a local part, we need to
110          * release the reference to the activity log. */
111         if (rw == WRITE) {
112                 /* Set out-of-sync unless both OK flags are set
113                  * (local only or remote failed).
114                  * Other places where we set out-of-sync:
115                  * READ with local io-error */
116                 if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
117                         drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);
118
119                 if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
120                         drbd_set_in_sync(mdev, req->i.sector, req->i.size);
121
122                 /* one might be tempted to move the drbd_al_complete_io
123                  * to the local io completion callback drbd_request_endio.
124                  * but, if this was a mirror write, we may only
125                  * drbd_al_complete_io after this is RQ_NET_DONE,
126                  * otherwise the extent could be dropped from the al
127                  * before it has actually been written on the peer.
128                  * if we crash before our peer knows about the request,
129                  * but after the extent has been dropped from the al,
130                  * we would forget to resync the corresponding extent.
131                  */
132                 if (s & RQ_LOCAL_MASK) {
133                         if (get_ldev_if_state(mdev, D_FAILED)) {
134                                 if (s & RQ_IN_ACT_LOG)
135                                         drbd_al_complete_io(mdev, &req->i);
136                                 put_ldev(mdev);
137                         } else if (__ratelimit(&drbd_ratelimit_state)) {
138                                 dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu, %u), "
139                                          "but my Disk seems to have failed :(\n",
140                                          (unsigned long long) req->i.sector, req->i.size);
141                         }
142                 }
143         }
144
145         drbd_req_free(req);
146 }
147
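/* Queue the w_send_barrier work item that will close the current epoch.
 * Called with the req_lock held; the CREATE_BARRIER bit ensures this
 * happens at most once per epoch. */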
148 static void queue_barrier(struct drbd_conf *mdev)
149 {
150         struct drbd_tl_epoch *b;
151         struct drbd_tconn *tconn = mdev->tconn;
152
153         /* We are within the req_lock. Once we queued the barrier for sending,
154          * we set the CREATE_BARRIER bit. It is cleared as soon as a new
155          * barrier/epoch object is added. This is the only place this bit is
156          * set. It indicates that the barrier for this epoch is already queued,
157          * and no new epoch has been created yet. */
158         if (test_bit(CREATE_BARRIER, &tconn->flags))
159                 return;
160
161         b = tconn->newest_tle;
162         b->w.cb = w_send_barrier;
163         b->w.mdev = mdev;
164         /* inc_ap_pending done here, so we won't
165          * get imbalanced on connection loss.
166          * dec_ap_pending will be done in got_BarrierAck
167          * or (on connection loss) in tl_clear.  */
168         inc_ap_pending(mdev);
169         drbd_queue_work(&tconn->data.work, &b->w);
170         set_bit(CREATE_BARRIER, &tconn->flags);
171 }
172
173 static void _about_to_complete_local_write(struct drbd_conf *mdev,
174         struct drbd_request *req)
175 {
176         const unsigned long s = req->rq_state;
177
178         /* Before we can signal completion to the upper layers,
179          * we may need to close the current epoch.
180          * We can skip this, if this request has not even been sent, because we
181          * did not have a fully established connection yet/anymore, during
182          * bitmap exchange, or while we are C_AHEAD due to congestion policy.
183          */
184         if (mdev->state.conn >= C_CONNECTED &&
185             (s & RQ_NET_SENT) != 0 &&
186             req->epoch == mdev->tconn->newest_tle->br_number)
187                 queue_barrier(mdev);
188 }
189
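/* Complete the master bio towards the upper layers with the collected error
 * status, and drop the ap_bio reference taken in drbd_make_request(). */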
190 void complete_master_bio(struct drbd_conf *mdev,
191                 struct bio_and_error *m)
192 {
193         bio_endio(m->bio, m->error);
194         dec_ap_bio(mdev);
195 }
196
197
198 static void drbd_remove_request_interval(struct rb_root *root,
199                                          struct drbd_request *req)
200 {
201         struct drbd_conf *mdev = req->w.mdev;
202         struct drbd_interval *i = &req->i;
203
204         drbd_remove_interval(root, i);
205
206         /* Wake up any processes waiting for this request to complete.  */
207         if (i->waiting)
208                 wake_up(&mdev->misc_wait);
209 }
210
211 /* Helper for __req_mod().
212  * Set m->bio to the master bio, if it is fit to be completed,
213  * or leave it alone (it is initialized to NULL in __req_mod),
214  * if it has already been completed, or cannot be completed yet.
215  * If m->bio is set, the error status to be returned is placed in m->error.
216  */
217 void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
218 {
219         const unsigned long s = req->rq_state;
220         struct drbd_conf *mdev = req->w.mdev;
221         int rw = req->rq_state & RQ_WRITE ? WRITE : READ;
222
223         /* we must not complete the master bio, while it is
224          *      still being processed by _drbd_send_zc_bio (drbd_send_dblock)
225          *      not yet acknowledged by the peer
226          *      not yet completed by the local io subsystem
227          * these flags may get cleared in any order by
228          *      the worker,
229          *      the receiver,
230          *      the bio_endio completion callbacks.
231          */
232         if (s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED))
233                 return;
234         if (req->i.waiting) {
235                 /* Retry all conflicting peer requests.  */
236                 wake_up(&mdev->misc_wait);
237         }
238         if (s & RQ_NET_QUEUED)
239                 return;
240         if (s & RQ_NET_PENDING)
241                 return;
242
243         if (req->master_bio) {
244                 /* this is DATA_RECEIVED (remote read)
245                  * or protocol C P_WRITE_ACK
246                  * or protocol B P_RECV_ACK
247                  * or protocol A "HANDED_OVER_TO_NETWORK" (SendAck)
248                  * or canceled or failed,
249                  * or killed from the transfer log due to connection loss.
250                  */
251
252                 /*
253                  * figure out whether to report success or failure.
254                  *
255                  * report success when at least one of the operations succeeded.
256                  * or, to put it the other way around,
257                  * only report failure when both operations failed.
258                  *
259                  * what to do about the failures is handled elsewhere.
260                  * what we need to do here is just: complete the master_bio.
261                  *
262                  * local completion error, if any, has been stored as ERR_PTR
263                  * in private_bio within drbd_request_endio.
264                  */
265                 int ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
266                 int error = PTR_ERR(req->private_bio);
267
268                 /* remove the request from the conflict detection
269                  * and block_id verification interval tree */
270                 if (!drbd_interval_empty(&req->i)) {
271                         struct rb_root *root;
272
273                         if (rw == WRITE)
274                                 root = &mdev->write_requests;
275                         else
276                                 root = &mdev->read_requests;
277                         drbd_remove_request_interval(root, req);
278                 } else if (!(s & RQ_POSTPONED))
279                         D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
280
281                 /* for writes we need to do some extra housekeeping */
282                 if (rw == WRITE)
283                         _about_to_complete_local_write(mdev, req);
284
285                 /* Update disk stats */
286                 _drbd_end_io_acct(mdev, req);
287
288                 if (!(s & RQ_POSTPONED)) {
289                         m->error = ok ? 0 : (error ?: -EIO);
290                         m->bio = req->master_bio;
291                 }
292                 req->master_bio = NULL;
293         }
294
295         if (s & RQ_LOCAL_PENDING)
296                 return;
297
298         if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE)) {
299                 /* this is disconnected (local only) operation,
300                  * or protocol A, B, or C P_BARRIER_ACK,
301                  * or killed from the transfer log due to connection loss. */
302                 _req_is_done(mdev, req, rw);
303         }
304         /* else: network part and not DONE yet. that is
305          * protocol A, B, or C, barrier ack still pending... */
306 }
307
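/* Like _req_may_be_done(), but does nothing while I/O is suspended; a
 * suspended request is completed later, typically via FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO or RESEND. */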
308 static void _req_may_be_done_not_susp(struct drbd_request *req, struct bio_and_error *m)
309 {
310         struct drbd_conf *mdev = req->w.mdev;
311
312         if (!drbd_suspended(mdev))
313                 _req_may_be_done(req, m);
314 }
315
316 /* Obviously this could be coded as many single functions
317  * instead of one huge switch,
318  * or by putting the code directly in the respective locations
319  * (as it was before).
320  *
321  * But having it this way
322  *  enforces that it is all in this one place, where it is easier to audit,
323  *  it makes it obvious that whatever "event" "happens" to a request should
324  *  happen "atomically" within the req_lock,
325  *  and it enforces that we have to think in a very structured manner
326  *  about the "events" that may happen to a request during its life time ...
327  */
328 int __req_mod(struct drbd_request *req, enum drbd_req_event what,
329                 struct bio_and_error *m)
330 {
331         struct drbd_conf *mdev = req->w.mdev;
332         struct net_conf *nc;
333         int p, rv = 0;
334
335         if (m)
336                 m->bio = NULL;
337
338         switch (what) {
339         default:
340                 dev_err(DEV, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
341                 break;
342
343         /* does not happen...
344          * initialization done in drbd_req_new
345         case CREATED:
346                 break;
347                 */
348
349         case TO_BE_SENT: /* via network */
350                 /* reached via __drbd_make_request
351                  * and from w_read_retry_remote */
352                 D_ASSERT(!(req->rq_state & RQ_NET_MASK));
353                 req->rq_state |= RQ_NET_PENDING;
354                 rcu_read_lock();
355                 nc = rcu_dereference(mdev->tconn->net_conf);
356                 p = nc->wire_protocol;
357                 rcu_read_unlock();
358                 req->rq_state |=
359                         p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
360                         p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
361                 inc_ap_pending(mdev);
362                 break;
363
364         case TO_BE_SUBMITTED: /* locally */
365                 /* reached via __drbd_make_request */
366                 D_ASSERT(!(req->rq_state & RQ_LOCAL_MASK));
367                 req->rq_state |= RQ_LOCAL_PENDING;
368                 break;
369
370         case COMPLETED_OK:
371                 if (req->rq_state & RQ_WRITE)
372                         mdev->writ_cnt += req->i.size >> 9;
373                 else
374                         mdev->read_cnt += req->i.size >> 9;
375
376                 req->rq_state |= (RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
377                 req->rq_state &= ~RQ_LOCAL_PENDING;
378
379                 _req_may_be_done_not_susp(req, m);
380                 put_ldev(mdev);
381                 break;
382
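        /* Give up waiting for the local completion: mark the request aborted so
         * the master bio may be completed even though local I/O is still
         * pending; aborted reads are retried from the peer below. */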
383         case ABORT_DISK_IO:
384                 req->rq_state |= RQ_LOCAL_ABORTED;
385                 if (req->rq_state & RQ_WRITE)
386                         _req_may_be_done_not_susp(req, m);
387                 else
388                         goto goto_queue_for_net_read;
389                 break;
390
391         case WRITE_COMPLETED_WITH_ERROR:
392                 req->rq_state |= RQ_LOCAL_COMPLETED;
393                 req->rq_state &= ~RQ_LOCAL_PENDING;
394
395                 __drbd_chk_io_error(mdev, false);
396                 _req_may_be_done_not_susp(req, m);
397                 put_ldev(mdev);
398                 break;
399
400         case READ_AHEAD_COMPLETED_WITH_ERROR:
401                 /* it is legal to fail READA */
402                 req->rq_state |= RQ_LOCAL_COMPLETED;
403                 req->rq_state &= ~RQ_LOCAL_PENDING;
404                 _req_may_be_done_not_susp(req, m);
405                 put_ldev(mdev);
406                 break;
407
408         case READ_COMPLETED_WITH_ERROR:
409                 drbd_set_out_of_sync(mdev, req->i.sector, req->i.size);
410
411                 req->rq_state |= RQ_LOCAL_COMPLETED;
412                 req->rq_state &= ~RQ_LOCAL_PENDING;
413
414                 D_ASSERT(!(req->rq_state & RQ_NET_MASK));
415
416                 __drbd_chk_io_error(mdev, false);
417                 put_ldev(mdev);
418
419         goto_queue_for_net_read:
420
421                 /* no point in retrying if there is no good remote data,
422                  * or we have no connection. */
423                 if (mdev->state.pdsk != D_UP_TO_DATE) {
424                         _req_may_be_done_not_susp(req, m);
425                         break;
426                 }
427
428                 /* _req_mod(req,TO_BE_SENT); oops, recursion... */
429                 req->rq_state |= RQ_NET_PENDING;
430                 inc_ap_pending(mdev);
431                 /* fall through: _req_mod(req,QUEUE_FOR_NET_READ); */
432
433         case QUEUE_FOR_NET_READ:
434                 /* READ or READA, and
435                  * no local disk,
436                  * or target area marked as invalid,
437                  * or just got an io-error. */
438                 /* from __drbd_make_request
439                  * or from bio_endio during read io-error recovery */
440
441                 /* so we can verify the handle in the answer packet;
442                  * the corresponding removal is in _req_may_be_done() */
443                 D_ASSERT(drbd_interval_empty(&req->i));
444                 drbd_insert_interval(&mdev->read_requests, &req->i);
445
446                 set_bit(UNPLUG_REMOTE, &mdev->flags);
447
448                 D_ASSERT(req->rq_state & RQ_NET_PENDING);
449                 req->rq_state |= RQ_NET_QUEUED;
450                 req->w.cb = (req->rq_state & RQ_LOCAL_MASK)
451                         ? w_read_retry_remote
452                         : w_send_read_req;
453                 drbd_queue_work(&mdev->tconn->data.work, &req->w);
454                 break;
455
456         case QUEUE_FOR_NET_WRITE:
457                 /* assert something? */
458                 /* from __drbd_make_request only */
459
460                 /* the corresponding removal is in _req_may_be_done() */
461                 D_ASSERT(drbd_interval_empty(&req->i));
462                 drbd_insert_interval(&mdev->write_requests, &req->i);
463
464                 /* NOTE
465                  * In case the req ended up on the transfer log before being
466                  * queued on the worker, it could lead to this request being
467                  * missed during cleanup after connection loss.
468                  * So we have to do both operations here,
469                  * within the same lock that protects the transfer log.
470                  *
471                  * _req_add_to_epoch(req); this has to be after the
472                  * _maybe_start_new_epoch(req); which happened in
473                  * __drbd_make_request, because we now may set the bit
474                  * again ourselves to close the current epoch.
475                  *
476                  * Add req to the (now) current epoch (barrier). */
477
478                 /* otherwise we may lose an unplug, which may cause some remote
479                  * io-scheduler timeout to expire, increasing maximum latency,
480                  * hurting performance. */
481                 set_bit(UNPLUG_REMOTE, &mdev->flags);
482
483                 /* see __drbd_make_request,
484                  * just after it grabs the req_lock */
485                 D_ASSERT(test_bit(CREATE_BARRIER, &mdev->tconn->flags) == 0);
486
487                 req->epoch = mdev->tconn->newest_tle->br_number;
488
489                 /* increment size of current epoch */
490                 mdev->tconn->newest_tle->n_writes++;
491
492                 /* queue work item to send data */
493                 D_ASSERT(req->rq_state & RQ_NET_PENDING);
494                 req->rq_state |= RQ_NET_QUEUED;
495                 req->w.cb =  w_send_dblock;
496                 drbd_queue_work(&mdev->tconn->data.work, &req->w);
497
498                 /* close the epoch, in case it outgrew the limit */
499                 rcu_read_lock();
500                 nc = rcu_dereference(mdev->tconn->net_conf);
501                 p = nc->max_epoch_size;
502                 rcu_read_unlock();
503                 if (mdev->tconn->newest_tle->n_writes >= p)
504                         queue_barrier(mdev);
505
506                 break;
507
508         case QUEUE_FOR_SEND_OOS:
509                 req->rq_state |= RQ_NET_QUEUED;
510                 req->w.cb =  w_send_out_of_sync;
511                 drbd_queue_work(&mdev->tconn->data.work, &req->w);
512                 break;
513
514         case OOS_HANDED_TO_NETWORK:
515                 /* actually the same */
516         case SEND_CANCELED:
517                 /* treat it the same */
518         case SEND_FAILED:
519                 /* real cleanup will be done from tl_clear.  just update flags
520                  * so it is no longer marked as on the worker queue */
521                 req->rq_state &= ~RQ_NET_QUEUED;
522                 /* if we did it right, tl_clear should be scheduled only after
523                  * this, so this should not be necessary! */
524                 _req_may_be_done_not_susp(req, m);
525                 break;
526
527         case HANDED_OVER_TO_NETWORK:
528                 /* assert something? */
529                 if (bio_data_dir(req->master_bio) == WRITE)
530                         atomic_add(req->i.size >> 9, &mdev->ap_in_flight);
531
532                 if (bio_data_dir(req->master_bio) == WRITE &&
533                     !(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK))) {
534                         /* this is what is dangerous about protocol A:
535                          * pretend it was successfully written on the peer. */
536                         if (req->rq_state & RQ_NET_PENDING) {
537                                 dec_ap_pending(mdev);
538                                 req->rq_state &= ~RQ_NET_PENDING;
539                                 req->rq_state |= RQ_NET_OK;
540                         } /* else: neg-ack was faster... */
541                         /* it is still not yet RQ_NET_DONE until the
542                          * corresponding epoch barrier got acked as well,
543                          * so we know what to dirty on connection loss */
544                 }
545                 req->rq_state &= ~RQ_NET_QUEUED;
546                 req->rq_state |= RQ_NET_SENT;
547                 /* because _drbd_send_zc_bio could sleep, and may want to
548                  * dereference the bio even after the "WRITE_ACKED_BY_PEER" and
549                  * "COMPLETED_OK" events came in, once we return from
550                  * _drbd_send_zc_bio (drbd_send_dblock), we have to check
551                  * whether it is done already, and end it.  */
552                 _req_may_be_done_not_susp(req, m);
553                 break;
554
555         case READ_RETRY_REMOTE_CANCELED:
556                 req->rq_state &= ~RQ_NET_QUEUED;
557                 /* fall through, in case we raced with drbd_disconnect */
558         case CONNECTION_LOST_WHILE_PENDING:
559                 /* transfer log cleanup after connection loss */
560                 /* assert something? */
561                 if (req->rq_state & RQ_NET_PENDING)
562                         dec_ap_pending(mdev);
563
564                 p = !(req->rq_state & RQ_WRITE) && req->rq_state & RQ_NET_PENDING;
565
566                 req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
567                 req->rq_state |= RQ_NET_DONE;
568                 if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE)
569                         atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
570
571                 /* if it is still queued, we may not complete it here.
572                  * it will be canceled soon. */
573                 if (!(req->rq_state & RQ_NET_QUEUED)) {
574                         if (p)
575                                 goto goto_read_retry_local;
576                         _req_may_be_done(req, m); /* Allowed while state.susp */
577                 }
578                 break;
579
580         case WRITE_ACKED_BY_PEER_AND_SIS:
581                 req->rq_state |= RQ_NET_SIS;
582         case DISCARD_WRITE:
583                 /* for discarded conflicting writes of multiple primaries,
584                  * there is no need to keep anything in the tl; potential
585                  * node crashes are covered by the activity log. */
586                 req->rq_state |= RQ_NET_DONE;
587                 /* fall through */
588         case WRITE_ACKED_BY_PEER:
589                 D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
590                 /* protocol C; successfully written on peer.
591                  * Nothing to do here.
592                  * We want to keep the tl in place for all protocols, to cater
593                  * for volatile write-back caches on lower level devices.
594                  *
595                  * A barrier request is expected to have forced all prior
596                  * requests onto stable storage, so completion of a barrier
597                  * request could set NET_DONE right here, and not wait for the
598                  * P_BARRIER_ACK, but that is an unnecessary optimization. */
599
600                 goto ack_common;
601                 /* this makes it effectively the same as for: */
602         case RECV_ACKED_BY_PEER:
603                 D_ASSERT(req->rq_state & RQ_EXP_RECEIVE_ACK);
604                 /* protocol B; pretends to be successfully written on peer.
605                  * see also notes above in HANDED_OVER_TO_NETWORK about
606                  * protocol != C */
607         ack_common:
608                 req->rq_state |= RQ_NET_OK;
609                 D_ASSERT(req->rq_state & RQ_NET_PENDING);
610                 dec_ap_pending(mdev);
611                 atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
612                 req->rq_state &= ~RQ_NET_PENDING;
613                 _req_may_be_done_not_susp(req, m);
614                 break;
615
616         case POSTPONE_WRITE:
617                 D_ASSERT(req->rq_state & RQ_EXP_WRITE_ACK);
618                 /* If this node has already detected the write conflict, the
619                  * worker will be waiting on misc_wait.  Wake it up once this
620                  * request has completed locally.
621                  */
622                 D_ASSERT(req->rq_state & RQ_NET_PENDING);
623                 req->rq_state |= RQ_POSTPONED;
624                 _req_may_be_done_not_susp(req, m);
625                 break;
626
627         case NEG_ACKED:
628                 /* assert something? */
629                 if (req->rq_state & RQ_NET_PENDING) {
630                         dec_ap_pending(mdev);
631                         atomic_sub(req->i.size >> 9, &mdev->ap_in_flight);
632                 }
633                 req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
634
635                 req->rq_state |= RQ_NET_DONE;
636
637                 if (!(req->rq_state & RQ_WRITE))
638                         goto goto_read_retry_local;
639
640                 _req_may_be_done_not_susp(req, m);
641                 /* else: done by HANDED_OVER_TO_NETWORK */
642                 break;
643
644         goto_read_retry_local:
645                 if (!drbd_may_do_local_read(mdev, req->i.sector, req->i.size)) {
646                         _req_may_be_done_not_susp(req, m);
647                         break;
648                 }
649                 D_ASSERT(!(req->rq_state & RQ_LOCAL_PENDING));
650                 req->rq_state |= RQ_LOCAL_PENDING;
651
652                 get_ldev(mdev);
653                 req->w.cb = w_restart_disk_io;
654                 drbd_queue_work(&mdev->tconn->data.work, &req->w);
655                 break;
656
657         case FAIL_FROZEN_DISK_IO:
658                 if (!(req->rq_state & RQ_LOCAL_COMPLETED))
659                         break;
660
661                 _req_may_be_done(req, m); /* Allowed while state.susp */
662                 break;
663
664         case RESTART_FROZEN_DISK_IO:
665                 if (!(req->rq_state & RQ_LOCAL_COMPLETED))
666                         break;
667
668                 req->rq_state &= ~RQ_LOCAL_COMPLETED;
669
670                 rv = MR_READ;
671                 if (bio_data_dir(req->master_bio) == WRITE)
672                         rv = MR_WRITE;
673
674                 get_ldev(mdev);
675                 req->w.cb = w_restart_disk_io;
676                 drbd_queue_work(&mdev->tconn->data.work, &req->w);
677                 break;
678
679         case RESEND:
680                 /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
681                    before the connection loss (B&C only); only P_BARRIER_ACK was missing.
682                    Throw them out of the TL here by pretending we got a BARRIER_ACK;
683                    we ensure that the peer was not rebooted. */
684                 if (!(req->rq_state & RQ_NET_OK)) {
685                         if (req->w.cb) {
686                                 drbd_queue_work(&mdev->tconn->data.work, &req->w);
687                                 rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
688                         }
689                         break;
690                 }
691                 /* else, fall through to BARRIER_ACKED */
692
693         case BARRIER_ACKED:
694                 if (!(req->rq_state & RQ_WRITE))
695                         break;
696
697                 if (req->rq_state & RQ_NET_PENDING) {
698                         /* barrier came in before all requests were acked.
699                          * this is bad, because if the connection is lost now,
700                          * we won't be able to clean them up... */
701                         dev_err(DEV, "FIXME (BARRIER_ACKED but pending)\n");
702                         list_move(&req->tl_requests, &mdev->tconn->out_of_sequence_requests);
703                 }
704                 if ((req->rq_state & RQ_NET_MASK) != 0) {
705                         req->rq_state |= RQ_NET_DONE;
706                         if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)))
707                                 atomic_sub(req->i.size>>9, &mdev->ap_in_flight);
708                 }
709                 _req_may_be_done(req, m); /* Allowed while state.susp */
710                 break;
711
712         case DATA_RECEIVED:
713                 D_ASSERT(req->rq_state & RQ_NET_PENDING);
714                 dec_ap_pending(mdev);
715                 req->rq_state &= ~RQ_NET_PENDING;
716                 req->rq_state |= (RQ_NET_OK|RQ_NET_DONE);
717                 _req_may_be_done_not_susp(req, m);
718                 break;
719         };
720
721         return rv;
722 }
723
724 /* we may do a local read if:
725  * - we are consistent (of course),
726  * - or we are generally inconsistent,
727  *   BUT we are still/already IN SYNC for this area.
728  *   since size may be bigger than BM_BLOCK_SIZE,
729  *   we may need to check several bits.
730  */
731 static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size)
732 {
733         unsigned long sbnr, ebnr;
734         sector_t esector, nr_sectors;
735
736         if (mdev->state.disk == D_UP_TO_DATE)
737                 return true;
738         if (mdev->state.disk != D_INCONSISTENT)
739                 return false;
740         esector = sector + (size >> 9) - 1;
741         nr_sectors = drbd_get_capacity(mdev->this_bdev);
742         D_ASSERT(sector  < nr_sectors);
743         D_ASSERT(esector < nr_sectors);
744
745         sbnr = BM_SECT_TO_BIT(sector);
746         ebnr = BM_SECT_TO_BIT(esector);
747
748         return drbd_bm_count_bits(mdev, sbnr, ebnr) == 0;
749 }
750
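/* Decide whether a read that could be served locally should be shipped to the
 * peer instead, according to the configured read-balancing policy. */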
751 static bool remote_due_to_read_balancing(struct drbd_conf *mdev, sector_t sector)
752 {
753         enum drbd_read_balancing rbm;
754         struct backing_dev_info *bdi;
755         int stripe_shift;
756
757         if (mdev->state.pdsk < D_UP_TO_DATE)
758                 return false;
759
760         rcu_read_lock();
761         rbm = rcu_dereference(mdev->ldev->disk_conf)->read_balancing;
762         rcu_read_unlock();
763
764         switch (rbm) {
765         case RB_CONGESTED_REMOTE:
766                 bdi = &mdev->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
767                 return bdi_read_congested(bdi);
768         case RB_LEAST_PENDING:
769                 return atomic_read(&mdev->local_cnt) >
770                         atomic_read(&mdev->ap_pending_cnt) + atomic_read(&mdev->rs_pending_cnt);
771         case RB_32K_STRIPING:  /* stripe_shift = 15 */
772         case RB_64K_STRIPING:
773         case RB_128K_STRIPING:
774         case RB_256K_STRIPING:
775         case RB_512K_STRIPING:
776         case RB_1M_STRIPING:   /* stripe_shift = 20 */
777                 stripe_shift = (rbm - RB_32K_STRIPING + 15);
778                 return (sector >> (stripe_shift - 9)) & 1;
779         case RB_ROUND_ROBIN:
780                 return test_and_change_bit(READ_BALANCE_RR, &mdev->flags);
781         case RB_PREFER_REMOTE:
782                 return true;
783         case RB_PREFER_LOCAL:
784         default:
785                 return false;
786         }
787 }
788
789 /*
790  * complete_conflicting_writes  -  wait for any conflicting write requests
791  *
792  * The write_requests tree contains all active write requests which we
793  * currently know about.  Wait for any requests to complete which conflict with
794  * the new one.
795  */
796 static int complete_conflicting_writes(struct drbd_conf *mdev,
797                                        sector_t sector, int size)
798 {
799         for(;;) {
800                 struct drbd_interval *i;
801                 int err;
802
803                 i = drbd_find_overlap(&mdev->write_requests, sector, size);
804                 if (!i)
805                         return 0;
806                 err = drbd_wait_misc(mdev, i);
807                 if (err)
808                         return err;
809         }
810 }
811
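/* Build a drbd_request for @bio and route it to the local disk and/or the
 * peer.  Returns 0 on success; returns 1 if the bio should be resubmitted by
 * generic_make_request() because I/O is currently suspended. */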
812 int __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
813 {
814         const int rw = bio_rw(bio);
815         const int size = bio->bi_size;
816         const sector_t sector = bio->bi_sector;
817         struct drbd_tl_epoch *b = NULL;
818         struct drbd_request *req;
819         struct net_conf *nc;
820         int local, remote, send_oos = 0;
821         int err;
822         int ret = 0;
823
824         /* allocate outside of all locks; */
825         req = drbd_req_new(mdev, bio);
826         if (!req) {
827                 dec_ap_bio(mdev);
828                 /* only pass the error to the upper layers.
829                  * if user cannot handle io errors, that's not our business. */
830                 dev_err(DEV, "could not kmalloc() req\n");
831                 bio_endio(bio, -ENOMEM);
832                 return 0;
833         }
834         req->start_time = start_time;
835
836         local = get_ldev(mdev);
837         if (!local) {
838                 bio_put(req->private_bio); /* or we get a bio leak */
839                 req->private_bio = NULL;
840         }
841         if (rw == WRITE) {
842                 remote = 1;
843         } else {
844                 /* READ || READA */
845                 if (local) {
846                         if (!drbd_may_do_local_read(mdev, sector, size) ||
847                             remote_due_to_read_balancing(mdev, sector)) {
848                                 /* we could kick the syncer to
849                                  * sync this extent asap, wait for
850                                  * it, then continue locally.
851                                  * Or just issue the request remotely.
852                                  */
853                                 local = 0;
854                                 bio_put(req->private_bio);
855                                 req->private_bio = NULL;
856                                 put_ldev(mdev);
857                         }
858                 }
859                 remote = !local && mdev->state.pdsk >= D_UP_TO_DATE;
860         }
861
862         /* If we have a disk, but a READA request is mapped to remote,
863          * we are R_PRIMARY, D_INCONSISTENT, SyncTarget.
864          * Just fail that READA request right here.
865          *
866          * THINK: maybe fail all READA when not local?
867          *        or make this configurable...
868          *        if network is slow, READA won't do any good.
869          */
870         if (rw == READA && mdev->state.disk >= D_INCONSISTENT && !local) {
871                 err = -EWOULDBLOCK;
872                 goto fail_and_free_req;
873         }
874
875         /* For WRITES going to the local disk, grab a reference on the target
876          * extent.  This waits for any resync activity in the corresponding
877          * resync extent to finish, and, if necessary, pulls in the target
878          * extent into the activity log, which involves further disk io because
879          * of transactional on-disk meta data updates. */
880         if (rw == WRITE && local && !test_bit(AL_SUSPENDED, &mdev->flags)) {
881                 req->rq_state |= RQ_IN_ACT_LOG;
882                 drbd_al_begin_io(mdev, &req->i);
883         }
884
885         remote = remote && drbd_should_do_remote(mdev->state);
886         send_oos = rw == WRITE && drbd_should_send_out_of_sync(mdev->state);
887         D_ASSERT(!(remote && send_oos));
888
889         if (!(local || remote) && !drbd_suspended(mdev)) {
890                 if (__ratelimit(&drbd_ratelimit_state))
891                         dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
892                 err = -EIO;
893                 goto fail_free_complete;
894         }
895
896         /* For a WRITE request, we have to make sure that we have an
897          * unused_spare_tle, in case we need to start a new epoch.
898          * I try to be smart and avoid always pre-allocating "just in case",
899          * but there is a race between testing the bit and pointer outside the
900          * spinlock, and grabbing the spinlock.
901          * If we lose that race, we retry.  */
902         if (rw == WRITE && (remote || send_oos) &&
903             mdev->tconn->unused_spare_tle == NULL &&
904             test_bit(CREATE_BARRIER, &mdev->tconn->flags)) {
905 allocate_barrier:
906                 b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO);
907                 if (!b) {
908                         dev_err(DEV, "Failed to alloc barrier.\n");
909                         err = -ENOMEM;
910                         goto fail_free_complete;
911                 }
912         }
913
914         /* GOOD, everything prepared, grab the spin_lock */
915         spin_lock_irq(&mdev->tconn->req_lock);
916
917         if (rw == WRITE) {
918                 err = complete_conflicting_writes(mdev, sector, size);
919                 if (err) {
920                         if (err != -ERESTARTSYS)
921                                 _conn_request_state(mdev->tconn,
922                                                     NS(conn, C_TIMEOUT),
923                                                     CS_HARD);
924                         spin_unlock_irq(&mdev->tconn->req_lock);
925                         err = -EIO;
926                         goto fail_free_complete;
927                 }
928         }
929
930         if (drbd_suspended(mdev)) {
931                 /* If we got suspended, use the retry mechanism of
932                    generic_make_request() to restart processing of this
933                    bio. In the next call to drbd_make_request
934                    we sleep in inc_ap_bio() */
935                 ret = 1;
936                 spin_unlock_irq(&mdev->tconn->req_lock);
937                 goto fail_free_complete;
938         }
939
940         if (remote || send_oos) {
941                 remote = drbd_should_do_remote(mdev->state);
942                 send_oos = rw == WRITE && drbd_should_send_out_of_sync(mdev->state);
943                 D_ASSERT(!(remote && send_oos));
944
945                 if (!(remote || send_oos))
946                         dev_warn(DEV, "lost connection while grabbing the req_lock!\n");
947                 if (!(local || remote)) {
948                         dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
949                         spin_unlock_irq(&mdev->tconn->req_lock);
950                         err = -EIO;
951                         goto fail_free_complete;
952                 }
953         }
954
955         if (b && mdev->tconn->unused_spare_tle == NULL) {
956                 mdev->tconn->unused_spare_tle = b;
957                 b = NULL;
958         }
959         if (rw == WRITE && (remote || send_oos) &&
960             mdev->tconn->unused_spare_tle == NULL &&
961             test_bit(CREATE_BARRIER, &mdev->tconn->flags)) {
962                 /* someone closed the current epoch
963                  * while we were grabbing the spinlock */
964                 spin_unlock_irq(&mdev->tconn->req_lock);
965                 goto allocate_barrier;
966         }
967
968
969         /* Update disk stats */
970         _drbd_start_io_acct(mdev, req, bio);
971
972         /* _maybe_start_new_epoch(mdev);
973          * If we need to generate a write barrier packet, we have to add the
974          * new epoch (barrier) object, and queue the barrier packet for sending,
975          * and queue the req's data after it _within the same lock_, otherwise
976          * we have race conditions where the reorder domains could be mixed up.
977          *
978          * Even read requests may start a new epoch and queue the corresponding
979          * barrier packet.  To get the write ordering right, we only have to
980          * make sure that, if this is a write request and it triggered a
981          * barrier packet, this request is queued within the same spinlock. */
982         if ((remote || send_oos) && mdev->tconn->unused_spare_tle &&
983             test_and_clear_bit(CREATE_BARRIER, &mdev->tconn->flags)) {
984                 _tl_add_barrier(mdev->tconn, mdev->tconn->unused_spare_tle);
985                 mdev->tconn->unused_spare_tle = NULL;
986         } else {
987                 D_ASSERT(!(remote && rw == WRITE &&
988                            test_bit(CREATE_BARRIER, &mdev->tconn->flags)));
989         }
990
991         /* NOTE
992          * Actually, 'local' may be wrong here already, since we may have failed
993          * to write to the meta data, and may become wrong anytime because of
994          * local io-error for some other request, which would lead to us
995          * "detaching" the local disk.
996          *
997          * 'remote' may become wrong any time because the network could fail.
998          *
999          * This is a harmless race condition, though, since it is handled
1000          * correctly at the appropriate places; so it just defers the failure
1001          * of the respective operation.
1002          */
1003
1004         /* mark them early for readability.
1005          * this just sets some state flags. */
1006         if (remote)
1007                 _req_mod(req, TO_BE_SENT);
1008         if (local)
1009                 _req_mod(req, TO_BE_SUBMITTED);
1010
1011         list_add_tail(&req->tl_requests, &mdev->tconn->newest_tle->requests);
1012
1013         /* NOTE remote first: to get the concurrent write detection right,
1014          * we must register the request before start of local IO.  */
1015         if (remote) {
1016                 /* either WRITE and C_CONNECTED,
1017                  * or READ, and no local disk,
1018                  * or READ, but not in sync.
1019                  */
1020                 _req_mod(req, (rw == WRITE)
1021                                 ? QUEUE_FOR_NET_WRITE
1022                                 : QUEUE_FOR_NET_READ);
1023         }
1024         if (send_oos && drbd_set_out_of_sync(mdev, sector, size))
1025                 _req_mod(req, QUEUE_FOR_SEND_OOS);
1026
1027         rcu_read_lock();
1028         nc = rcu_dereference(mdev->tconn->net_conf);
1029         if (remote &&
1030             nc->on_congestion != OC_BLOCK && mdev->tconn->agreed_pro_version >= 96) {
1031                 int congested = 0;
1032
1033                 if (nc->cong_fill &&
1034                     atomic_read(&mdev->ap_in_flight) >= nc->cong_fill) {
1035                         dev_info(DEV, "Congestion-fill threshold reached\n");
1036                         congested = 1;
1037                 }
1038
1039                 if (mdev->act_log->used >= nc->cong_extents) {
1040                         dev_info(DEV, "Congestion-extents threshold reached\n");
1041                         congested = 1;
1042                 }
1043
1044                 if (congested) {
1045                         queue_barrier(mdev); /* last barrier, after mirrored writes */
1046
1047                         if (nc->on_congestion == OC_PULL_AHEAD)
1048                                 _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
1049                         else  /*nc->on_congestion == OC_DISCONNECT */
1050                                 _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
1051                 }
1052         }
1053         rcu_read_unlock();
1054
1055         spin_unlock_irq(&mdev->tconn->req_lock);
1056         kfree(b); /* if someone else has beaten us to it... */
1057
1058         if (local) {
1059                 req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
1060
1061                 /* State may have changed since we grabbed our reference on the
1062                  * mdev->ldev member. Double check, and short-circuit to endio.
1063                  * In case the last activity log transaction failed to get on
1064                  * stable storage, and this is a WRITE, we may not even submit
1065                  * this bio. */
1066                 if (get_ldev(mdev)) {
1067                         if (drbd_insert_fault(mdev,   rw == WRITE ? DRBD_FAULT_DT_WR
1068                                                     : rw == READ  ? DRBD_FAULT_DT_RD
1069                                                     :               DRBD_FAULT_DT_RA))
1070                                 bio_endio(req->private_bio, -EIO);
1071                         else
1072                                 generic_make_request(req->private_bio);
1073                         put_ldev(mdev);
1074                 } else
1075                         bio_endio(req->private_bio, -EIO);
1076         }
1077
1078         return 0;
1079
1080 fail_free_complete:
1081         if (req->rq_state & RQ_IN_ACT_LOG)
1082                 drbd_al_complete_io(mdev, &req->i);
1083 fail_and_free_req:
1084         if (local) {
1085                 bio_put(req->private_bio);
1086                 req->private_bio = NULL;
1087                 put_ldev(mdev);
1088         }
1089         if (!ret)
1090                 bio_endio(bio, err);
1091
1092         drbd_req_free(req);
1093         dec_ap_bio(mdev);
1094         kfree(b);
1095
1096         return ret;
1097 }
1098
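/* make_request entry point: account the incoming bio via inc_ap_bio() and
 * hand it to __drbd_make_request(). */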
1099 int drbd_make_request(struct request_queue *q, struct bio *bio)
1100 {
1101         struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
1102         unsigned long start_time;
1103
1104         start_time = jiffies;
1105
1106         /*
1107          * what we "blindly" assume:
1108          */
1109         D_ASSERT(bio->bi_size > 0);
1110         D_ASSERT(IS_ALIGNED(bio->bi_size, 512));
1111
1112         inc_ap_bio(mdev);
1113         return __drbd_make_request(mdev, bio, start_time);
1114 }
1115
1116 /* This is called by bio_add_page().
1117  *
1118  * q->max_hw_sectors and other global limits are already enforced there.
1119  *
1120  * We need to call down to our lower level device,
1121  * in case it has special restrictions.
1122  *
1123  * We also may need to enforce configured max-bio-bvecs limits.
1124  *
1125  * As long as the BIO is empty we have to allow at least one bvec,
1126  * regardless of size and offset, so no need to ask lower levels.
1127  */
1128 int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
1129 {
1130         struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
1131         unsigned int bio_size = bvm->bi_size;
1132         int limit = DRBD_MAX_BIO_SIZE;
1133         int backing_limit;
1134
1135         if (bio_size && get_ldev(mdev)) {
1136                 struct request_queue * const b =
1137                         mdev->ldev->backing_bdev->bd_disk->queue;
1138                 if (b->merge_bvec_fn) {
1139                         backing_limit = b->merge_bvec_fn(b, bvm, bvec);
1140                         limit = min(limit, backing_limit);
1141                 }
1142                 put_ldev(mdev);
1143         }
1144         return limit;
1145 }
1146
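/* Recurring per-device timer: inspect the oldest request in the transfer log
 * and escalate if it exceeds the effective network timeout (ko-count * timeout)
 * or the local disk-timeout. */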
1147 void request_timer_fn(unsigned long data)
1148 {
1149         struct drbd_conf *mdev = (struct drbd_conf *) data;
1150         struct drbd_tconn *tconn = mdev->tconn;
1151         struct drbd_request *req; /* oldest request */
1152         struct list_head *le;
1153         struct net_conf *nc;
1154         unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
1155
1156         rcu_read_lock();
1157         nc = rcu_dereference(tconn->net_conf);
1158         ent = nc ? nc->timeout * HZ/10 * nc->ko_count : 0;
1159
1160         if (get_ldev(mdev)) {
1161                 dt = rcu_dereference(mdev->ldev->disk_conf)->disk_timeout * HZ / 10;
1162                 put_ldev(mdev);
1163         }
1164         rcu_read_unlock();
1165
1166         et = min_not_zero(dt, ent);
1167
1168         if (!et || (mdev->state.conn < C_WF_REPORT_PARAMS && mdev->state.disk <= D_FAILED))
1169                 return; /* Recurring timer stopped */
1170
1171         spin_lock_irq(&tconn->req_lock);
1172         le = &tconn->oldest_tle->requests;
1173         if (list_empty(le)) {
1174                 spin_unlock_irq(&tconn->req_lock);
1175                 mod_timer(&mdev->request_timer, jiffies + et);
1176                 return;
1177         }
1178
1179         le = le->prev;
1180         req = list_entry(le, struct drbd_request, tl_requests);
1181         if (ent && req->rq_state & RQ_NET_PENDING) {
1182                 if (time_is_before_eq_jiffies(req->start_time + ent)) {
1183                         dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
1184                         _drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
1185                 }
1186         }
1187         if (dt && req->rq_state & RQ_LOCAL_PENDING) {
1188                 if (time_is_before_eq_jiffies(req->start_time + dt)) {
1189                         dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
1190                         __drbd_chk_io_error(mdev, 1);
1191                 }
1192         }
1193         nt = (time_is_before_eq_jiffies(req->start_time + et) ? jiffies : req->start_time) + et;
1194         spin_unlock_irq(&tconn->req_lock);
1195         mod_timer(&mdev->request_timer, nt);
1196 }