dm snapshot: remove unused definitions
drivers/md/dm-snap.c (cascardo/linux.git)
/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
        ((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE      16
#define DM_TRACKED_CHUNK_HASH(x)        ((unsigned long)(x) & \
                                         (DM_TRACKED_CHUNK_HASH_SIZE - 1))

struct dm_exception_table {
        uint32_t hash_mask;
        unsigned hash_shift;
        struct list_head *table;
};

struct dm_snapshot {
        struct rw_semaphore lock;

        struct dm_dev *origin;
        struct dm_dev *cow;

        struct dm_target *ti;

        /* List of snapshots per Origin */
        struct list_head list;

        /*
         * You can't use a snapshot if this is 0 (e.g. if full).
         * A snapshot-merge target never clears this.
         */
        int valid;

        /* Origin writes don't trigger exceptions until this is set */
        int active;

        atomic_t pending_exceptions_count;

        mempool_t *pending_pool;

        struct dm_exception_table pending;
        struct dm_exception_table complete;

        /*
         * pe_lock protects all pending_exception operations and access
         * as well as the snapshot_bios list.
         */
        spinlock_t pe_lock;

        /* Chunks with outstanding reads */
        spinlock_t tracked_chunk_lock;
        mempool_t *tracked_chunk_pool;
        struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

        /* The on disk metadata handler */
        struct dm_exception_store *store;

        struct dm_kcopyd_client *kcopyd_client;

        /* Wait for events based on state_bits */
        unsigned long state_bits;

        /* Range of chunks currently being merged. */
        chunk_t first_merging_chunk;
        int num_merging_chunks;

        /*
         * The merge operation failed if this flag is set.
         * Failure modes are handled as follows:
         * - I/O error reading the header
         *      => don't load the target; abort.
         * - Header does not have "valid" flag set
         *      => use the origin; forget about the snapshot.
         * - I/O error when reading exceptions
         *      => don't load the target; abort.
         *         (We can't use the intermediate origin state.)
         * - I/O error while merging
         *      => stop merging; set merge_failed; process I/O normally.
         */
        int merge_failed;

        /*
         * Incoming bios that overlap with chunks being merged must wait
         * for them to be committed.
         */
        struct bio_list bios_queued_during_merge;
};

/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1

struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
        return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
        return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static sector_t chunk_to_sector(struct dm_exception_store *store,
                                chunk_t chunk)
{
        return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
        /*
         * There is only ever one instance of a particular block
         * device so we can compare pointers safely.
         */
        return lhs == rhs;
}

struct dm_snap_pending_exception {
        struct dm_exception e;

        /*
         * Origin buffers waiting for this to complete are held
         * in a bio list
         */
        struct bio_list origin_bios;
        struct bio_list snapshot_bios;

        /* Pointer back to snapshot context */
        struct dm_snapshot *snap;

        /*
         * 1 indicates the exception has already been sent to
         * kcopyd.
         */
        int started;
};

/*
 * kmem caches for the completed (dm_exception) and pending
 * (dm_snap_pending_exception) exception structures.
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
        struct hlist_node node;
        chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;

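/*
 * Record that a read against @chunk is in flight so that writers
 * copying the chunk can wait for it to drain; see
 * __check_for_conflicting_io().
 */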
static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
                                                 chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
                                                        GFP_NOIO);
        unsigned long flags;

        c->chunk = chunk;

        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
        hlist_add_head(&c->node,
                       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

        return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
                                struct dm_snap_tracked_chunk *c)
{
        unsigned long flags;

        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
        hlist_del(&c->node);
        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

        mempool_free(c, s->tracked_chunk_pool);
}

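/*
 * Returns non-zero while a read against @chunk is still in flight.
 */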
static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c;
        struct hlist_node *hn;
        int found = 0;

        spin_lock_irq(&s->tracked_chunk_lock);

        hlist_for_each_entry(c, hn,
            &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
                if (c->chunk == chunk) {
                        found = 1;
                        break;
                }
        }

        spin_unlock_irq(&s->tracked_chunk_lock);

        return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
        while (__chunk_is_tracked(s, chunk))
                msleep(1);
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
        /* The origin device */
        struct block_device *bdev;

        struct list_head hash_list;

        /* List of snapshots for this origin */
        struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;

static int init_origin_hash(void)
{
        int i;

        _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
                           GFP_KERNEL);
        if (!_origins) {
                DMERR("unable to allocate memory");
                return -ENOMEM;
        }

        for (i = 0; i < ORIGIN_HASH_SIZE; i++)
                INIT_LIST_HEAD(_origins + i);
        init_rwsem(&_origins_lock);

        return 0;
}

static void exit_origin_hash(void)
{
        kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
        return bdev->bd_dev & ORIGIN_MASK;
}

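/*
 * Find the origin entry for a given block device.
 * _origins_lock must be held by the caller.
 */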
static struct origin *__lookup_origin(struct block_device *origin)
{
        struct list_head *ol;
        struct origin *o;

        ol = &_origins[origin_hash(origin)];
        list_for_each_entry (o, ol, hash_list)
                if (bdev_equal(o->bdev, origin))
                        return o;

        return NULL;
}

static void __insert_origin(struct origin *o)
{
        struct list_head *sl = &_origins[origin_hash(o->bdev)];
        list_add_tail(&o->hash_list, sl);
}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
                                        struct dm_snapshot **snap_src,
                                        struct dm_snapshot **snap_dest,
                                        struct dm_snapshot **snap_merge)
{
        struct dm_snapshot *s;
        struct origin *o;
        int count = 0;
        int active;

        o = __lookup_origin(snap->origin->bdev);
        if (!o)
                goto out;

        list_for_each_entry(s, &o->snapshots, list) {
                if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
                        *snap_merge = s;
                if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
                        continue;

                down_read(&s->lock);
                active = s->active;
                up_read(&s->lock);

                if (active) {
                        if (snap_src)
                                *snap_src = s;
                } else if (snap_dest)
                        *snap_dest = s;

                count++;
        }

out:
        return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
        struct dm_snapshot *snap_merge = NULL;

        /* Does snapshot need exceptions handed over to it? */
        if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
                                          &snap_merge) == 2) ||
            snap_dest) {
                snap->ti->error = "Snapshot cow pairing for exception "
                                  "table handover failed";
                return -EINVAL;
        }

        /*
         * If no snap_src was found, snap cannot become a handover
         * destination.
         */
        if (!snap_src)
                return 0;

        /*
         * Non-snapshot-merge handover?
         */
        if (!dm_target_is_snapshot_merge(snap->ti))
                return 1;

        /*
         * Do not allow more than one merging snapshot.
         */
        if (snap_merge) {
                snap->ti->error = "A snapshot is already merging.";
                return -EINVAL;
        }

        if (!snap_src->store->type->prepare_merge ||
            !snap_src->store->type->commit_merge) {
                snap->ti->error = "Snapshot exception store does not "
                                  "support snapshot-merge.";
                return -EINVAL;
        }

        return 1;
}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
        struct dm_snapshot *l;

        /* Sort the list according to chunk size, largest-first smallest-last */
        list_for_each_entry(l, &o->snapshots, list)
                if (l->store->chunk_size < s->store->chunk_size)
                        break;
        list_add_tail(&s->list, &l->list);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
        struct origin *o, *new_o = NULL;
        struct block_device *bdev = snap->origin->bdev;
        int r = 0;

        new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
        if (!new_o)
                return -ENOMEM;

        down_write(&_origins_lock);

        r = __validate_exception_handover(snap);
        if (r < 0) {
                kfree(new_o);
                goto out;
        }

        o = __lookup_origin(bdev);
        if (o)
                kfree(new_o);
        else {
                /* New origin */
                o = new_o;

                /* Initialise the struct */
                INIT_LIST_HEAD(&o->snapshots);
                o->bdev = bdev;

                __insert_origin(o);
        }

        __insert_snapshot(o, snap);

out:
        up_write(&_origins_lock);

        return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
        struct block_device *bdev = s->origin->bdev;

        down_write(&_origins_lock);

        list_del(&s->list);
        __insert_snapshot(__lookup_origin(bdev), s);

        up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
        struct origin *o;

        down_write(&_origins_lock);
        o = __lookup_origin(s->origin->bdev);

        list_del(&s->list);
        if (o && list_empty(&o->snapshots)) {
                list_del(&o->hash_list);
                kfree(o);
        }

        up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
                                   uint32_t size, unsigned hash_shift)
{
        unsigned int i;

        et->hash_shift = hash_shift;
        et->hash_mask = size - 1;
        et->table = dm_vcalloc(size, sizeof(struct list_head));
        if (!et->table)
                return -ENOMEM;

        for (i = 0; i < size; i++)
                INIT_LIST_HEAD(et->table + i);

        return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
                                    struct kmem_cache *mem)
{
        struct list_head *slot;
        struct dm_exception *ex, *next;
        int i, size;

        size = et->hash_mask + 1;
        for (i = 0; i < size; i++) {
                slot = et->table + i;

                list_for_each_entry_safe (ex, next, slot, hash_list)
                        kmem_cache_free(mem, ex);
        }

        vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
        return (chunk >> et->hash_shift) & et->hash_mask;
}

static void dm_remove_exception(struct dm_exception *e)
{
        list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
                                                chunk_t chunk)
{
        struct list_head *slot;
        struct dm_exception *e;

        slot = &et->table[exception_hash(et, chunk)];
        list_for_each_entry (e, slot, hash_list)
                if (chunk >= e->old_chunk &&
                    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
                        return e;

        return NULL;
}

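/*
 * Allocate a completed exception with GFP_NOIO; if that fails under
 * memory pressure, retry with GFP_ATOMIC to dip into the emergency
 * reserves rather than fail immediately.
 */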
static struct dm_exception *alloc_completed_exception(void)
{
        struct dm_exception *e;

        e = kmem_cache_alloc(exception_cache, GFP_NOIO);
        if (!e)
                e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

        return e;
}

static void free_completed_exception(struct dm_exception *e)
{
        kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
        struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
                                                             GFP_NOIO);

        atomic_inc(&s->pending_exceptions_count);
        pe->snap = s;

        return pe;
}

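/*
 * The barrier before the decrement presumably pairs with the smp_mb()
 * in snapshot_dtr(), which polls pending_exceptions_count before
 * destroying the mempool: the free must be visible before the count
 * can reach zero.
 */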
static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;

        mempool_free(pe, s->pending_pool);
        smp_mb__before_atomic_dec();
        atomic_dec(&s->pending_exceptions_count);
}

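/*
 * Insert a completed exception into the hash table, extending an
 * existing run of consecutive chunks when both the old and new chunk
 * numbers adjoin it.  The per-bucket lists are kept ordered by
 * old_chunk.
 */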
static void dm_insert_exception(struct dm_exception_table *eh,
                                struct dm_exception *new_e)
{
        struct list_head *l;
        struct dm_exception *e = NULL;

        l = &eh->table[exception_hash(eh, new_e->old_chunk)];

        /* Add immediately if this table doesn't support consecutive chunks */
        if (!eh->hash_shift)
                goto out;

        /* List is ordered by old_chunk */
        list_for_each_entry_reverse(e, l, hash_list) {
                /* Insert after an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk +
                                         dm_consecutive_chunk_count(e) + 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
                                         dm_consecutive_chunk_count(e) + 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        free_completed_exception(new_e);
                        return;
                }

                /* Insert before an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk - 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        e->old_chunk--;
                        e->new_chunk--;
                        free_completed_exception(new_e);
                        return;
                }

                if (new_e->old_chunk > e->old_chunk)
                        break;
        }

out:
        list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
        struct dm_snapshot *s = context;
        struct dm_exception *e;

        e = alloc_completed_exception();
        if (!e)
                return -ENOMEM;

        e->old_chunk = old;

        /* Consecutive_count is implicitly initialised to zero */
        e->new_chunk = new;

        dm_insert_exception(&s->complete, e);

        return 0;
}

/*
 * Return the minimum chunk size of all snapshots that have the
 * specified origin.  Return zero if the origin has no snapshots.
 */
static sector_t __minimum_chunk_size(struct origin *o)
{
        struct dm_snapshot *snap;
        unsigned chunk_size = 0;

        if (o)
                list_for_each_entry(snap, &o->snapshots, list)
                        chunk_size = min_not_zero(chunk_size,
                                                  snap->store->chunk_size);

        return chunk_size;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
        /* use a fixed size of 2MB */
        unsigned long mem = 2 * 1024 * 1024;
        mem /= sizeof(struct list_head);

        return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
        sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

        /*
         * Calculate based on the size of the original volume or
         * the COW volume...
         */
        cow_dev_size = get_dev_size(s->cow->bdev);
        origin_dev_size = get_dev_size(s->origin->bdev);
        max_buckets = calc_max_buckets();

        hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
        hash_size = min(hash_size, max_buckets);

        if (hash_size < 64)
                hash_size = 64;
        hash_size = rounddown_pow_of_two(hash_size);
        if (dm_exception_table_init(&s->complete, hash_size,
                                    DM_CHUNK_CONSECUTIVE_BITS))
                return -ENOMEM;

        /*
         * Allocate hash table for in-flight exceptions
         * Make this smaller than the real hash table
         */
        hash_size >>= 3;
        if (hash_size < 64)
                hash_size = 64;

        if (dm_exception_table_init(&s->pending, hash_size, 0)) {
                dm_exception_table_exit(&s->complete, exception_cache);
                return -ENOMEM;
        }

        return 0;
}

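/*
 * Mark the merge as no longer running and wake anyone waiting in
 * stop_merge().
 */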
static void merge_shutdown(struct dm_snapshot *s)
{
        clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
        smp_mb__after_clear_bit();
        wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

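/*
 * Reset the merge window and return the bios that were queued against
 * it.  Caller must hold s->lock.
 */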
static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;

        return bio_list_get(&s->bios_queued_during_merge);
}

/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
                                           chunk_t old_chunk)
{
        struct dm_exception *e;

        e = dm_lookup_exception(&s->complete, old_chunk);
        if (!e) {
                DMERR("Corruption detected: exception for block %llu is "
                      "on disk but not in memory",
                      (unsigned long long)old_chunk);
                return -EINVAL;
        }

        /*
         * If this is the only chunk using this exception, remove exception.
         */
        if (!dm_consecutive_chunk_count(e)) {
                dm_remove_exception(e);
                free_completed_exception(e);
                return 0;
        }

        /*
         * The chunk may be either at the beginning or the end of a
         * group of consecutive chunks - never in the middle.  We are
         * removing chunks in the opposite order to that in which they
         * were added, so this should always be true.
         * Decrement the consecutive chunk counter and adjust the
         * starting point if necessary.
         */
        if (old_chunk == e->old_chunk) {
                e->old_chunk++;
                e->new_chunk++;
        } else if (old_chunk != e->old_chunk +
                   dm_consecutive_chunk_count(e)) {
                DMERR("Attempt to merge block %llu from the "
                      "middle of a chunk range [%llu - %llu]",
                      (unsigned long long)old_chunk,
                      (unsigned long long)e->old_chunk,
                      (unsigned long long)
                      e->old_chunk + dm_consecutive_chunk_count(e));
                return -EINVAL;
        }

        dm_consecutive_chunk_count_dec(e);

        return 0;
}

static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
        struct bio *b = NULL;
        int r;
        chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

        down_write(&s->lock);

        /*
         * Process chunks (and associated exceptions) in reverse order
         * so that dm_consecutive_chunk_count_dec() accounting works.
         */
        do {
                r = __remove_single_exception_chunk(s, old_chunk);
                if (r)
                        goto out;
        } while (old_chunk-- > s->first_merging_chunk);

        b = __release_queued_bios_after_merge(s);

out:
        up_write(&s->lock);
        if (b)
                flush_bios(b);

        return r;
}

static int origin_write_extent(struct dm_snapshot *merging_snap,
                               sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
                           void *context);

static uint64_t read_pending_exceptions_done_count(void)
{
        uint64_t pending_exceptions_done;

        spin_lock(&_pending_exceptions_done_spinlock);
        pending_exceptions_done = _pending_exceptions_done_count;
        spin_unlock(&_pending_exceptions_done_spinlock);

        return pending_exceptions_done;
}

static void increment_pending_exceptions_done_count(void)
{
        spin_lock(&_pending_exceptions_done_spinlock);
        _pending_exceptions_done_count++;
        spin_unlock(&_pending_exceptions_done_spinlock);

        wake_up_all(&_pending_exceptions_done);
}

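/*
 * Run one merge cycle: ask the exception store for the next run of
 * linear chunks, reallocate any exceptions other snapshots need, wait
 * for conflicting I/O to drain, then copy the chunks back to the
 * origin with kcopyd.  merge_callback() schedules the next cycle.
 */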
static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
        int i, linear_chunks;
        chunk_t old_chunk, new_chunk;
        struct dm_io_region src, dest;
        sector_t io_size;
        uint64_t previous_count;

        BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
        if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
                goto shut;

        /*
         * valid flag never changes during merge, so no lock required.
         */
        if (!s->valid) {
                DMERR("Snapshot is invalid: can't merge");
                goto shut;
        }

        linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
                                                      &new_chunk);
        if (linear_chunks <= 0) {
                if (linear_chunks < 0) {
                        DMERR("Read error in exception store: "
                              "shutting down merge");
                        down_write(&s->lock);
                        s->merge_failed = 1;
                        up_write(&s->lock);
                }
                goto shut;
        }

        /* Adjust old_chunk and new_chunk to reflect start of linear region */
        old_chunk = old_chunk + 1 - linear_chunks;
        new_chunk = new_chunk + 1 - linear_chunks;

        /*
         * Use one (potentially large) I/O to copy all 'linear_chunks'
         * from the exception store to the origin
         */
        io_size = linear_chunks * s->store->chunk_size;

        dest.bdev = s->origin->bdev;
        dest.sector = chunk_to_sector(s->store, old_chunk);
        dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

        src.bdev = s->cow->bdev;
        src.sector = chunk_to_sector(s->store, new_chunk);
        src.count = dest.count;

        /*
         * Reallocate any exceptions needed in other snapshots then
         * wait for the pending exceptions to complete.
         * Each time any pending exception (globally on the system)
         * completes we are woken and repeat the process to find out
         * if we can proceed.  While this may not seem a particularly
         * efficient algorithm, it is not expected to have any
         * significant impact on performance.
         */
        previous_count = read_pending_exceptions_done_count();
        while (origin_write_extent(s, dest.sector, io_size)) {
                wait_event(_pending_exceptions_done,
                           (read_pending_exceptions_done_count() !=
                            previous_count));
                /* Retry after the wait, until all exceptions are done. */
                previous_count = read_pending_exceptions_done_count();
        }

        down_write(&s->lock);
        s->first_merging_chunk = old_chunk;
        s->num_merging_chunks = linear_chunks;
        up_write(&s->lock);

        /* Wait until writes to all 'linear_chunks' drain */
        for (i = 0; i < linear_chunks; i++)
                __check_for_conflicting_io(s, old_chunk + i);

        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
        return;

shut:
        merge_shutdown(s);
}

static void error_bios(struct bio *bio);

static void merge_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snapshot *s = context;
        struct bio *b = NULL;

        if (read_err || write_err) {
                if (read_err)
                        DMERR("Read error: shutting down merge.");
                else
                        DMERR("Write error: shutting down merge.");
                goto shut;
        }

        if (s->store->type->commit_merge(s->store,
                                         s->num_merging_chunks) < 0) {
                DMERR("Write error in exception store: shutting down merge");
                goto shut;
        }

        if (remove_single_exception_chunk(s) < 0)
                goto shut;

        snapshot_merge_next_chunks(s);

        return;

shut:
        down_write(&s->lock);
        s->merge_failed = 1;
        b = __release_queued_bios_after_merge(s);
        up_write(&s->lock);
        error_bios(b);

        merge_shutdown(s);
}

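/*
 * Begin merging unless a merge is already running.
 */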
static void start_merge(struct dm_snapshot *s)
{
        if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
                snapshot_merge_next_chunks(s);
}

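/*
 * Action for wait_on_bit() in stop_merge(): just reschedule.
 */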
static int wait_schedule(void *ptr)
{
        schedule();

        return 0;
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
        set_bit(SHUTDOWN_MERGE, &s->state_bits);
        wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
                    TASK_UNINTERRUPTIBLE);
        clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
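/*
 * Example table line (per Documentation/device-mapper/snapshot.txt,
 * device names here are illustrative only):
 *   0 16384 snapshot /dev/vg/base /dev/vg/cow P 8
 * where P selects a persistent exception store and 8 is the chunk
 * size in 512-byte sectors.
 */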
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct dm_snapshot *s;
        int i;
        int r = -EINVAL;
        char *origin_path, *cow_path;
        unsigned args_used, num_flush_requests = 1;
        fmode_t origin_mode = FMODE_READ;

        if (argc != 4) {
                ti->error = "requires exactly 4 arguments";
                r = -EINVAL;
                goto bad;
        }

        if (dm_target_is_snapshot_merge(ti)) {
                num_flush_requests = 2;
                origin_mode = FMODE_WRITE;
        }

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s) {
                ti->error = "Cannot allocate snapshot context private "
                    "structure";
                r = -ENOMEM;
                goto bad;
        }

        origin_path = argv[0];
        argv++;
        argc--;

        r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
        if (r) {
                ti->error = "Cannot get origin device";
                goto bad_origin;
        }

        cow_path = argv[0];
        argv++;
        argc--;

        r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
        if (r) {
                ti->error = "Cannot get COW device";
                goto bad_cow;
        }

        r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
        if (r) {
                ti->error = "Couldn't create exception store";
                r = -EINVAL;
                goto bad_store;
        }

        argv += args_used;
        argc -= args_used;

        s->ti = ti;
        s->valid = 1;
        s->active = 0;
        atomic_set(&s->pending_exceptions_count, 0);
        init_rwsem(&s->lock);
        INIT_LIST_HEAD(&s->list);
        spin_lock_init(&s->pe_lock);
        s->state_bits = 0;
        s->merge_failed = 0;
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;
        bio_list_init(&s->bios_queued_during_merge);

        /* Allocate hash table for COW data */
        if (init_hash_tables(s)) {
                ti->error = "Unable to allocate hash table space";
                r = -ENOMEM;
                goto bad_hash_tables;
        }

        s->kcopyd_client = dm_kcopyd_client_create();
        if (IS_ERR(s->kcopyd_client)) {
                r = PTR_ERR(s->kcopyd_client);
                ti->error = "Could not create kcopyd client";
                goto bad_kcopyd;
        }

        s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
        if (!s->pending_pool) {
                ti->error = "Could not allocate mempool for pending exceptions";
                r = -ENOMEM;
                goto bad_pending_pool;
        }

        s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
                                                         tracked_chunk_cache);
        if (!s->tracked_chunk_pool) {
                ti->error = "Could not allocate tracked_chunk mempool for "
                            "tracking reads";
                r = -ENOMEM;
                goto bad_tracked_chunk_pool;
        }

        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

        spin_lock_init(&s->tracked_chunk_lock);

        ti->private = s;
        ti->num_flush_requests = num_flush_requests;

        /* Add snapshot to the list of snapshots for this origin */
        /* Exceptions aren't triggered till snapshot_resume() is called */
        r = register_snapshot(s);
        if (r == -ENOMEM) {
                ti->error = "Snapshot origin struct allocation failed";
                goto bad_load_and_register;
        } else if (r < 0) {
                /* invalid handover, register_snapshot has set ti->error */
                goto bad_load_and_register;
        }

        /*
         * Metadata must only be loaded into one table at once, so skip this
         * if metadata will be handed over during resume.
         * Chunk size will be set during the handover - set it to zero to
         * ensure it's ignored.
         */
        if (r > 0) {
                s->store->chunk_size = 0;
                return 0;
        }

        r = s->store->type->read_metadata(s->store, dm_add_exception,
                                          (void *)s);
        if (r < 0) {
                ti->error = "Failed to read snapshot metadata";
                goto bad_read_metadata;
        } else if (r > 0) {
                s->valid = 0;
                DMWARN("Snapshot is marked invalid.");
        }

        if (!s->store->chunk_size) {
                ti->error = "Chunk size not set";
                r = -EINVAL;
                goto bad_read_metadata;
        }
        ti->split_io = s->store->chunk_size;

        return 0;

bad_read_metadata:
        unregister_snapshot(s);

bad_load_and_register:
        mempool_destroy(s->tracked_chunk_pool);

bad_tracked_chunk_pool:
        mempool_destroy(s->pending_pool);

bad_pending_pool:
        dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
        dm_exception_table_exit(&s->pending, pending_cache);
        dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
        dm_exception_store_destroy(s->store);

bad_store:
        dm_put_device(ti, s->cow);

bad_cow:
        dm_put_device(ti, s->origin);

bad_origin:
        kfree(s);

bad:
        return r;
}

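/*
 * Release the kcopyd client and both exception tables.
 */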
static void __free_exceptions(struct dm_snapshot *s)
{
        dm_kcopyd_client_destroy(s->kcopyd_client);
        s->kcopyd_client = NULL;

        dm_exception_table_exit(&s->pending, pending_cache);
        dm_exception_table_exit(&s->complete, exception_cache);
}

static void __handover_exceptions(struct dm_snapshot *snap_src,
                                  struct dm_snapshot *snap_dest)
{
        union {
                struct dm_exception_table table_swap;
                struct dm_exception_store *store_swap;
        } u;

        /*
         * Swap all snapshot context information between the two instances.
         */
        u.table_swap = snap_dest->complete;
        snap_dest->complete = snap_src->complete;
        snap_src->complete = u.table_swap;

        u.store_swap = snap_dest->store;
        snap_dest->store = snap_src->store;
        snap_src->store = u.store_swap;

        snap_dest->store->snap = snap_dest;
        snap_src->store->snap = snap_src;

        snap_dest->ti->split_io = snap_dest->store->chunk_size;
        snap_dest->valid = snap_src->valid;

        /*
         * Set source invalid to ensure it receives no further I/O.
         */
        snap_src->valid = 0;
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
        int i;
#endif
        struct dm_snapshot *s = ti->private;
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

        down_read(&_origins_lock);
        /* Check whether exception handover must be cancelled */
        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
        if (snap_src && snap_dest && (s == snap_src)) {
                down_write(&snap_dest->lock);
                snap_dest->valid = 0;
                up_write(&snap_dest->lock);
                DMERR("Cancelling snapshot handover.");
        }
        up_read(&_origins_lock);

        if (dm_target_is_snapshot_merge(ti))
                stop_merge(s);

        /* Prevent further origin writes from using this snapshot. */
        /* After this returns there can be no new kcopyd jobs. */
        unregister_snapshot(s);

        while (atomic_read(&s->pending_exceptions_count))
                msleep(1);
        /*
         * Ensure instructions in mempool_destroy aren't reordered
         * before atomic_read.
         */
        smp_mb();

#ifdef CONFIG_DM_DEBUG
        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

        mempool_destroy(s->tracked_chunk_pool);

        __free_exceptions(s);

        mempool_destroy(s->pending_pool);

        dm_exception_store_destroy(s->store);

        dm_put_device(ti, s->cow);

        dm_put_device(ti, s->origin);

        kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                generic_make_request(bio);
                bio = n;
        }
}

static int do_origin(struct dm_dev *origin, struct bio *bio);

/*
 * Resubmit a list of origin bios, remapping each through do_origin().
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
        struct bio *n;
        int r;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                r = do_origin(s->origin, bio);
                if (r == DM_MAPIO_REMAPPED)
                        generic_make_request(bio);
                bio = n;
        }
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                bio_io_error(bio);
                bio = n;
        }
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
        if (!s->valid)
                return;

        if (err == -EIO)
                DMERR("Invalidating snapshot: Error reading/writing.");
        else if (err == -ENOMEM)
                DMERR("Invalidating snapshot: Unable to allocate exception.");

        if (s->store->type->drop_snapshot)
                s->store->type->drop_snapshot(s->store);

        s->valid = 0;

        dm_table_event(s->ti->table);
}

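/*
 * Finish a pending exception: on success install the completed
 * exception (after waiting for conflicting reads to drain) and then
 * resubmit the bios that were waiting on the copy; on failure
 * invalidate the snapshot and error the queued snapshot bios.
 */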
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
        struct dm_exception *e;
        struct dm_snapshot *s = pe->snap;
        struct bio *origin_bios = NULL;
        struct bio *snapshot_bios = NULL;
        int error = 0;

        if (!success) {
                /* Read/write error - snapshot is unusable */
                down_write(&s->lock);
                __invalidate_snapshot(s, -EIO);
                error = 1;
                goto out;
        }

        e = alloc_completed_exception();
        if (!e) {
                down_write(&s->lock);
                __invalidate_snapshot(s, -ENOMEM);
                error = 1;
                goto out;
        }
        *e = pe->e;

        down_write(&s->lock);
        if (!s->valid) {
                free_completed_exception(e);
                error = 1;
                goto out;
        }

        /* Check for conflicting reads */
        __check_for_conflicting_io(s, pe->e.old_chunk);

        /*
         * Add a proper exception, and remove the
         * in-flight exception from the list.
         */
        dm_insert_exception(&s->complete, e);

 out:
        dm_remove_exception(&pe->e);
        snapshot_bios = bio_list_get(&pe->snapshot_bios);
        origin_bios = bio_list_get(&pe->origin_bios);
        free_pending_exception(pe);

        increment_pending_exceptions_done_count();

        up_write(&s->lock);

        /* Submit any pending write bios */
        if (error)
                error_bios(snapshot_bios);
        else
                flush_bios(snapshot_bios);

        retry_origin_bios(s, origin_bios);
}

static void commit_callback(void *context, int success)
{
        struct dm_snap_pending_exception *pe = context;

        pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snap_pending_exception *pe = context;
        struct dm_snapshot *s = pe->snap;

        if (read_err || write_err)
                pending_complete(pe, 0);
        else
                /* Update the metadata if we are persistent */
                s->store->type->commit_exception(s->store, &pe->e,
                                                 commit_callback, pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;
        struct dm_io_region src, dest;
        struct block_device *bdev = s->origin->bdev;
        sector_t dev_size;

        dev_size = get_dev_size(bdev);

        src.bdev = bdev;
        src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
        src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

        dest.bdev = s->cow->bdev;
        dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
        dest.count = src.count;

        /* Hand over to kcopyd */
        dm_kcopyd_copy(s->kcopyd_client,
                    &src, 1, &dest, 0, copy_callback, pe);
}

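/*
 * Look up the pending exception for @chunk, or return NULL.
 * Caller must hold s->lock.
 */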
static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

        if (!e)
                return NULL;

        return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
                         struct dm_snap_pending_exception *pe, chunk_t chunk)
{
        struct dm_snap_pending_exception *pe2;

        pe2 = __lookup_pending_exception(s, chunk);
        if (pe2) {
                free_pending_exception(pe);
                return pe2;
        }

        pe->e.old_chunk = chunk;
        bio_list_init(&pe->origin_bios);
        bio_list_init(&pe->snapshot_bios);
        pe->started = 0;

        if (s->store->type->prepare_exception(s->store, &pe->e)) {
                free_pending_exception(pe);
                return NULL;
        }

        dm_insert_exception(&s->pending, &pe->e);

        return pe;
}

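/*
 * Redirect @bio to the chunk's location on the COW device, preserving
 * the offset within the chunk.
 */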
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
                            struct bio *bio, chunk_t chunk)
{
        bio->bi_bdev = s->cow->bdev;
        bio->bi_sector = chunk_to_sector(s->store,
                                         dm_chunk_number(e->new_chunk) +
                                         (chunk - e->old_chunk)) +
                                         (bio->bi_sector &
                                          s->store->chunk_mask);
}

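/*
 * Map a bio to the snapshot: reads from already-remapped chunks go
 * straight to the COW device; a write triggers (or joins) a pending
 * exception and is queued until the chunk copy completes.
 */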
1534 static int snapshot_map(struct dm_target *ti, struct bio *bio,
1535                         union map_info *map_context)
1536 {
1537         struct dm_exception *e;
1538         struct dm_snapshot *s = ti->private;
1539         int r = DM_MAPIO_REMAPPED;
1540         chunk_t chunk;
1541         struct dm_snap_pending_exception *pe = NULL;
1542
1543         if (bio->bi_rw & REQ_FLUSH) {
1544                 bio->bi_bdev = s->cow->bdev;
1545                 return DM_MAPIO_REMAPPED;
1546         }
1547
1548         chunk = sector_to_chunk(s->store, bio->bi_sector);
1549
1550         /* Full snapshots are not usable */
1551         /* To get here the table must be live so s->active is always set. */
1552         if (!s->valid)
1553                 return -EIO;
1554
1555         /* FIXME: should only take write lock if we need
1556          * to copy an exception */
1557         down_write(&s->lock);
1558
1559         if (!s->valid) {
1560                 r = -EIO;
1561                 goto out_unlock;
1562         }
1563
1564         /* If the block is already remapped - use that, else remap it */
1565         e = dm_lookup_exception(&s->complete, chunk);
1566         if (e) {
1567                 remap_exception(s, e, bio, chunk);
1568                 goto out_unlock;
1569         }
1570
1571         /*
1572          * Write to snapshot - higher level takes care of RW/RO
1573          * flags so we should only get this if we are
1574          * writeable.
1575          */
1576         if (bio_rw(bio) == WRITE) {
1577                 pe = __lookup_pending_exception(s, chunk);
1578                 if (!pe) {
1579                         up_write(&s->lock);
1580                         pe = alloc_pending_exception(s);
1581                         down_write(&s->lock);
1582
1583                         if (!s->valid) {
1584                                 free_pending_exception(pe);
1585                                 r = -EIO;
1586                                 goto out_unlock;
1587                         }
1588
1589                         e = dm_lookup_exception(&s->complete, chunk);
1590                         if (e) {
1591                                 free_pending_exception(pe);
1592                                 remap_exception(s, e, bio, chunk);
1593                                 goto out_unlock;
1594                         }
1595
1596                         pe = __find_pending_exception(s, pe, chunk);
1597                         if (!pe) {
1598                                 __invalidate_snapshot(s, -ENOMEM);
1599                                 r = -EIO;
1600                                 goto out_unlock;
1601                         }
1602                 }
1603
1604                 remap_exception(s, &pe->e, bio, chunk);
1605                 bio_list_add(&pe->snapshot_bios, bio);
1606
1607                 r = DM_MAPIO_SUBMITTED;
1608
1609                 if (!pe->started) {
1610                         /* this is protected by snap->lock */
1611                         pe->started = 1;
1612                         up_write(&s->lock);
1613                         start_copy(pe);
1614                         goto out;
1615                 }
1616         } else {
1617                 bio->bi_bdev = s->origin->bdev;
1618                 map_context->ptr = track_chunk(s, chunk);
1619         }
1620
1621  out_unlock:
1622         up_write(&s->lock);
1623  out:
1624         return r;
1625 }
1626
1627 /*
1628  * A snapshot-merge target behaves like a combination of a snapshot
1629  * target and a snapshot-origin target.  It only generates new
1630  * exceptions in other snapshots and not in the one that is being
1631  * merged.
1632  *
1633  * For each chunk, if there is an existing exception, it is used to
1634  * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
1635  * which in turn might generate exceptions in other snapshots.
1636  * If merging is currently taking place on the chunk in question, the
1637  * I/O is deferred by adding it to s->bios_queued_during_merge.
1638  */
1639 static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
1640                               union map_info *map_context)
1641 {
1642         struct dm_exception *e;
1643         struct dm_snapshot *s = ti->private;
1644         int r = DM_MAPIO_REMAPPED;
1645         chunk_t chunk;
1646
1647         if (bio->bi_rw & REQ_FLUSH) {
1648                 if (!map_context->target_request_nr)
1649                         bio->bi_bdev = s->origin->bdev;
1650                 else
1651                         bio->bi_bdev = s->cow->bdev;
1652                 map_context->ptr = NULL;
1653                 return DM_MAPIO_REMAPPED;
1654         }
1655
1656         chunk = sector_to_chunk(s->store, bio->bi_sector);
1657
1658         down_write(&s->lock);
1659
1660         /* Full merging snapshots are redirected to the origin */
1661         if (!s->valid)
1662                 goto redirect_to_origin;
1663
1664         /* If the block is already remapped - use that */
1665         e = dm_lookup_exception(&s->complete, chunk);
1666         if (e) {
1667                 /* Queue writes overlapping with chunks being merged */
1668                 if (bio_rw(bio) == WRITE &&
1669                     chunk >= s->first_merging_chunk &&
1670                     chunk < (s->first_merging_chunk +
1671                              s->num_merging_chunks)) {
1672                         bio->bi_bdev = s->origin->bdev;
1673                         bio_list_add(&s->bios_queued_during_merge, bio);
1674                         r = DM_MAPIO_SUBMITTED;
1675                         goto out_unlock;
1676                 }
1677
1678                 remap_exception(s, e, bio, chunk);
1679
1680                 if (bio_rw(bio) == WRITE)
1681                         map_context->ptr = track_chunk(s, chunk);
1682                 goto out_unlock;
1683         }
1684
1685 redirect_to_origin:
1686         bio->bi_bdev = s->origin->bdev;
1687
1688         if (bio_rw(bio) == WRITE) {
1689                 up_write(&s->lock);
1690                 return do_origin(s->origin, bio);
1691         }
1692
1693 out_unlock:
1694         up_write(&s->lock);
1695
1696         return r;
1697 }
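
/*
 * Illustrative sketch, not used by the driver: for chunks that already
 * have a completed exception, the deferral test above is a half-open
 * interval check.  With, say, first_merging_chunk = 100 and
 * num_merging_chunks = 8, writes to chunks 100..107 are queued on
 * bios_queued_during_merge while all other I/O proceeds:
 *
 *	chunk_t c = sector_to_chunk(s->store, bio->bi_sector);
 *	int defer = bio_rw(bio) == WRITE &&
 *		    c >= s->first_merging_chunk &&
 *		    c < s->first_merging_chunk + s->num_merging_chunks;
 */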
1698
1699 static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
1700                            int error, union map_info *map_context)
1701 {
1702         struct dm_snapshot *s = ti->private;
1703         struct dm_snap_tracked_chunk *c = map_context->ptr;
1704
1705         if (c)
1706                 stop_tracking_chunk(s, c);
1707
1708         return 0;
1709 }
1710
1711 static void snapshot_merge_presuspend(struct dm_target *ti)
1712 {
1713         struct dm_snapshot *s = ti->private;
1714
1715         stop_merge(s);
1716 }
1717
1718 static int snapshot_preresume(struct dm_target *ti)
1719 {
1720         int r = 0;
1721         struct dm_snapshot *s = ti->private;
1722         struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1723
1724         down_read(&_origins_lock);
1725         (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1726         if (snap_src && snap_dest) {
1727                 down_read(&snap_src->lock);
1728                 if (s == snap_src) {
1729                         DMERR("Unable to resume snapshot source until "
1730                               "handover completes.");
1731                         r = -EINVAL;
1732                 } else if (!dm_suspended(snap_src->ti)) {
1733                         DMERR("Unable to perform snapshot handover until "
1734                               "source is suspended.");
1735                         r = -EINVAL;
1736                 }
1737                 up_read(&snap_src->lock);
1738         }
1739         up_read(&_origins_lock);
1740
1741         return r;
1742 }
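
/*
 * For illustration only, with hypothetical device names: the checks
 * above correspond to the userspace handover sequence
 *
 *	dmsetup suspend vg-snap		# source must be suspended first
 *	dmsetup resume vg-snap-merge	# destination resumes and takes
 *					# over the exception store
 *
 * Resuming the source itself while the handover is pending fails with
 * -EINVAL.
 */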
1743
1744 static void snapshot_resume(struct dm_target *ti)
1745 {
1746         struct dm_snapshot *s = ti->private;
1747         struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1748
1749         down_read(&_origins_lock);
1750         (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1751         if (snap_src && snap_dest) {
1752                 down_write(&snap_src->lock);
1753                 down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
1754                 __handover_exceptions(snap_src, snap_dest);
1755                 up_write(&snap_dest->lock);
1756                 up_write(&snap_src->lock);
1757         }
1758         up_read(&_origins_lock);
1759
	/* Now that we have the correct chunk size, reregister */
1761         reregister_snapshot(s);
1762
1763         down_write(&s->lock);
1764         s->active = 1;
1765         up_write(&s->lock);
1766 }
1767
1768 static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
1769 {
1770         sector_t min_chunksize;
1771
1772         down_read(&_origins_lock);
1773         min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
1774         up_read(&_origins_lock);
1775
1776         return min_chunksize;
1777 }
1778
1779 static void snapshot_merge_resume(struct dm_target *ti)
1780 {
1781         struct dm_snapshot *s = ti->private;
1782
1783         /*
	 * Hand over exceptions from the existing snapshot.
1785          */
1786         snapshot_resume(ti);
1787
1788         /*
1789          * snapshot-merge acts as an origin, so set ti->split_io
1790          */
1791         ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);
1792
1793         start_merge(s);
1794 }
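
/*
 * Example with illustrative numbers: if the snapshots on the origin
 * use chunk sizes of 16 and 64 sectors, get_origin_minimum_chunksize()
 * returns 16, so ti->split_io = 16 and no bio handed to this target
 * crosses a 16-sector chunk boundary.
 */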
1795
1796 static int snapshot_status(struct dm_target *ti, status_type_t type,
1797                            char *result, unsigned int maxlen)
1798 {
1799         unsigned sz = 0;
1800         struct dm_snapshot *snap = ti->private;
1801
1802         switch (type) {
1803         case STATUSTYPE_INFO:
1805                 down_write(&snap->lock);
1806
1807                 if (!snap->valid)
1808                         DMEMIT("Invalid");
1809                 else if (snap->merge_failed)
1810                         DMEMIT("Merge failed");
1811                 else {
1812                         if (snap->store->type->usage) {
1813                                 sector_t total_sectors, sectors_allocated,
1814                                          metadata_sectors;
1815                                 snap->store->type->usage(snap->store,
1816                                                          &total_sectors,
1817                                                          &sectors_allocated,
1818                                                          &metadata_sectors);
1819                                 DMEMIT("%llu/%llu %llu",
1820                                        (unsigned long long)sectors_allocated,
1821                                        (unsigned long long)total_sectors,
1822                                        (unsigned long long)metadata_sectors);
			} else
				DMEMIT("Unknown");
1826                 }
1827
1828                 up_write(&snap->lock);
1829
1830                 break;
1831
1832         case STATUSTYPE_TABLE:
		/*
		 * The device names are private copies held in each
		 * struct dm_dev, so they can be emitted directly.
		 */
1838                 DMEMIT("%s %s", snap->origin->name, snap->cow->name);
1839                 snap->store->type->status(snap->store, type, result + sz,
1840                                           maxlen - sz);
1841                 break;
1842         }
1843
1844         return 0;
1845 }
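
/*
 * Example STATUSTYPE_INFO output, with illustrative numbers: a store
 * that implements ->usage might emit
 *
 *	409600/2097152 2048
 *
 * i.e. <sectors_allocated>/<total_sectors> <metadata_sectors>, while an
 * invalid snapshot emits "Invalid" and a failed merge "Merge failed".
 */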
1846
1847 static int snapshot_iterate_devices(struct dm_target *ti,
1848                                     iterate_devices_callout_fn fn, void *data)
1849 {
1850         struct dm_snapshot *snap = ti->private;
1851         int r;
1852
1853         r = fn(ti, snap->origin, 0, ti->len, data);
1854
1855         if (!r)
1856                 r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
1857
1858         return r;
1859 }
1860
1862 /*-----------------------------------------------------------------
1863  * Origin methods
1864  *---------------------------------------------------------------*/
1865
1866 /*
1867  * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio is ignored.  The caller may submit it immediately.
1869  * (No remapping actually occurs as the origin is always a direct linear
1870  * map.)
1871  *
1872  * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
1873  * and any supplied bio is added to a list to be submitted once all
1874  * the necessary exceptions exist.
1875  */
1876 static int __origin_write(struct list_head *snapshots, sector_t sector,
1877                           struct bio *bio)
1878 {
1879         int r = DM_MAPIO_REMAPPED;
1880         struct dm_snapshot *snap;
1881         struct dm_exception *e;
1882         struct dm_snap_pending_exception *pe;
1883         struct dm_snap_pending_exception *pe_to_start_now = NULL;
1884         struct dm_snap_pending_exception *pe_to_start_last = NULL;
1885         chunk_t chunk;
1886
1887         /* Do all the snapshots on this origin */
1888         list_for_each_entry (snap, snapshots, list) {
1889                 /*
1890                  * Don't make new exceptions in a merging snapshot
1891                  * because it has effectively been deleted
1892                  */
1893                 if (dm_target_is_snapshot_merge(snap->ti))
1894                         continue;
1895
1896                 down_write(&snap->lock);
1897
1898                 /* Only deal with valid and active snapshots */
1899                 if (!snap->valid || !snap->active)
1900                         goto next_snapshot;
1901
1902                 /* Nothing to do if writing beyond end of snapshot */
1903                 if (sector >= dm_table_get_size(snap->ti->table))
1904                         goto next_snapshot;
1905
1906                 /*
1907                  * Remember, different snapshots can have
1908                  * different chunk sizes.
1909                  */
1910                 chunk = sector_to_chunk(snap->store, sector);
1911
1912                 /*
1913                  * Check exception table to see if block
1914                  * is already remapped in this snapshot
1915                  * and trigger an exception if not.
1916                  */
1917                 e = dm_lookup_exception(&snap->complete, chunk);
1918                 if (e)
1919                         goto next_snapshot;
1920
1921                 pe = __lookup_pending_exception(snap, chunk);
1922                 if (!pe) {
1923                         up_write(&snap->lock);
1924                         pe = alloc_pending_exception(snap);
1925                         down_write(&snap->lock);
1926
1927                         if (!snap->valid) {
1928                                 free_pending_exception(pe);
1929                                 goto next_snapshot;
1930                         }
1931
1932                         e = dm_lookup_exception(&snap->complete, chunk);
1933                         if (e) {
1934                                 free_pending_exception(pe);
1935                                 goto next_snapshot;
1936                         }
1937
1938                         pe = __find_pending_exception(snap, pe, chunk);
1939                         if (!pe) {
1940                                 __invalidate_snapshot(snap, -ENOMEM);
1941                                 goto next_snapshot;
1942                         }
1943                 }
1944
1945                 r = DM_MAPIO_SUBMITTED;
1946
1947                 /*
1948                  * If an origin bio was supplied, queue it to wait for the
1949                  * completion of this exception, and start this one last,
1950                  * at the end of the function.
1951                  */
1952                 if (bio) {
1953                         bio_list_add(&pe->origin_bios, bio);
1954                         bio = NULL;
1955
1956                         if (!pe->started) {
1957                                 pe->started = 1;
1958                                 pe_to_start_last = pe;
1959                         }
1960                 }
1961
1962                 if (!pe->started) {
1963                         pe->started = 1;
1964                         pe_to_start_now = pe;
1965                 }
1966
1967  next_snapshot:
1968                 up_write(&snap->lock);
1969
1970                 if (pe_to_start_now) {
1971                         start_copy(pe_to_start_now);
1972                         pe_to_start_now = NULL;
1973                 }
1974         }
1975
1976         /*
1977          * Submit the exception against which the bio is queued last,
1978          * to give the other exceptions a head start.
1979          */
1980         if (pe_to_start_last)
1981                 start_copy(pe_to_start_last);
1982
1983         return r;
1984 }
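
/*
 * The allocate-then-recheck dance above is needed because
 * alloc_pending_exception() may sleep: the lock is dropped for the
 * allocation, so the snapshot's validity and its complete table must
 * be re-examined afterwards (and __find_pending_exception() rechecks
 * the pending table) in case another thread completed or created the
 * exception in the meantime.
 */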
1985
1986 /*
1987  * Called on a write from the origin driver.
1988  */
1989 static int do_origin(struct dm_dev *origin, struct bio *bio)
1990 {
1991         struct origin *o;
1992         int r = DM_MAPIO_REMAPPED;
1993
1994         down_read(&_origins_lock);
1995         o = __lookup_origin(origin->bdev);
1996         if (o)
1997                 r = __origin_write(&o->snapshots, bio->bi_sector, bio);
1998         up_read(&_origins_lock);
1999
2000         return r;
2001 }
2002
2003 /*
2004  * Trigger exceptions in all non-merging snapshots.
2005  *
2006  * The chunk size of the merging snapshot may be larger than the chunk
2007  * size of some other snapshot so we may need to reallocate multiple
2008  * chunks in other snapshots.
2009  *
2010  * We scan all the overlapping exceptions in the other snapshots.
2011  * Returns 1 if anything was reallocated and must be waited for,
2012  * otherwise returns 0.
2013  *
2014  * size must be a multiple of merging_snap's chunk_size.
2015  */
2016 static int origin_write_extent(struct dm_snapshot *merging_snap,
2017                                sector_t sector, unsigned size)
2018 {
2019         int must_wait = 0;
2020         sector_t n;
2021         struct origin *o;
2022
2023         /*
	 * The origin's __minimum_chunk_size() was stored in split_io
	 * by snapshot_merge_resume().
2026          */
2027         down_read(&_origins_lock);
2028         o = __lookup_origin(merging_snap->origin->bdev);
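	/*
	 * The merging snapshot is registered against this origin, so
	 * the lookup cannot return NULL while the merge is running.
	 */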
2029         for (n = 0; n < size; n += merging_snap->ti->split_io)
2030                 if (__origin_write(&o->snapshots, sector + n, NULL) ==
2031                     DM_MAPIO_SUBMITTED)
2032                         must_wait = 1;
2033         up_read(&_origins_lock);
2034
2035         return must_wait;
2036 }
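
/*
 * Worked example with illustrative numbers: if the merging snapshot
 * uses 16-sector chunks but another snapshot of the same origin uses
 * 8-sector chunks, split_io is 8, so a 16-sector extent at sector 1024
 * is scanned at sectors 1024 and 1032 and exceptions are triggered for
 * both overlapping 8-sector chunks.
 */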
2037
2038 /*
2039  * Origin: maps a linear range of a device, with hooks for snapshotting.
2040  */
2041
2042 /*
2043  * Construct an origin mapping: <dev_path>
2044  * The context for an origin is merely a 'struct dm_dev *'
2045  * pointing to the real device.
2046  */
2047 static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2048 {
2049         int r;
2050         struct dm_dev *dev;
2051
2052         if (argc != 1) {
2053                 ti->error = "origin: incorrect number of arguments";
2054                 return -EINVAL;
2055         }
2056
2057         r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
2058         if (r) {
2059                 ti->error = "Cannot get target device";
2060                 return r;
2061         }
2062
2063         ti->private = dev;
2064         ti->num_flush_requests = 1;
2065
2066         return 0;
2067 }
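
/*
 * Example construction, using a hypothetical device:
 *
 *	echo "0 $(blockdev --getsz /dev/vg/base) snapshot-origin /dev/vg/base" \
 *		| dmsetup create base-origin
 *
 * <dev_path> is the only argument this target accepts.
 */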
2068
2069 static void origin_dtr(struct dm_target *ti)
2070 {
2071         struct dm_dev *dev = ti->private;
2072         dm_put_device(ti, dev);
2073 }
2074
2075 static int origin_map(struct dm_target *ti, struct bio *bio,
2076                       union map_info *map_context)
2077 {
2078         struct dm_dev *dev = ti->private;
2079         bio->bi_bdev = dev->bdev;
2080
2081         if (bio->bi_rw & REQ_FLUSH)
2082                 return DM_MAPIO_REMAPPED;
2083
2084         /* Only tell snapshots if this is a write */
2085         return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
2086 }
2087
2088 /*
2089  * Set the target "split_io" field to the minimum of all the snapshots'
2090  * chunk sizes.
2091  */
2092 static void origin_resume(struct dm_target *ti)
2093 {
2094         struct dm_dev *dev = ti->private;
2095
2096         ti->split_io = get_origin_minimum_chunksize(dev->bdev);
2097 }
2098
2099 static int origin_status(struct dm_target *ti, status_type_t type, char *result,
2100                          unsigned int maxlen)
2101 {
2102         struct dm_dev *dev = ti->private;
2103
2104         switch (type) {
2105         case STATUSTYPE_INFO:
2106                 result[0] = '\0';
2107                 break;
2108
2109         case STATUSTYPE_TABLE:
2110                 snprintf(result, maxlen, "%s", dev->name);
2111                 break;
2112         }
2113
2114         return 0;
2115 }
2116
2117 static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2118                         struct bio_vec *biovec, int max_size)
2119 {
2120         struct dm_dev *dev = ti->private;
2121         struct request_queue *q = bdev_get_queue(dev->bdev);
2122
2123         if (!q->merge_bvec_fn)
2124                 return max_size;
2125
2126         bvm->bi_bdev = dev->bdev;
	/* The origin is an identity map, so bvm->bi_sector needs no adjustment */
2128
2129         return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2130 }
2131
2132 static int origin_iterate_devices(struct dm_target *ti,
2133                                   iterate_devices_callout_fn fn, void *data)
2134 {
2135         struct dm_dev *dev = ti->private;
2136
2137         return fn(ti, dev, 0, ti->len, data);
2138 }
2139
2140 static struct target_type origin_target = {
2141         .name    = "snapshot-origin",
2142         .version = {1, 7, 1},
2143         .module  = THIS_MODULE,
2144         .ctr     = origin_ctr,
2145         .dtr     = origin_dtr,
2146         .map     = origin_map,
2147         .resume  = origin_resume,
2148         .status  = origin_status,
2149         .merge   = origin_merge,
2150         .iterate_devices = origin_iterate_devices,
2151 };
2152
2153 static struct target_type snapshot_target = {
2154         .name    = "snapshot",
2155         .version = {1, 10, 0},
2156         .module  = THIS_MODULE,
2157         .ctr     = snapshot_ctr,
2158         .dtr     = snapshot_dtr,
2159         .map     = snapshot_map,
2160         .end_io  = snapshot_end_io,
2161         .preresume  = snapshot_preresume,
2162         .resume  = snapshot_resume,
2163         .status  = snapshot_status,
2164         .iterate_devices = snapshot_iterate_devices,
2165 };
2166
2167 static struct target_type merge_target = {
2168         .name    = dm_snapshot_merge_target_name,
2169         .version = {1, 1, 0},
2170         .module  = THIS_MODULE,
2171         .ctr     = snapshot_ctr,
2172         .dtr     = snapshot_dtr,
2173         .map     = snapshot_merge_map,
2174         .end_io  = snapshot_end_io,
2175         .presuspend = snapshot_merge_presuspend,
2176         .preresume  = snapshot_preresume,
2177         .resume  = snapshot_merge_resume,
2178         .status  = snapshot_status,
2179         .iterate_devices = snapshot_iterate_devices,
2180 };
2181
2182 static int __init dm_snapshot_init(void)
2183 {
2184         int r;
2185
2186         r = dm_exception_store_init();
2187         if (r) {
2188                 DMERR("Failed to initialize exception stores");
2189                 return r;
2190         }
2191
2192         r = dm_register_target(&snapshot_target);
2193         if (r < 0) {
		DMERR("Snapshot target register failed %d", r);
2195                 goto bad_register_snapshot_target;
2196         }
2197
2198         r = dm_register_target(&origin_target);
2199         if (r < 0) {
2200                 DMERR("Origin target register failed %d", r);
2201                 goto bad_register_origin_target;
2202         }
2203
2204         r = dm_register_target(&merge_target);
2205         if (r < 0) {
2206                 DMERR("Merge target register failed %d", r);
2207                 goto bad_register_merge_target;
2208         }
2209
2210         r = init_origin_hash();
2211         if (r) {
2212                 DMERR("init_origin_hash failed.");
2213                 goto bad_origin_hash;
2214         }
2215
2216         exception_cache = KMEM_CACHE(dm_exception, 0);
2217         if (!exception_cache) {
2218                 DMERR("Couldn't create exception cache.");
2219                 r = -ENOMEM;
2220                 goto bad_exception_cache;
2221         }
2222
2223         pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
2224         if (!pending_cache) {
2225                 DMERR("Couldn't create pending cache.");
2226                 r = -ENOMEM;
2227                 goto bad_pending_cache;
2228         }
2229
2230         tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
2231         if (!tracked_chunk_cache) {
2232                 DMERR("Couldn't create cache to track chunks in use.");
2233                 r = -ENOMEM;
2234                 goto bad_tracked_chunk_cache;
2235         }
2236
2237         return 0;
2238
2239 bad_tracked_chunk_cache:
2240         kmem_cache_destroy(pending_cache);
2241 bad_pending_cache:
2242         kmem_cache_destroy(exception_cache);
2243 bad_exception_cache:
2244         exit_origin_hash();
2245 bad_origin_hash:
2246         dm_unregister_target(&merge_target);
2247 bad_register_merge_target:
2248         dm_unregister_target(&origin_target);
2249 bad_register_origin_target:
2250         dm_unregister_target(&snapshot_target);
2251 bad_register_snapshot_target:
2252         dm_exception_store_exit();
2253
2254         return r;
2255 }
2256
2257 static void __exit dm_snapshot_exit(void)
2258 {
2259         dm_unregister_target(&snapshot_target);
2260         dm_unregister_target(&origin_target);
2261         dm_unregister_target(&merge_target);
2262
2263         exit_origin_hash();
2264         kmem_cache_destroy(pending_cache);
2265         kmem_cache_destroy(exception_cache);
2266         kmem_cache_destroy(tracked_chunk_cache);
2267
2268         dm_exception_store_exit();
2269 }
2270
2271 /* Module hooks */
2272 module_init(dm_snapshot_init);
2273 module_exit(dm_snapshot_exit);
2274
2275 MODULE_DESCRIPTION(DM_NAME " snapshot target");
2276 MODULE_AUTHOR("Joe Thornber");
2277 MODULE_LICENSE("GPL");