1 #include <linux/bitops.h>
2 #include <linux/slab.h>
5 #include <linux/pagemap.h>
6 #include <linux/page-flags.h>
7 #include <linux/spinlock.h>
8 #include <linux/blkdev.h>
9 #include <linux/swap.h>
10 #include <linux/writeback.h>
11 #include <linux/pagevec.h>
12 #include <linux/prefetch.h>
13 #include <linux/cleancache.h>
14 #include "extent_io.h"
15 #include "extent_map.h"
18 #include "btrfs_inode.h"
20 #include "check-integrity.h"
22 #include "rcu-string.h"
25 static struct kmem_cache *extent_state_cache;
26 static struct kmem_cache *extent_buffer_cache;
27 static struct bio_set *btrfs_bioset;
29 #ifdef CONFIG_BTRFS_DEBUG
30 static LIST_HEAD(buffers);
31 static LIST_HEAD(states);
33 static DEFINE_SPINLOCK(leak_lock);
36 void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
40 spin_lock_irqsave(&leak_lock, flags);
42 spin_unlock_irqrestore(&leak_lock, flags);
46 void btrfs_leak_debug_del(struct list_head *entry)
50 spin_lock_irqsave(&leak_lock, flags);
52 spin_unlock_irqrestore(&leak_lock, flags);
56 void btrfs_leak_debug_check(void)
58 struct extent_state *state;
59 struct extent_buffer *eb;
61 while (!list_empty(&states)) {
62 state = list_entry(states.next, struct extent_state, leak_list);
63 printk(KERN_ERR "btrfs state leak: start %llu end %llu "
64 "state %lu in tree %p refs %d\n",
65 state->start, state->end, state->state, state->tree,
66 atomic_read(&state->refs));
67 list_del(&state->leak_list);
68 kmem_cache_free(extent_state_cache, state);
71 while (!list_empty(&buffers)) {
72 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
73 printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
75 eb->start, eb->len, atomic_read(&eb->refs));
76 list_del(&eb->leak_list);
77 kmem_cache_free(extent_buffer_cache, eb);
81 #define btrfs_debug_check_extent_io_range(inode, start, end) \
82 __btrfs_debug_check_extent_io_range(__func__, (inode), (start), (end))
83 static inline void __btrfs_debug_check_extent_io_range(const char *caller,
84 struct inode *inode, u64 start, u64 end)
86 u64 isize = i_size_read(inode);
88 if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
89 printk_ratelimited(KERN_DEBUG
90 "btrfs: %s: ino %llu isize %llu odd range [%llu,%llu]\n",
91 caller, btrfs_ino(inode), isize, start, end);
95 #define btrfs_leak_debug_add(new, head) do {} while (0)
96 #define btrfs_leak_debug_del(entry) do {} while (0)
97 #define btrfs_leak_debug_check() do {} while (0)
98 #define btrfs_debug_check_extent_io_range(c, s, e) do {} while (0)
101 #define BUFFER_LRU_MAX 64
106 struct rb_node rb_node;
109 struct extent_page_data {
111 struct extent_io_tree *tree;
112 get_extent_t *get_extent;
113 unsigned long bio_flags;
115 /* tells writepage not to lock the state bits for this range
116 * it still does the unlocking
118 unsigned int extent_locked:1;
120 /* tells the submit_bio code to use a WRITE_SYNC */
121 unsigned int sync_io:1;
124 static noinline void flush_write_bio(void *data);
125 static inline struct btrfs_fs_info *
126 tree_fs_info(struct extent_io_tree *tree)
128 return btrfs_sb(tree->mapping->host->i_sb);
131 int __init extent_io_init(void)
133 extent_state_cache = kmem_cache_create("btrfs_extent_state",
134 sizeof(struct extent_state), 0,
135 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
136 if (!extent_state_cache)
139 extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
140 sizeof(struct extent_buffer), 0,
141 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
142 if (!extent_buffer_cache)
143 goto free_state_cache;
145 btrfs_bioset = bioset_create(BIO_POOL_SIZE,
146 offsetof(struct btrfs_io_bio, bio));
148 goto free_buffer_cache;
150 if (bioset_integrity_create(btrfs_bioset, BIO_POOL_SIZE))
156 bioset_free(btrfs_bioset);
160 kmem_cache_destroy(extent_buffer_cache);
161 extent_buffer_cache = NULL;
164 kmem_cache_destroy(extent_state_cache);
165 extent_state_cache = NULL;
169 void extent_io_exit(void)
171 btrfs_leak_debug_check();
174 * Make sure all delayed rcu frees are flushed before we
178 if (extent_state_cache)
179 kmem_cache_destroy(extent_state_cache);
180 if (extent_buffer_cache)
181 kmem_cache_destroy(extent_buffer_cache);
183 bioset_free(btrfs_bioset);
186 void extent_io_tree_init(struct extent_io_tree *tree,
187 struct address_space *mapping)
189 tree->state = RB_ROOT;
190 INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
192 tree->dirty_bytes = 0;
193 spin_lock_init(&tree->lock);
194 spin_lock_init(&tree->buffer_lock);
195 tree->mapping = mapping;
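/*
 * Illustrative usage sketch, not part of the original file: an extent_io_tree
 * is embedded in a larger object and initialized against the address space it
 * tracks.  The btrfs inode setup code does roughly:
 *
 *	extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping);
 */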
198 static struct extent_state *alloc_extent_state(gfp_t mask)
200 struct extent_state *state;
202 state = kmem_cache_alloc(extent_state_cache, mask);
208 btrfs_leak_debug_add(&state->leak_list, &states);
209 atomic_set(&state->refs, 1);
210 init_waitqueue_head(&state->wq);
211 trace_alloc_extent_state(state, mask, _RET_IP_);
215 void free_extent_state(struct extent_state *state)
219 if (atomic_dec_and_test(&state->refs)) {
220 WARN_ON(state->tree);
221 btrfs_leak_debug_del(&state->leak_list);
222 trace_free_extent_state(state, _RET_IP_);
223 kmem_cache_free(extent_state_cache, state);
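/*
 * Illustrative sketch, not part of the original file (the example_* name is
 * made up): extent_state is reference counted.  alloc_extent_state() returns
 * a state with refs == 1, extra holders (e.g. cache_state() below) take
 * atomic_inc(&state->refs), and every reference is dropped with
 * free_extent_state().  GFP_NOFS is only an example mask.
 */
static void example_extent_state_refs(void)
{
	struct extent_state *state;

	state = alloc_extent_state(GFP_NOFS);
	if (!state)
		return;
	atomic_inc(&state->refs);	/* second holder, like a cached pointer */
	free_extent_state(state);	/* drops the extra reference */
	free_extent_state(state);	/* last reference, frees the struct */
}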
227 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
228 struct rb_node *node)
230 struct rb_node **p = &root->rb_node;
231 struct rb_node *parent = NULL;
232 struct tree_entry *entry;
236 entry = rb_entry(parent, struct tree_entry, rb_node);
238 if (offset < entry->start)
240 else if (offset > entry->end)
246 rb_link_node(node, parent, p);
247 rb_insert_color(node, root);
251 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
252 struct rb_node **prev_ret,
253 struct rb_node **next_ret)
255 struct rb_root *root = &tree->state;
256 struct rb_node *n = root->rb_node;
257 struct rb_node *prev = NULL;
258 struct rb_node *orig_prev = NULL;
259 struct tree_entry *entry;
260 struct tree_entry *prev_entry = NULL;
263 entry = rb_entry(n, struct tree_entry, rb_node);
267 if (offset < entry->start)
269 else if (offset > entry->end)
277 while (prev && offset > prev_entry->end) {
278 prev = rb_next(prev);
279 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
286 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
287 while (prev && offset < prev_entry->start) {
288 prev = rb_prev(prev);
289 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
296 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
299 struct rb_node *prev = NULL;
302 ret = __etree_search(tree, offset, &prev, NULL);
308 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
309 struct extent_state *other)
311 if (tree->ops && tree->ops->merge_extent_hook)
312 tree->ops->merge_extent_hook(tree->mapping->host, new,
317 * utility function to look for merge candidates inside a given range.
318 * Any extents with matching state are merged together into a single
319 * extent in the tree. Extents with EXTENT_IOBITS or EXTENT_BOUNDARY set
320 * are not merged because the end_io handlers need to be able to do
321 * operations on them without sleeping (or doing allocations/splits).
323 * This should be called with the tree lock held.
325 static void merge_state(struct extent_io_tree *tree,
326 struct extent_state *state)
328 struct extent_state *other;
329 struct rb_node *other_node;
331 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
334 other_node = rb_prev(&state->rb_node);
336 other = rb_entry(other_node, struct extent_state, rb_node);
337 if (other->end == state->start - 1 &&
338 other->state == state->state) {
339 merge_cb(tree, state, other);
340 state->start = other->start;
342 rb_erase(&other->rb_node, &tree->state);
343 free_extent_state(other);
346 other_node = rb_next(&state->rb_node);
348 other = rb_entry(other_node, struct extent_state, rb_node);
349 if (other->start == state->end + 1 &&
350 other->state == state->state) {
351 merge_cb(tree, state, other);
352 state->end = other->end;
354 rb_erase(&other->rb_node, &tree->state);
355 free_extent_state(other);
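/*
 * Worked example, not part of the original file: if the tree holds
 * [0, 4095] and [4096, 8191] with identical state bits and neither record
 * carries EXTENT_IOBITS or EXTENT_BOUNDARY, merge_state() extends one record
 * to cover [0, 8191], erases the neighbour from the rbtree and drops its
 * reference with free_extent_state().
 */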
360 static void set_state_cb(struct extent_io_tree *tree,
361 struct extent_state *state, unsigned long *bits)
363 if (tree->ops && tree->ops->set_bit_hook)
364 tree->ops->set_bit_hook(tree->mapping->host, state, bits);
367 static void clear_state_cb(struct extent_io_tree *tree,
368 struct extent_state *state, unsigned long *bits)
370 if (tree->ops && tree->ops->clear_bit_hook)
371 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
374 static void set_state_bits(struct extent_io_tree *tree,
375 struct extent_state *state, unsigned long *bits);
378 * insert an extent_state struct into the tree. 'bits' are set on the
379 * struct before it is inserted.
381 * This may return -EEXIST if the extent is already there, in which case the
382 * state struct is freed.
384 * The tree lock is not taken internally. This is a utility function and
385 * probably isn't what you want to call (see set/clear_extent_bit).
387 static int insert_state(struct extent_io_tree *tree,
388 struct extent_state *state, u64 start, u64 end,
391 struct rb_node *node;
394 WARN(1, KERN_ERR "btrfs end < start %llu %llu\n",
396 state->start = start;
399 set_state_bits(tree, state, bits);
401 node = tree_insert(&tree->state, end, &state->rb_node);
403 struct extent_state *found;
404 found = rb_entry(node, struct extent_state, rb_node);
405 printk(KERN_ERR "btrfs found node %llu %llu on insert of "
407 found->start, found->end, start, end);
411 merge_state(tree, state);
415 static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
418 if (tree->ops && tree->ops->split_extent_hook)
419 tree->ops->split_extent_hook(tree->mapping->host, orig, split);
423 * split a given extent state struct in two, inserting the preallocated
424 * struct 'prealloc' as the newly created second half. 'split' indicates an
425 * offset inside 'orig' where it should be split.
428 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
429 * are two extent state structs in the tree:
430 * prealloc: [orig->start, split - 1]
431 * orig: [ split, orig->end ]
433 * The tree locks are not taken by this function. They need to be held
436 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
437 struct extent_state *prealloc, u64 split)
439 struct rb_node *node;
441 split_cb(tree, orig, split);
443 prealloc->start = orig->start;
444 prealloc->end = split - 1;
445 prealloc->state = orig->state;
448 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
450 free_extent_state(prealloc);
453 prealloc->tree = tree;
457 static struct extent_state *next_state(struct extent_state *state)
459 struct rb_node *next = rb_next(&state->rb_node);
461 return rb_entry(next, struct extent_state, rb_node);
467 * utility function to clear some bits in an extent state struct.
468 * it will optionally wake up any one waiting on this state (wake == 1).
470 * If no bits are set on the state struct after clearing things, the
471 * struct is freed and removed from the tree
473 static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
474 struct extent_state *state,
475 unsigned long *bits, int wake)
477 struct extent_state *next;
478 unsigned long bits_to_clear = *bits & ~EXTENT_CTLBITS;
480 if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
481 u64 range = state->end - state->start + 1;
482 WARN_ON(range > tree->dirty_bytes);
483 tree->dirty_bytes -= range;
485 clear_state_cb(tree, state, bits);
486 state->state &= ~bits_to_clear;
489 if (state->state == 0) {
490 next = next_state(state);
492 rb_erase(&state->rb_node, &tree->state);
494 free_extent_state(state);
499 merge_state(tree, state);
500 next = next_state(state);
505 static struct extent_state *
506 alloc_extent_state_atomic(struct extent_state *prealloc)
509 prealloc = alloc_extent_state(GFP_ATOMIC);
514 static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
516 btrfs_panic(tree_fs_info(tree), err, "Locking error: "
517 "Extent tree was modified by another "
518 "thread while locked.");
522 * clear some bits on a range in the tree. This may require splitting
523 * or inserting elements in the tree, so the gfp mask is used to
524 * indicate which allocations or sleeping are allowed.
526 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
527 * the given range from the tree regardless of state (ie for truncate).
529 * the range [start, end] is inclusive.
531 * This takes the tree lock, and returns 0 on success and < 0 on error.
533 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
534 unsigned long bits, int wake, int delete,
535 struct extent_state **cached_state,
538 struct extent_state *state;
539 struct extent_state *cached;
540 struct extent_state *prealloc = NULL;
541 struct rb_node *node;
546 btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
548 if (bits & EXTENT_DELALLOC)
549 bits |= EXTENT_NORESERVE;
552 bits |= ~EXTENT_CTLBITS;
553 bits |= EXTENT_FIRST_DELALLOC;
555 if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
558 if (!prealloc && (mask & __GFP_WAIT)) {
559 prealloc = alloc_extent_state(mask);
564 spin_lock(&tree->lock);
566 cached = *cached_state;
569 *cached_state = NULL;
573 if (cached && cached->tree && cached->start <= start &&
574 cached->end > start) {
576 atomic_dec(&cached->refs);
581 free_extent_state(cached);
584 * this search will find the extents that end after
587 node = tree_search(tree, start);
590 state = rb_entry(node, struct extent_state, rb_node);
592 if (state->start > end)
594 WARN_ON(state->end < start);
595 last_end = state->end;
597 /* the state doesn't have the wanted bits, go ahead */
598 if (!(state->state & bits)) {
599 state = next_state(state);
604 * | ---- desired range ---- |
606 * | ------------- state -------------- |
608 * We need to split the extent we found, and may flip
609 * bits on second half.
611 * If the extent we found extends past our range, we
612 * just split and search again. It'll get split again
613 * the next time though.
615 * If the extent we found is inside our range, we clear
616 * the desired bit on it.
619 if (state->start < start) {
620 prealloc = alloc_extent_state_atomic(prealloc);
622 err = split_state(tree, state, prealloc, start);
624 extent_io_tree_panic(tree, err);
629 if (state->end <= end) {
630 state = clear_state_bit(tree, state, &bits, wake);
636 * | ---- desired range ---- |
638 * We need to split the extent, and clear the bit
641 if (state->start <= end && state->end > end) {
642 prealloc = alloc_extent_state_atomic(prealloc);
644 err = split_state(tree, state, prealloc, end + 1);
646 extent_io_tree_panic(tree, err);
651 clear_state_bit(tree, prealloc, &bits, wake);
657 state = clear_state_bit(tree, state, &bits, wake);
659 if (last_end == (u64)-1)
661 start = last_end + 1;
662 if (start <= end && state && !need_resched())
667 spin_unlock(&tree->lock);
669 free_extent_state(prealloc);
676 spin_unlock(&tree->lock);
677 if (mask & __GFP_WAIT)
682 static void wait_on_state(struct extent_io_tree *tree,
683 struct extent_state *state)
684 __releases(tree->lock)
685 __acquires(tree->lock)
688 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
689 spin_unlock(&tree->lock);
691 spin_lock(&tree->lock);
692 finish_wait(&state->wq, &wait);
696 * waits for one or more bits to clear on a range in the state tree.
697 * The range [start, end] is inclusive.
698 * The tree lock is taken by this function
700 static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
703 struct extent_state *state;
704 struct rb_node *node;
706 btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
708 spin_lock(&tree->lock);
712 * this search will find all the extents that end after
715 node = tree_search(tree, start);
719 state = rb_entry(node, struct extent_state, rb_node);
721 if (state->start > end)
724 if (state->state & bits) {
725 start = state->start;
726 atomic_inc(&state->refs);
727 wait_on_state(tree, state);
728 free_extent_state(state);
731 start = state->end + 1;
736 cond_resched_lock(&tree->lock);
739 spin_unlock(&tree->lock);
742 static void set_state_bits(struct extent_io_tree *tree,
743 struct extent_state *state,
746 unsigned long bits_to_set = *bits & ~EXTENT_CTLBITS;
748 set_state_cb(tree, state, bits);
749 if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
750 u64 range = state->end - state->start + 1;
751 tree->dirty_bytes += range;
753 state->state |= bits_to_set;
756 static void cache_state(struct extent_state *state,
757 struct extent_state **cached_ptr)
759 if (cached_ptr && !(*cached_ptr)) {
760 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
762 atomic_inc(&state->refs);
768 * set some bits on a range in the tree. This may require allocations or
769 * sleeping, so the gfp mask is used to indicate what is allowed.
771 * If any of the exclusive bits are set, this will fail with -EEXIST if some
772 * part of the range already has the desired bits set. The start of the
773 * existing range is returned in failed_start in this case.
775 * [start, end] is inclusive This takes the tree lock.
778 static int __must_check
779 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
780 unsigned long bits, unsigned long exclusive_bits,
781 u64 *failed_start, struct extent_state **cached_state,
784 struct extent_state *state;
785 struct extent_state *prealloc = NULL;
786 struct rb_node *node;
791 btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
793 bits |= EXTENT_FIRST_DELALLOC;
795 if (!prealloc && (mask & __GFP_WAIT)) {
796 prealloc = alloc_extent_state(mask);
800 spin_lock(&tree->lock);
801 if (cached_state && *cached_state) {
802 state = *cached_state;
803 if (state->start <= start && state->end > start &&
805 node = &state->rb_node;
810 * this search will find all the extents that end after
813 node = tree_search(tree, start);
815 prealloc = alloc_extent_state_atomic(prealloc);
817 err = insert_state(tree, prealloc, start, end, &bits);
819 extent_io_tree_panic(tree, err);
824 state = rb_entry(node, struct extent_state, rb_node);
826 last_start = state->start;
827 last_end = state->end;
830 * | ---- desired range ---- |
833 * Just lock what we found and keep going
835 if (state->start == start && state->end <= end) {
836 if (state->state & exclusive_bits) {
837 *failed_start = state->start;
842 set_state_bits(tree, state, &bits);
843 cache_state(state, cached_state);
844 merge_state(tree, state);
845 if (last_end == (u64)-1)
847 start = last_end + 1;
848 state = next_state(state);
849 if (start < end && state && state->start == start &&
856 * | ---- desired range ---- |
859 * | ------------- state -------------- |
861 * We need to split the extent we found, and may flip bits on
864 * If the extent we found extends past our
865 * range, we just split and search again. It'll get split
866 * again the next time though.
868 * If the extent we found is inside our range, we set the
871 if (state->start < start) {
872 if (state->state & exclusive_bits) {
873 *failed_start = start;
878 prealloc = alloc_extent_state_atomic(prealloc);
880 err = split_state(tree, state, prealloc, start);
882 extent_io_tree_panic(tree, err);
887 if (state->end <= end) {
888 set_state_bits(tree, state, &bits);
889 cache_state(state, cached_state);
890 merge_state(tree, state);
891 if (last_end == (u64)-1)
893 start = last_end + 1;
894 state = next_state(state);
895 if (start < end && state && state->start == start &&
902 * | ---- desired range ---- |
903 * | state | or | state |
905 * There's a hole, we need to insert something in it and
906 * ignore the extent we found.
908 if (state->start > start) {
910 if (end < last_start)
913 this_end = last_start - 1;
915 prealloc = alloc_extent_state_atomic(prealloc);
919 * Avoid freeing 'prealloc' if it can be merged with
922 err = insert_state(tree, prealloc, start, this_end,
925 extent_io_tree_panic(tree, err);
927 cache_state(prealloc, cached_state);
929 start = this_end + 1;
933 * | ---- desired range ---- |
935 * We need to split the extent, and set the bit
938 if (state->start <= end && state->end > end) {
939 if (state->state & exclusive_bits) {
940 *failed_start = start;
945 prealloc = alloc_extent_state_atomic(prealloc);
947 err = split_state(tree, state, prealloc, end + 1);
949 extent_io_tree_panic(tree, err);
951 set_state_bits(tree, prealloc, &bits);
952 cache_state(prealloc, cached_state);
953 merge_state(tree, prealloc);
961 spin_unlock(&tree->lock);
963 free_extent_state(prealloc);
970 spin_unlock(&tree->lock);
971 if (mask & __GFP_WAIT)
976 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
977 unsigned long bits, u64 * failed_start,
978 struct extent_state **cached_state, gfp_t mask)
980 return __set_extent_bit(tree, start, end, bits, 0, failed_start,
986 * convert_extent_bit - convert all bits in a given range from one bit to
988 * @tree: the io tree to search
989 * @start: the start offset in bytes
990 * @end: the end offset in bytes (inclusive)
991 * @bits: the bits to set in this range
992 * @clear_bits: the bits to clear in this range
993 * @cached_state: state that we're going to cache
994 * @mask: the allocation mask
996 * This will go through and set bits for the given range. If any states exist
997 * already in this range they are set with the given bit and cleared of the
998 * clear_bits. This is only meant to be used by things that are mergeable, ie
999 * converting from say DELALLOC to DIRTY. This is not meant to be used with
1000 * boundary bits like LOCK.
1002 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1003 unsigned long bits, unsigned long clear_bits,
1004 struct extent_state **cached_state, gfp_t mask)
1006 struct extent_state *state;
1007 struct extent_state *prealloc = NULL;
1008 struct rb_node *node;
1013 btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
1016 if (!prealloc && (mask & __GFP_WAIT)) {
1017 prealloc = alloc_extent_state(mask);
1022 spin_lock(&tree->lock);
1023 if (cached_state && *cached_state) {
1024 state = *cached_state;
1025 if (state->start <= start && state->end > start &&
1027 node = &state->rb_node;
1033 * this search will find all the extents that end after
1036 node = tree_search(tree, start);
1038 prealloc = alloc_extent_state_atomic(prealloc);
1043 err = insert_state(tree, prealloc, start, end, &bits);
1046 extent_io_tree_panic(tree, err);
1049 state = rb_entry(node, struct extent_state, rb_node);
1051 last_start = state->start;
1052 last_end = state->end;
1055 * | ---- desired range ---- |
1058 * Just lock what we found and keep going
1060 if (state->start == start && state->end <= end) {
1061 set_state_bits(tree, state, &bits);
1062 cache_state(state, cached_state);
1063 state = clear_state_bit(tree, state, &clear_bits, 0);
1064 if (last_end == (u64)-1)
1066 start = last_end + 1;
1067 if (start < end && state && state->start == start &&
1074 * | ---- desired range ---- |
1077 * | ------------- state -------------- |
1079 * We need to split the extent we found, and may flip bits on
1082 * If the extent we found extends past our
1083 * range, we just split and search again. It'll get split
1084 * again the next time though.
1086 * If the extent we found is inside our range, we set the
1087 * desired bit on it.
1089 if (state->start < start) {
1090 prealloc = alloc_extent_state_atomic(prealloc);
1095 err = split_state(tree, state, prealloc, start);
1097 extent_io_tree_panic(tree, err);
1101 if (state->end <= end) {
1102 set_state_bits(tree, state, &bits);
1103 cache_state(state, cached_state);
1104 state = clear_state_bit(tree, state, &clear_bits, 0);
1105 if (last_end == (u64)-1)
1107 start = last_end + 1;
1108 if (start < end && state && state->start == start &&
1115 * | ---- desired range ---- |
1116 * | state | or | state |
1118 * There's a hole, we need to insert something in it and
1119 * ignore the extent we found.
1121 if (state->start > start) {
1123 if (end < last_start)
1126 this_end = last_start - 1;
1128 prealloc = alloc_extent_state_atomic(prealloc);
1135 * Avoid freeing 'prealloc' if it can be merged with
1138 err = insert_state(tree, prealloc, start, this_end,
1141 extent_io_tree_panic(tree, err);
1142 cache_state(prealloc, cached_state);
1144 start = this_end + 1;
1148 * | ---- desired range ---- |
1150 * We need to split the extent, and set the bit
1153 if (state->start <= end && state->end > end) {
1154 prealloc = alloc_extent_state_atomic(prealloc);
1160 err = split_state(tree, state, prealloc, end + 1);
1162 extent_io_tree_panic(tree, err);
1164 set_state_bits(tree, prealloc, &bits);
1165 cache_state(prealloc, cached_state);
1166 clear_state_bit(tree, prealloc, &clear_bits, 0);
1174 spin_unlock(&tree->lock);
1176 free_extent_state(prealloc);
1183 spin_unlock(&tree->lock);
1184 if (mask & __GFP_WAIT)
1189 /* wrappers around set/clear extent bit */
1190 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1193 return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
1197 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1198 unsigned long bits, gfp_t mask)
1200 return set_extent_bit(tree, start, end, bits, NULL,
1204 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1205 unsigned long bits, gfp_t mask)
1207 return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
1210 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
1211 struct extent_state **cached_state, gfp_t mask)
1213 return set_extent_bit(tree, start, end,
1214 EXTENT_DELALLOC | EXTENT_UPTODATE,
1215 NULL, cached_state, mask);
1218 int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
1219 struct extent_state **cached_state, gfp_t mask)
1221 return set_extent_bit(tree, start, end,
1222 EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
1223 NULL, cached_state, mask);
1226 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1229 return clear_extent_bit(tree, start, end,
1230 EXTENT_DIRTY | EXTENT_DELALLOC |
1231 EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
1234 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
1237 return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
1241 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1242 struct extent_state **cached_state, gfp_t mask)
1244 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
1245 cached_state, mask);
1248 int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1249 struct extent_state **cached_state, gfp_t mask)
1251 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
1252 cached_state, mask);
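/*
 * Illustrative sketch, not part of the original file (example_* is a made-up
 * name): the wrappers above are thin conveniences around
 * set/clear_extent_bit().  Marking a byte range dirty and clearing it again
 * could look like this; error handling is omitted and GFP_NOFS is only an
 * example mask.
 */
static void example_mark_and_clear_dirty(struct extent_io_tree *tree,
					 u64 start, u64 end)
{
	set_extent_bits(tree, start, end, EXTENT_DIRTY, GFP_NOFS);
	/* the range now contributes to tree->dirty_bytes */
	clear_extent_bits(tree, start, end, EXTENT_DIRTY, GFP_NOFS);
}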
1256 * either insert or lock the state struct between start and end; use mask to
1257 * tell us if waiting is desired.
1259 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1260 unsigned long bits, struct extent_state **cached_state)
1265 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
1266 EXTENT_LOCKED, &failed_start,
1267 cached_state, GFP_NOFS);
1268 if (err == -EEXIST) {
1269 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1270 start = failed_start;
1273 WARN_ON(start > end);
1278 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1280 return lock_extent_bits(tree, start, end, 0, NULL);
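/*
 * Illustrative sketch, not part of the original file (example_* is a made-up
 * name): the usual pattern is to lock a byte range, operate on it, then
 * unlock it.  The *_cached variants let the caller hold on to the
 * extent_state found while locking so the unlock does not have to search the
 * tree again.
 */
static void example_locked_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	struct extent_state *cached = NULL;

	lock_extent_bits(tree, start, end, 0, &cached);
	/* ... read or modify data covered by [start, end] ... */
	unlock_extent_cached(tree, start, end, &cached, GFP_NOFS);
}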
1283 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1288 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1289 &failed_start, NULL, GFP_NOFS);
1290 if (err == -EEXIST) {
1291 if (failed_start > start)
1292 clear_extent_bit(tree, start, failed_start - 1,
1293 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
1299 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1300 struct extent_state **cached, gfp_t mask)
1302 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
1306 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1308 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1312 int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1314 unsigned long index = start >> PAGE_CACHE_SHIFT;
1315 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1318 while (index <= end_index) {
1319 page = find_get_page(inode->i_mapping, index);
1320 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1321 clear_page_dirty_for_io(page);
1322 page_cache_release(page);
1328 int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1330 unsigned long index = start >> PAGE_CACHE_SHIFT;
1331 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1334 while (index <= end_index) {
1335 page = find_get_page(inode->i_mapping, index);
1336 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1337 account_page_redirty(page);
1338 __set_page_dirty_nobuffers(page);
1339 page_cache_release(page);
1346 * helper function to set both pages and extents in the tree writeback
1348 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1350 unsigned long index = start >> PAGE_CACHE_SHIFT;
1351 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1354 while (index <= end_index) {
1355 page = find_get_page(tree->mapping, index);
1356 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1357 set_page_writeback(page);
1358 page_cache_release(page);
1364 /* find the first state struct with 'bits' set after 'start', and
1365 * return it. tree->lock must be held. NULL will be returned if
1366 * nothing was found after 'start'
1368 static struct extent_state *
1369 find_first_extent_bit_state(struct extent_io_tree *tree,
1370 u64 start, unsigned long bits)
1372 struct rb_node *node;
1373 struct extent_state *state;
1376 * this search will find all the extents that end after
1379 node = tree_search(tree, start);
1384 state = rb_entry(node, struct extent_state, rb_node);
1385 if (state->end >= start && (state->state & bits))
1388 node = rb_next(node);
1397 * find the first offset in the io tree with 'bits' set. zero is
1398 * returned if we find something, and *start_ret and *end_ret are
1399 * set to reflect the state struct that was found.
1401 * If nothing was found, 1 is returned. If something was found, 0 is returned.
1403 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1404 u64 *start_ret, u64 *end_ret, unsigned long bits,
1405 struct extent_state **cached_state)
1407 struct extent_state *state;
1411 spin_lock(&tree->lock);
1412 if (cached_state && *cached_state) {
1413 state = *cached_state;
1414 if (state->end == start - 1 && state->tree) {
1415 n = rb_next(&state->rb_node);
1417 state = rb_entry(n, struct extent_state,
1419 if (state->state & bits)
1423 free_extent_state(*cached_state);
1424 *cached_state = NULL;
1427 free_extent_state(*cached_state);
1428 *cached_state = NULL;
1431 state = find_first_extent_bit_state(tree, start, bits);
1434 cache_state(state, cached_state);
1435 *start_ret = state->start;
1436 *end_ret = state->end;
1440 spin_unlock(&tree->lock);
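/*
 * Illustrative sketch, not part of the original file (example_* is a made-up
 * name): walking every range with a given bit set, one state record at a
 * time.  find_first_extent_bit() returns 0 when it found something and 1 when
 * nothing is left.
 */
static void example_walk_ranges(struct extent_io_tree *tree, unsigned long bits)
{
	u64 start = 0, found_start, found_end;

	while (!find_first_extent_bit(tree, start, &found_start, &found_end,
				      bits, NULL)) {
		/* [found_start, found_end] has 'bits' set */
		if (found_end == (u64)-1)
			break;
		start = found_end + 1;
	}
}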
1445 * find a contiguous range of bytes in the file marked as delalloc, not
1446 * more than 'max_bytes'. start and end are used to return the range,
1448 * 1 is returned if we find something, 0 if nothing was in the tree
1450 static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1451 u64 *start, u64 *end, u64 max_bytes,
1452 struct extent_state **cached_state)
1454 struct rb_node *node;
1455 struct extent_state *state;
1456 u64 cur_start = *start;
1458 u64 total_bytes = 0;
1460 spin_lock(&tree->lock);
1463 * this search will find all the extents that end after
1466 node = tree_search(tree, cur_start);
1474 state = rb_entry(node, struct extent_state, rb_node);
1475 if (found && (state->start != cur_start ||
1476 (state->state & EXTENT_BOUNDARY))) {
1479 if (!(state->state & EXTENT_DELALLOC)) {
1485 *start = state->start;
1486 *cached_state = state;
1487 atomic_inc(&state->refs);
1491 cur_start = state->end + 1;
1492 node = rb_next(node);
1493 total_bytes += state->end - state->start + 1;
1494 if (total_bytes >= max_bytes)
1500 spin_unlock(&tree->lock);
1504 static noinline void __unlock_for_delalloc(struct inode *inode,
1505 struct page *locked_page,
1509 struct page *pages[16];
1510 unsigned long index = start >> PAGE_CACHE_SHIFT;
1511 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1512 unsigned long nr_pages = end_index - index + 1;
1515 if (index == locked_page->index && end_index == index)
1518 while (nr_pages > 0) {
1519 ret = find_get_pages_contig(inode->i_mapping, index,
1520 min_t(unsigned long, nr_pages,
1521 ARRAY_SIZE(pages)), pages);
1522 for (i = 0; i < ret; i++) {
1523 if (pages[i] != locked_page)
1524 unlock_page(pages[i]);
1525 page_cache_release(pages[i]);
1533 static noinline int lock_delalloc_pages(struct inode *inode,
1534 struct page *locked_page,
1538 unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1539 unsigned long start_index = index;
1540 unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1541 unsigned long pages_locked = 0;
1542 struct page *pages[16];
1543 unsigned long nrpages;
1547 /* the caller is responsible for locking the start index */
1548 if (index == locked_page->index && index == end_index)
1551 /* skip the page at the start index */
1552 nrpages = end_index - index + 1;
1553 while (nrpages > 0) {
1554 ret = find_get_pages_contig(inode->i_mapping, index,
1555 min_t(unsigned long,
1556 nrpages, ARRAY_SIZE(pages)), pages);
1561 /* now we have an array of pages, lock them all */
1562 for (i = 0; i < ret; i++) {
1564 * the caller is taking responsibility for
1567 if (pages[i] != locked_page) {
1568 lock_page(pages[i]);
1569 if (!PageDirty(pages[i]) ||
1570 pages[i]->mapping != inode->i_mapping) {
1572 unlock_page(pages[i]);
1573 page_cache_release(pages[i]);
1577 page_cache_release(pages[i]);
1586 if (ret && pages_locked) {
1587 __unlock_for_delalloc(inode, locked_page,
1589 ((u64)(start_index + pages_locked - 1)) <<
1596 * find a contiguous range of bytes in the file marked as delalloc, not
1597 * more than 'max_bytes'. start and end are used to return the range,
1599 * 1 is returned if we find something, 0 if nothing was in the tree
1601 static noinline u64 find_lock_delalloc_range(struct inode *inode,
1602 struct extent_io_tree *tree,
1603 struct page *locked_page,
1604 u64 *start, u64 *end,
1610 struct extent_state *cached_state = NULL;
1615 /* step one, find a bunch of delalloc bytes starting at start */
1616 delalloc_start = *start;
1618 found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1619 max_bytes, &cached_state);
1620 if (!found || delalloc_end <= *start) {
1621 *start = delalloc_start;
1622 *end = delalloc_end;
1623 free_extent_state(cached_state);
1628 * start comes from the offset of locked_page. We have to lock
1629 * pages in order, so we can't process delalloc bytes before
1632 if (delalloc_start < *start)
1633 delalloc_start = *start;
1636 * make sure to limit the number of pages we try to lock down
1638 if (delalloc_end + 1 - delalloc_start > max_bytes)
1639 delalloc_end = delalloc_start + max_bytes - 1;
1641 /* step two, lock all the pages after the page that has start */
1642 ret = lock_delalloc_pages(inode, locked_page,
1643 delalloc_start, delalloc_end);
1644 if (ret == -EAGAIN) {
1645 /* some of the pages are gone, let's avoid looping by
1646 * shortening the size of the delalloc range we're searching
1648 free_extent_state(cached_state);
1650 max_bytes = PAGE_CACHE_SIZE;
1658 BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
1660 /* step three, lock the state bits for the whole range */
1661 lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
1663 /* then test to make sure it is all still delalloc */
1664 ret = test_range_bit(tree, delalloc_start, delalloc_end,
1665 EXTENT_DELALLOC, 1, cached_state);
1667 unlock_extent_cached(tree, delalloc_start, delalloc_end,
1668 &cached_state, GFP_NOFS);
1669 __unlock_for_delalloc(inode, locked_page,
1670 delalloc_start, delalloc_end);
1674 free_extent_state(cached_state);
1675 *start = delalloc_start;
1676 *end = delalloc_end;
1681 int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1682 struct page *locked_page,
1683 unsigned long clear_bits,
1684 unsigned long page_ops)
1686 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1688 struct page *pages[16];
1689 unsigned long index = start >> PAGE_CACHE_SHIFT;
1690 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1691 unsigned long nr_pages = end_index - index + 1;
1694 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1698 while (nr_pages > 0) {
1699 ret = find_get_pages_contig(inode->i_mapping, index,
1700 min_t(unsigned long,
1701 nr_pages, ARRAY_SIZE(pages)), pages);
1702 for (i = 0; i < ret; i++) {
1704 if (page_ops & PAGE_SET_PRIVATE2)
1705 SetPagePrivate2(pages[i]);
1707 if (pages[i] == locked_page) {
1708 page_cache_release(pages[i]);
1711 if (page_ops & PAGE_CLEAR_DIRTY)
1712 clear_page_dirty_for_io(pages[i]);
1713 if (page_ops & PAGE_SET_WRITEBACK)
1714 set_page_writeback(pages[i]);
1715 if (page_ops & PAGE_END_WRITEBACK)
1716 end_page_writeback(pages[i]);
1717 if (page_ops & PAGE_UNLOCK)
1718 unlock_page(pages[i]);
1719 page_cache_release(pages[i]);
1729 * count the number of bytes in the tree that have a given bit(s)
1730 * set. This can be fairly slow, except for EXTENT_DIRTY which is
1731 * cached. The total number found is returned.
1733 u64 count_range_bits(struct extent_io_tree *tree,
1734 u64 *start, u64 search_end, u64 max_bytes,
1735 unsigned long bits, int contig)
1737 struct rb_node *node;
1738 struct extent_state *state;
1739 u64 cur_start = *start;
1740 u64 total_bytes = 0;
1744 if (search_end <= cur_start) {
1749 spin_lock(&tree->lock);
1750 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1751 total_bytes = tree->dirty_bytes;
1755 * this search will find all the extents that end after
1758 node = tree_search(tree, cur_start);
1763 state = rb_entry(node, struct extent_state, rb_node);
1764 if (state->start > search_end)
1766 if (contig && found && state->start > last + 1)
1768 if (state->end >= cur_start && (state->state & bits) == bits) {
1769 total_bytes += min(search_end, state->end) + 1 -
1770 max(cur_start, state->start);
1771 if (total_bytes >= max_bytes)
1774 *start = max(cur_start, state->start);
1778 } else if (contig && found) {
1781 node = rb_next(node);
1786 spin_unlock(&tree->lock);
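/*
 * Illustrative sketch, not part of the original file (example_* is a made-up
 * name): counting delalloc bytes in a file, capped at max_bytes.  When
 * something is found, *start is moved forward to the beginning of the first
 * matching range.
 */
static u64 example_count_delalloc_bytes(struct extent_io_tree *tree,
					u64 max_bytes)
{
	u64 start = 0;

	return count_range_bits(tree, &start, (u64)-1, max_bytes,
				EXTENT_DELALLOC, 0);
}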
1791 * set the private field for a given byte offset in the tree. If there isn't
1792 * an extent_state there already, this does nothing.
1794 static int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1796 struct rb_node *node;
1797 struct extent_state *state;
1800 spin_lock(&tree->lock);
1802 * this search will find all the extents that end after
1805 node = tree_search(tree, start);
1810 state = rb_entry(node, struct extent_state, rb_node);
1811 if (state->start != start) {
1815 state->private = private;
1817 spin_unlock(&tree->lock);
1821 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1823 struct rb_node *node;
1824 struct extent_state *state;
1827 spin_lock(&tree->lock);
1829 * this search will find all the extents that end after
1832 node = tree_search(tree, start);
1837 state = rb_entry(node, struct extent_state, rb_node);
1838 if (state->start != start) {
1842 *private = state->private;
1844 spin_unlock(&tree->lock);
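/*
 * Illustrative sketch, not part of the original file (example_* is a made-up
 * name): 'private' is an opaque u64, so callers such as the io failure code
 * below store a pointer in it by casting in both directions.
 */
static void *example_state_private(struct extent_io_tree *tree, u64 start,
				   void *object)
{
	u64 private = 0;

	set_state_private(tree, start, (u64)(unsigned long)object);
	if (get_state_private(tree, start, &private))
		return NULL;		/* no state begins exactly at 'start' */
	return (void *)(unsigned long)private;
}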
1849 * searches a range in the state tree for a given mask.
1850 * If 'filled' == 1, this returns 1 only if every extent in the range
1851 * has the bits set. Otherwise, 1 is returned if any bit in the
1852 * range is found set.
1854 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1855 unsigned long bits, int filled, struct extent_state *cached)
1857 struct extent_state *state = NULL;
1858 struct rb_node *node;
1861 spin_lock(&tree->lock);
1862 if (cached && cached->tree && cached->start <= start &&
1863 cached->end > start)
1864 node = &cached->rb_node;
1866 node = tree_search(tree, start);
1867 while (node && start <= end) {
1868 state = rb_entry(node, struct extent_state, rb_node);
1870 if (filled && state->start > start) {
1875 if (state->start > end)
1878 if (state->state & bits) {
1882 } else if (filled) {
1887 if (state->end == (u64)-1)
1890 start = state->end + 1;
1893 node = rb_next(node);
1900 spin_unlock(&tree->lock);
1905 * helper function to set a given page up to date if all the
1906 * extents in the tree for that page are up to date
1908 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
1910 u64 start = page_offset(page);
1911 u64 end = start + PAGE_CACHE_SIZE - 1;
1912 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1913 SetPageUptodate(page);
1917 * When IO fails, either with EIO or csum verification fails, we
1918 * try other mirrors that might have a good copy of the data. This
1919 * io_failure_record is used to record state as we go through all the
1920 * mirrors. If another mirror has good data, the page is set up to date
1921 * and things continue. If a good mirror can't be found, the original
1922 * bio end_io callback is called to indicate things have failed.
1924 struct io_failure_record {
1929 unsigned long bio_flags;
1935 static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
1940 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1942 set_state_private(failure_tree, rec->start, 0);
1943 ret = clear_extent_bits(failure_tree, rec->start,
1944 rec->start + rec->len - 1,
1945 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1949 ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
1950 rec->start + rec->len - 1,
1951 EXTENT_DAMAGED, GFP_NOFS);
1959 static void repair_io_failure_callback(struct bio *bio, int err)
1961 complete(bio->bi_private);
1965 * this bypasses the standard btrfs submit functions deliberately, as
1966 * the standard behavior is to write all copies in a raid setup. here we only
1967 * want to write the one bad copy. so we do the mapping for ourselves and issue
1968 * submit_bio directly.
1969 * to avoid any synchronization issues, wait for the data after writing, which
1970 * actually prevents the read that triggered the error from finishing.
1971 * currently, there can be no more than two copies of every data bit. thus,
1972 * exactly one rewrite is required.
1974 int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
1975 u64 length, u64 logical, struct page *page,
1979 struct btrfs_device *dev;
1980 DECLARE_COMPLETION_ONSTACK(compl);
1983 struct btrfs_bio *bbio = NULL;
1984 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
1987 BUG_ON(!mirror_num);
1989 /* we can't repair anything in raid56 yet */
1990 if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
1993 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1996 bio->bi_private = &compl;
1997 bio->bi_end_io = repair_io_failure_callback;
1999 map_length = length;
2001 ret = btrfs_map_block(fs_info, WRITE, logical,
2002 &map_length, &bbio, mirror_num);
2007 BUG_ON(mirror_num != bbio->mirror_num);
2008 sector = bbio->stripes[mirror_num-1].physical >> 9;
2009 bio->bi_sector = sector;
2010 dev = bbio->stripes[mirror_num-1].dev;
2012 if (!dev || !dev->bdev || !dev->writeable) {
2016 bio->bi_bdev = dev->bdev;
2017 bio_add_page(bio, page, length, start - page_offset(page));
2018 btrfsic_submit_bio(WRITE_SYNC, bio);
2019 wait_for_completion(&compl);
2021 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
2022 /* try to remap that extent elsewhere? */
2024 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
2028 printk_ratelimited_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
2029 "(dev %s sector %llu)\n", page->mapping->host->i_ino,
2030 start, rcu_str_deref(dev->name), sector);
2036 int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
2039 u64 start = eb->start;
2040 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
2043 for (i = 0; i < num_pages; i++) {
2044 struct page *p = extent_buffer_page(eb, i);
2045 ret = repair_io_failure(root->fs_info, start, PAGE_CACHE_SIZE,
2046 start, p, mirror_num);
2049 start += PAGE_CACHE_SIZE;
2056 * each time an IO finishes, we do a fast check in the IO failure tree
2057 * to see if we need to process or clean up an io_failure_record
2059 static int clean_io_failure(u64 start, struct page *page)
2062 u64 private_failure;
2063 struct io_failure_record *failrec;
2064 struct btrfs_fs_info *fs_info;
2065 struct extent_state *state;
2069 struct inode *inode = page->mapping->host;
2072 ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
2073 (u64)-1, 1, EXTENT_DIRTY, 0);
2077 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
2082 failrec = (struct io_failure_record *)(unsigned long) private_failure;
2083 BUG_ON(!failrec->this_mirror);
2085 if (failrec->in_validation) {
2086 /* there was no real error, just free the record */
2087 pr_debug("clean_io_failure: freeing dummy error at %llu\n",
2093 spin_lock(&BTRFS_I(inode)->io_tree.lock);
2094 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
2097 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
2099 if (state && state->start <= failrec->start &&
2100 state->end >= failrec->start + failrec->len - 1) {
2101 fs_info = BTRFS_I(inode)->root->fs_info;
2102 num_copies = btrfs_num_copies(fs_info, failrec->logical,
2104 if (num_copies > 1) {
2105 ret = repair_io_failure(fs_info, start, failrec->len,
2106 failrec->logical, page,
2107 failrec->failed_mirror);
2115 ret = free_io_failure(inode, failrec, did_repair);
2121 * this is a generic handler for readpage errors (default
2122 * readpage_io_failed_hook). if other copies exist, read those and write back
2123 * good data to the failed position. does not attempt to remap the failed
2124 * extent elsewhere, hoping the device will be smart enough to do this as
2128 static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
2129 struct page *page, u64 start, u64 end,
2132 struct io_failure_record *failrec = NULL;
2134 struct extent_map *em;
2135 struct inode *inode = page->mapping->host;
2136 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2137 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2138 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2140 struct btrfs_io_bio *btrfs_failed_bio;
2141 struct btrfs_io_bio *btrfs_bio;
2147 BUG_ON(failed_bio->bi_rw & REQ_WRITE);
2149 ret = get_state_private(failure_tree, start, &private);
2151 failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2154 failrec->start = start;
2155 failrec->len = end - start + 1;
2156 failrec->this_mirror = 0;
2157 failrec->bio_flags = 0;
2158 failrec->in_validation = 0;
2160 read_lock(&em_tree->lock);
2161 em = lookup_extent_mapping(em_tree, start, failrec->len);
2163 read_unlock(&em_tree->lock);
2168 if (em->start > start || em->start + em->len < start) {
2169 free_extent_map(em);
2172 read_unlock(&em_tree->lock);
2178 logical = start - em->start;
2179 logical = em->block_start + logical;
2180 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2181 logical = em->block_start;
2182 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2183 extent_set_compress_type(&failrec->bio_flags,
2186 pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
2187 "len=%llu\n", logical, start, failrec->len);
2188 failrec->logical = logical;
2189 free_extent_map(em);
2191 /* set the bits in the private failure tree */
2192 ret = set_extent_bits(failure_tree, start, end,
2193 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
2195 ret = set_state_private(failure_tree, start,
2196 (u64)(unsigned long)failrec);
2197 /* set the bits in the inode's tree */
2199 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
2206 failrec = (struct io_failure_record *)(unsigned long)private;
2207 pr_debug("bio_readpage_error: (found) logical=%llu, "
2208 "start=%llu, len=%llu, validation=%d\n",
2209 failrec->logical, failrec->start, failrec->len,
2210 failrec->in_validation);
2212 * when data can be on disk more than twice, add to failrec here
2213 * (e.g. with a list for failed_mirror) to make
2214 * clean_io_failure() clean all those errors at once.
2217 num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
2218 failrec->logical, failrec->len);
2219 if (num_copies == 1) {
2221 * we only have a single copy of the data, so don't bother with
2222 * all the retry and error correction code that follows. no
2223 * matter what the error is, it is very likely to persist.
2225 pr_debug("bio_readpage_error: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
2226 num_copies, failrec->this_mirror, failed_mirror);
2227 free_io_failure(inode, failrec, 0);
2232 * there are two premises:
2233 * a) deliver good data to the caller
2234 * b) correct the bad sectors on disk
2236 if (failed_bio->bi_vcnt > 1) {
2238 * to fulfill b), we need to know the exact failing sectors, as
2239 * we don't want to rewrite any more than the failed ones. thus,
2240 * we need separate read requests for the failed bio
2242 * if the following BUG_ON triggers, our validation request got
2243 * merged. we need separate requests for our algorithm to work.
2245 BUG_ON(failrec->in_validation);
2246 failrec->in_validation = 1;
2247 failrec->this_mirror = failed_mirror;
2248 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
2251 * we're ready to fulfill a) and b) together. get a good copy
2252 * of the failed sector and if we succeed, we have set up
2253 * everything for repair_io_failure to do the rest for us.
2255 if (failrec->in_validation) {
2256 BUG_ON(failrec->this_mirror != failed_mirror);
2257 failrec->in_validation = 0;
2258 failrec->this_mirror = 0;
2260 failrec->failed_mirror = failed_mirror;
2261 failrec->this_mirror++;
2262 if (failrec->this_mirror == failed_mirror)
2263 failrec->this_mirror++;
2264 read_mode = READ_SYNC;
2267 if (failrec->this_mirror > num_copies) {
2268 pr_debug("bio_readpage_error: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
2269 num_copies, failrec->this_mirror, failed_mirror);
2270 free_io_failure(inode, failrec, 0);
2274 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
2276 free_io_failure(inode, failrec, 0);
2279 bio->bi_end_io = failed_bio->bi_end_io;
2280 bio->bi_sector = failrec->logical >> 9;
2281 bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2284 btrfs_failed_bio = btrfs_io_bio(failed_bio);
2285 if (btrfs_failed_bio->csum) {
2286 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2287 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
2289 btrfs_bio = btrfs_io_bio(bio);
2290 btrfs_bio->csum = btrfs_bio->csum_inline;
2291 phy_offset >>= inode->i_sb->s_blocksize_bits;
2292 phy_offset *= csum_size;
2293 memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + phy_offset,
2297 bio_add_page(bio, page, failrec->len, start - page_offset(page));
2299 pr_debug("bio_readpage_error: submitting new read[%#x] to "
2300 "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
2301 failrec->this_mirror, num_copies, failrec->in_validation);
2303 ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
2304 failrec->this_mirror,
2305 failrec->bio_flags, 0);
2309 /* lots and lots of room for performance fixes in the end_bio funcs */
2311 int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2313 int uptodate = (err == 0);
2314 struct extent_io_tree *tree;
2317 tree = &BTRFS_I(page->mapping->host)->io_tree;
2319 if (tree->ops && tree->ops->writepage_end_io_hook) {
2320 ret = tree->ops->writepage_end_io_hook(page, start,
2321 end, NULL, uptodate);
2327 ClearPageUptodate(page);
2334 * after a writepage IO is done, we need to:
2335 * clear the uptodate bits on error
2336 * clear the writeback bits in the extent tree for this IO
2337 * end_page_writeback if the page has no more pending IO
2339 * Scheduling is not allowed, so the extent state tree is expected
2340 * to have one and only one object corresponding to this IO.
2342 static void end_bio_extent_writepage(struct bio *bio, int err)
2344 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2345 struct extent_io_tree *tree;
2350 struct page *page = bvec->bv_page;
2351 tree = &BTRFS_I(page->mapping->host)->io_tree;
2353 /* We always issue full-page writes, but if some block
2354 * in a page fails to write, blk_update_request() will
2355 * advance bv_offset and adjust bv_len to compensate.
2356 * Print a warning for nonzero offsets, and an error
2357 * if they don't add up to a full page. */
2358 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
2359 printk("%s page write in btrfs with offset %u and length %u\n",
2360 bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
2361 ? KERN_ERR "partial" : KERN_INFO "incomplete",
2362 bvec->bv_offset, bvec->bv_len);
2364 start = page_offset(page);
2365 end = start + bvec->bv_offset + bvec->bv_len - 1;
2367 if (--bvec >= bio->bi_io_vec)
2368 prefetchw(&bvec->bv_page->flags);
2370 if (end_extent_writepage(page, err, start, end))
2373 end_page_writeback(page);
2374 } while (bvec >= bio->bi_io_vec);
2380 endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2383 struct extent_state *cached = NULL;
2384 u64 end = start + len - 1;
2386 if (uptodate && tree->track_uptodate)
2387 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
2388 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
2392 * after a readpage IO is done, we need to:
2393 * clear the uptodate bits on error
2394 * set the uptodate bits if things worked
2395 * set the page up to date if all extents in the tree are uptodate
2396 * clear the lock bit in the extent tree
2397 * unlock the page if there are no other extents locked for it
2399 * Scheduling is not allowed, so the extent state tree is expected
2400 * to have one and only one object corresponding to this IO.
2402 static void end_bio_extent_readpage(struct bio *bio, int err)
2404 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
2405 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
2406 struct bio_vec *bvec = bio->bi_io_vec;
2407 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
2408 struct extent_io_tree *tree;
2413 u64 extent_start = 0;
2422 struct page *page = bvec->bv_page;
2423 struct inode *inode = page->mapping->host;
2425 pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
2426 "mirror=%lu\n", (u64)bio->bi_sector, err,
2427 io_bio->mirror_num);
2428 tree = &BTRFS_I(inode)->io_tree;
2430 /* We always issue full-page reads, but if some block
2431 * in a page fails to read, blk_update_request() will
2432 * advance bv_offset and adjust bv_len to compensate.
2433 * Print a warning for nonzero offsets, and an error
2434 * if they don't add up to a full page. */
2435 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
2436 printk("%s page read in btrfs with offset %u and length %u\n",
2437 bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
2438 ? KERN_ERR "partial" : KERN_INFO "incomplete",
2439 bvec->bv_offset, bvec->bv_len);
2441 start = page_offset(page);
2442 end = start + bvec->bv_offset + bvec->bv_len - 1;
2445 if (++bvec <= bvec_end)
2446 prefetchw(&bvec->bv_page->flags);
2448 mirror = io_bio->mirror_num;
2449 if (likely(uptodate && tree->ops &&
2450 tree->ops->readpage_end_io_hook)) {
2451 ret = tree->ops->readpage_end_io_hook(io_bio, offset,
2457 clean_io_failure(start, page);
2460 if (likely(uptodate))
2463 if (tree->ops && tree->ops->readpage_io_failed_hook) {
2464 ret = tree->ops->readpage_io_failed_hook(page, mirror);
2466 test_bit(BIO_UPTODATE, &bio->bi_flags))
2470 * The generic bio_readpage_error handles errors the
2471 * following way: If possible, new read requests are
2472 * created and submitted and will end up in
2473 * end_bio_extent_readpage as well (if we're lucky, not
2474 * in the !uptodate case). In that case it returns 0 and
2475 * we just go on with the next page in our bio. If it
2476 * can't handle the error it will return -EIO and we
2477 * remain responsible for that page.
2479 ret = bio_readpage_error(bio, offset, page, start, end,
2483 test_bit(BIO_UPTODATE, &bio->bi_flags);
2490 if (likely(uptodate)) {
2491 loff_t i_size = i_size_read(inode);
2492 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2495 /* Zero out the end if this page straddles i_size */
2496 offset = i_size & (PAGE_CACHE_SIZE-1);
2497 if (page->index == end_index && offset)
2498 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2499 SetPageUptodate(page);
2501 ClearPageUptodate(page);
2507 if (unlikely(!uptodate)) {
2509 endio_readpage_release_extent(tree,
2515 endio_readpage_release_extent(tree, start,
2516 end - start + 1, 0);
2517 } else if (!extent_len) {
2518 extent_start = start;
2519 extent_len = end + 1 - start;
2520 } else if (extent_start + extent_len == start) {
2521 extent_len += end + 1 - start;
2523 endio_readpage_release_extent(tree, extent_start,
2524 extent_len, uptodate);
2525 extent_start = start;
2526 extent_len = end + 1 - start;
2528 } while (bvec <= bvec_end);
2531 endio_readpage_release_extent(tree, extent_start, extent_len,
2534 io_bio->end_io(io_bio, err);
2539 * this allocates from the btrfs_bioset. We're returning a bio right now
2540 * but you can call btrfs_io_bio for the appropriate container_of magic
2543 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2546 struct btrfs_io_bio *btrfs_bio;
2549 bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset);
2551 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2552 while (!bio && (nr_vecs /= 2)) {
2553 bio = bio_alloc_bioset(gfp_flags,
2554 nr_vecs, btrfs_bioset);
2560 bio->bi_bdev = bdev;
2561 bio->bi_sector = first_sector;
2562 btrfs_bio = btrfs_io_bio(bio);
2563 btrfs_bio->csum = NULL;
2564 btrfs_bio->csum_allocated = NULL;
2565 btrfs_bio->end_io = NULL;
2570 struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
2572 return bio_clone_bioset(bio, gfp_mask, btrfs_bioset);
2576 /* this also allocates from the btrfs_bioset */
2577 struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
2579 struct btrfs_io_bio *btrfs_bio;
2582 bio = bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset);
2584 btrfs_bio = btrfs_io_bio(bio);
2585 btrfs_bio->csum = NULL;
2586 btrfs_bio->csum_allocated = NULL;
2587 btrfs_bio->end_io = NULL;
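/*
 * Illustrative sketch only, nothing in this file calls it: after allocating
 * from btrfs_bioset the enclosing struct btrfs_io_bio can be reached through
 * btrfs_io_bio(), the container_of helper mentioned above.  The sector and
 * vector count are arbitrary example values.
 */
static void __maybe_unused btrfs_bio_alloc_example(struct block_device *bdev)
{
	struct bio *bio;
	struct btrfs_io_bio *io_bio;

	bio = btrfs_bio_alloc(bdev, 0, 1, GFP_NOFS);
	if (!bio)
		return;

	/* the bio is embedded in a struct btrfs_io_bio from btrfs_bioset */
	io_bio = btrfs_io_bio(bio);
	io_bio->mirror_num = 0;

	bio_put(bio);
}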
2593 static int __must_check submit_one_bio(int rw, struct bio *bio,
2594 int mirror_num, unsigned long bio_flags)
2597 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
2598 struct page *page = bvec->bv_page;
2599 struct extent_io_tree *tree = bio->bi_private;
2602 start = page_offset(page) + bvec->bv_offset;
2604 bio->bi_private = NULL;
2608 if (tree->ops && tree->ops->submit_bio_hook)
2609 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
2610 mirror_num, bio_flags, start);
2612 btrfsic_submit_bio(rw, bio);
2614 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2620 static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
2621 unsigned long offset, size_t size, struct bio *bio,
2622 unsigned long bio_flags)
2625 if (tree->ops && tree->ops->merge_bio_hook)
2626 ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
2633 static int submit_extent_page(int rw, struct extent_io_tree *tree,
2634 struct page *page, sector_t sector,
2635 size_t size, unsigned long offset,
2636 struct block_device *bdev,
2637 struct bio **bio_ret,
2638 unsigned long max_pages,
2639 bio_end_io_t end_io_func,
2641 unsigned long prev_bio_flags,
2642 unsigned long bio_flags)
2648 int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
2649 int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
2650 size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
2652 if (bio_ret && *bio_ret) {
2655 contig = bio->bi_sector == sector;
2657 contig = bio_end_sector(bio) == sector;
2659 if (prev_bio_flags != bio_flags || !contig ||
2660 merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
2661 bio_add_page(bio, page, page_size, offset) < page_size) {
2662 ret = submit_one_bio(rw, bio, mirror_num,
2671 if (this_compressed)
2674 nr = bio_get_nr_vecs(bdev);
2676 bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
2680 bio_add_page(bio, page, page_size, offset);
2681 bio->bi_end_io = end_io_func;
2682 bio->bi_private = tree;
2687 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
2692 static void attach_extent_buffer_page(struct extent_buffer *eb,
2695 if (!PagePrivate(page)) {
2696 SetPagePrivate(page);
2697 page_cache_get(page);
2698 set_page_private(page, (unsigned long)eb);
2700 WARN_ON(page->private != (unsigned long)eb);
2704 void set_page_extent_mapped(struct page *page)
2706 if (!PagePrivate(page)) {
2707 SetPagePrivate(page);
2708 page_cache_get(page);
2709 set_page_private(page, EXTENT_PAGE_PRIVATE);
2713 static struct extent_map *
2714 __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
2715 u64 start, u64 len, get_extent_t *get_extent,
2716 struct extent_map **em_cached)
2718 struct extent_map *em;
2720 if (em_cached && *em_cached) {
2722 if (em->in_tree && start >= em->start &&
2723 start < extent_map_end(em)) {
2724 atomic_inc(&em->refs);
2728 free_extent_map(em);
2732 em = get_extent(inode, page, pg_offset, start, len, 0);
2733 if (em_cached && !IS_ERR_OR_NULL(em)) {
2735 atomic_inc(&em->refs);
2741 * basic readpage implementation. Locked extent state structs are inserted
2742 * into the tree and removed when the IO is done (by the end_io
2744 * XXX JDM: This needs looking at to ensure proper page locking
2746 static int __do_readpage(struct extent_io_tree *tree,
2748 get_extent_t *get_extent,
2749 struct extent_map **em_cached,
2750 struct bio **bio, int mirror_num,
2751 unsigned long *bio_flags, int rw)
2753 struct inode *inode = page->mapping->host;
2754 u64 start = page_offset(page);
2755 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2759 u64 last_byte = i_size_read(inode);
2763 struct extent_map *em;
2764 struct block_device *bdev;
2767 int parent_locked = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
2768 size_t pg_offset = 0;
2770 size_t disk_io_size;
2771 size_t blocksize = inode->i_sb->s_blocksize;
2772 unsigned long this_bio_flag = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
2774 set_page_extent_mapped(page);
2777 if (!PageUptodate(page)) {
2778 if (cleancache_get_page(page) == 0) {
2779 BUG_ON(blocksize != PAGE_SIZE);
2780 unlock_extent(tree, start, end);
2785 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2787 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
2790 iosize = PAGE_CACHE_SIZE - zero_offset;
2791 userpage = kmap_atomic(page);
2792 memset(userpage + zero_offset, 0, iosize);
2793 flush_dcache_page(page);
2794 kunmap_atomic(userpage);
2797 while (cur <= end) {
2798 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2800 if (cur >= last_byte) {
2802 struct extent_state *cached = NULL;
2804 iosize = PAGE_CACHE_SIZE - pg_offset;
2805 userpage = kmap_atomic(page);
2806 memset(userpage + pg_offset, 0, iosize);
2807 flush_dcache_page(page);
2808 kunmap_atomic(userpage);
2809 set_extent_uptodate(tree, cur, cur + iosize - 1,
2812 unlock_extent_cached(tree, cur,
2817 em = __get_extent_map(inode, page, pg_offset, cur,
2818 end - cur + 1, get_extent, em_cached);
2819 if (IS_ERR_OR_NULL(em)) {
2822 unlock_extent(tree, cur, end);
2825 extent_offset = cur - em->start;
2826 BUG_ON(extent_map_end(em) <= cur);
2829 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2830 this_bio_flag |= EXTENT_BIO_COMPRESSED;
2831 extent_set_compress_type(&this_bio_flag,
2835 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2836 cur_end = min(extent_map_end(em) - 1, end);
2837 iosize = ALIGN(iosize, blocksize);
2838 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2839 disk_io_size = em->block_len;
2840 sector = em->block_start >> 9;
2842 sector = (em->block_start + extent_offset) >> 9;
2843 disk_io_size = iosize;
2846 block_start = em->block_start;
2847 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2848 block_start = EXTENT_MAP_HOLE;
2849 free_extent_map(em);
2852 /* we've found a hole, just zero and go on */
2853 if (block_start == EXTENT_MAP_HOLE) {
2855 struct extent_state *cached = NULL;
2857 userpage = kmap_atomic(page);
2858 memset(userpage + pg_offset, 0, iosize);
2859 flush_dcache_page(page);
2860 kunmap_atomic(userpage);
2862 set_extent_uptodate(tree, cur, cur + iosize - 1,
2864 unlock_extent_cached(tree, cur, cur + iosize - 1,
2867 pg_offset += iosize;
2870 /* the get_extent function already copied into the page */
2871 if (test_range_bit(tree, cur, cur_end,
2872 EXTENT_UPTODATE, 1, NULL)) {
2873 check_page_uptodate(tree, page);
2875 unlock_extent(tree, cur, cur + iosize - 1);
2877 pg_offset += iosize;
2880 /* we have an inline extent but it didn't get marked up
2881 * to date. Error out
2883 if (block_start == EXTENT_MAP_INLINE) {
2886 unlock_extent(tree, cur, cur + iosize - 1);
2888 pg_offset += iosize;
2893 ret = submit_extent_page(rw, tree, page,
2894 sector, disk_io_size, pg_offset,
2896 end_bio_extent_readpage, mirror_num,
2901 *bio_flags = this_bio_flag;
2905 unlock_extent(tree, cur, cur + iosize - 1);
2908 pg_offset += iosize;
2912 if (!PageError(page))
2913 SetPageUptodate(page);
2919 static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
2920 struct page *pages[], int nr_pages,
2922 get_extent_t *get_extent,
2923 struct extent_map **em_cached,
2924 struct bio **bio, int mirror_num,
2925 unsigned long *bio_flags, int rw)
2927 struct inode *inode;
2928 struct btrfs_ordered_extent *ordered;
2931 inode = pages[0]->mapping->host;
2933 lock_extent(tree, start, end);
2934 ordered = btrfs_lookup_ordered_range(inode, start,
2938 unlock_extent(tree, start, end);
2939 btrfs_start_ordered_extent(inode, ordered, 1);
2940 btrfs_put_ordered_extent(ordered);
2943 for (index = 0; index < nr_pages; index++) {
2944 __do_readpage(tree, pages[index], get_extent, em_cached, bio,
2945 mirror_num, bio_flags, rw);
2946 page_cache_release(pages[index]);
2950 static void __extent_readpages(struct extent_io_tree *tree,
2951 struct page *pages[],
2952 int nr_pages, get_extent_t *get_extent,
2953 struct extent_map **em_cached,
2954 struct bio **bio, int mirror_num,
2955 unsigned long *bio_flags, int rw)
2961 int first_index = 0;
2963 for (index = 0; index < nr_pages; index++) {
2964 page_start = page_offset(pages[index]);
2967 end = start + PAGE_CACHE_SIZE - 1;
2968 first_index = index;
2969 } else if (end + 1 == page_start) {
2970 end += PAGE_CACHE_SIZE;
2972 __do_contiguous_readpages(tree, &pages[first_index],
2973 index - first_index, start,
2974 end, get_extent, em_cached,
2975 bio, mirror_num, bio_flags,
2978 end = start + PAGE_CACHE_SIZE - 1;
2979 first_index = index;
2984 __do_contiguous_readpages(tree, &pages[first_index],
2985 index - first_index, start,
2986 end, get_extent, em_cached, bio,
2987 mirror_num, bio_flags, rw);
2990 static int __extent_read_full_page(struct extent_io_tree *tree,
2992 get_extent_t *get_extent,
2993 struct bio **bio, int mirror_num,
2994 unsigned long *bio_flags, int rw)
2996 struct inode *inode = page->mapping->host;
2997 struct btrfs_ordered_extent *ordered;
2998 u64 start = page_offset(page);
2999 u64 end = start + PAGE_CACHE_SIZE - 1;
3003 lock_extent(tree, start, end);
3004 ordered = btrfs_lookup_ordered_extent(inode, start);
3007 unlock_extent(tree, start, end);
3008 btrfs_start_ordered_extent(inode, ordered, 1);
3009 btrfs_put_ordered_extent(ordered);
3012 ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
3017 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
3018 get_extent_t *get_extent, int mirror_num)
3020 struct bio *bio = NULL;
3021 unsigned long bio_flags = 0;
3024 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
3027 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
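/*
 * Illustrative sketch, not part of this file's call graph: a filesystem
 * ->readpage implementation is expected to hand its get_extent_t callback to
 * extent_read_full_page() and let this code build and submit the bio.
 * "example_get_extent" is a stand-in name, not a real symbol.
 */
static int __maybe_unused example_readpage(struct page *page,
					   get_extent_t *example_get_extent)
{
	struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree;

	return extent_read_full_page(tree, page, example_get_extent, 0);
}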
3031 int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
3032 get_extent_t *get_extent, int mirror_num)
3034 struct bio *bio = NULL;
3035 unsigned long bio_flags = EXTENT_BIO_PARENT_LOCKED;
3038 ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
3041 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
3045 static noinline void update_nr_written(struct page *page,
3046 struct writeback_control *wbc,
3047 unsigned long nr_written)
3049 wbc->nr_to_write -= nr_written;
3050 if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
3051 wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
3052 page->mapping->writeback_index = page->index + nr_written;
3056 * the writepage semantics are similar to regular writepage. extent
3057 * records are inserted to lock ranges in the tree, and as dirty areas
3058 * are found, they are marked writeback. Then the lock bits are removed
3059 * and the end_io handler clears the writeback ranges
3061 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3064 struct inode *inode = page->mapping->host;
3065 struct extent_page_data *epd = data;
3066 struct extent_io_tree *tree = epd->tree;
3067 u64 start = page_offset(page);
3069 u64 page_end = start + PAGE_CACHE_SIZE - 1;
3073 u64 last_byte = i_size_read(inode);
3077 struct extent_state *cached_state = NULL;
3078 struct extent_map *em;
3079 struct block_device *bdev;
3082 size_t pg_offset = 0;
3084 loff_t i_size = i_size_read(inode);
3085 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
3091 unsigned long nr_written = 0;
3092 bool fill_delalloc = true;
3094 if (wbc->sync_mode == WB_SYNC_ALL)
3095 write_flags = WRITE_SYNC;
3097 write_flags = WRITE;
3099 trace___extent_writepage(page, inode, wbc);
3101 WARN_ON(!PageLocked(page));
3103 ClearPageError(page);
3105 pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
3106 if (page->index > end_index ||
3107 (page->index == end_index && !pg_offset)) {
3108 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
3113 if (page->index == end_index) {
3116 userpage = kmap_atomic(page);
3117 memset(userpage + pg_offset, 0,
3118 PAGE_CACHE_SIZE - pg_offset);
3119 kunmap_atomic(userpage);
3120 flush_dcache_page(page);
3124 set_page_extent_mapped(page);
3126 if (!tree->ops || !tree->ops->fill_delalloc)
3127 fill_delalloc = false;
3129 delalloc_start = start;
3132 if (!epd->extent_locked && fill_delalloc) {
3133 u64 delalloc_to_write = 0;
3135 * make sure the wbc mapping index is at least updated
3138 update_nr_written(page, wbc, 0);
3140 while (delalloc_end < page_end) {
3141 nr_delalloc = find_lock_delalloc_range(inode, tree,
3146 if (nr_delalloc == 0) {
3147 delalloc_start = delalloc_end + 1;
3150 ret = tree->ops->fill_delalloc(inode, page,
3155 /* File system has been set read-only */
3161 * delalloc_end is already one less than the total
3162 * length, so we don't subtract one from
3165 delalloc_to_write += (delalloc_end - delalloc_start +
3168 delalloc_start = delalloc_end + 1;
3170 if (wbc->nr_to_write < delalloc_to_write) {
3173 if (delalloc_to_write < thresh * 2)
3174 thresh = delalloc_to_write;
3175 wbc->nr_to_write = min_t(u64, delalloc_to_write,
3179 /* did the fill delalloc function already unlock and start
3185 * we've unlocked the page, so we can't update
3186 * the mapping's writeback index, just update
3189 wbc->nr_to_write -= nr_written;
3193 if (tree->ops && tree->ops->writepage_start_hook) {
3194 ret = tree->ops->writepage_start_hook(page, start,
3197 /* Fixup worker will requeue */
3199 wbc->pages_skipped++;
3201 redirty_page_for_writepage(wbc, page);
3202 update_nr_written(page, wbc, nr_written);
3210 * we don't want to touch the inode after unlocking the page,
3211 * so we update the mapping writeback index now
3213 update_nr_written(page, wbc, nr_written + 1);
3216 if (last_byte <= start) {
3217 if (tree->ops && tree->ops->writepage_end_io_hook)
3218 tree->ops->writepage_end_io_hook(page, start,
3223 blocksize = inode->i_sb->s_blocksize;
3225 while (cur <= end) {
3226 if (cur >= last_byte) {
3227 if (tree->ops && tree->ops->writepage_end_io_hook)
3228 tree->ops->writepage_end_io_hook(page, cur,
3232 em = epd->get_extent(inode, page, pg_offset, cur,
3234 if (IS_ERR_OR_NULL(em)) {
3239 extent_offset = cur - em->start;
3240 BUG_ON(extent_map_end(em) <= cur);
3242 iosize = min(extent_map_end(em) - cur, end - cur + 1);
3243 iosize = ALIGN(iosize, blocksize);
3244 sector = (em->block_start + extent_offset) >> 9;
3246 block_start = em->block_start;
3247 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3248 free_extent_map(em);
3252 * compressed and inline extents are written through other
3255 if (compressed || block_start == EXTENT_MAP_HOLE ||
3256 block_start == EXTENT_MAP_INLINE) {
3258 * end_io notification does not happen here for
3259 * compressed extents
3261 if (!compressed && tree->ops &&
3262 tree->ops->writepage_end_io_hook)
3263 tree->ops->writepage_end_io_hook(page, cur,
3266 else if (compressed) {
3267 /* we don't want to end_page_writeback on
3268 * a compressed extent. this happens
3275 pg_offset += iosize;
3278 /* leave this out until we have a page_mkwrite call */
3279 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
3280 EXTENT_DIRTY, 0, NULL)) {
3282 pg_offset += iosize;
3286 if (tree->ops && tree->ops->writepage_io_hook) {
3287 ret = tree->ops->writepage_io_hook(page, cur,
3295 unsigned long max_nr = end_index + 1;
3297 set_range_writeback(tree, cur, cur + iosize - 1);
3298 if (!PageWriteback(page)) {
3299 printk(KERN_ERR "btrfs warning page %lu not "
3300 "writeback, cur %llu end %llu\n",
3301 page->index, cur, end);
3304 ret = submit_extent_page(write_flags, tree, page,
3305 sector, iosize, pg_offset,
3306 bdev, &epd->bio, max_nr,
3307 end_bio_extent_writepage,
3313 pg_offset += iosize;
3318 /* make sure the mapping tag for page dirty gets cleared */
3319 set_page_writeback(page);
3320 end_page_writeback(page);
3326 /* drop our reference on any cached states */
3327 free_extent_state(cached_state);
3331 static int eb_wait(void *word)
3337 void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3339 wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
3340 TASK_UNINTERRUPTIBLE);
3343 static int lock_extent_buffer_for_io(struct extent_buffer *eb,
3344 struct btrfs_fs_info *fs_info,
3345 struct extent_page_data *epd)
3347 unsigned long i, num_pages;
3351 if (!btrfs_try_tree_write_lock(eb)) {
3353 flush_write_bio(epd);
3354 btrfs_tree_lock(eb);
3357 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3358 btrfs_tree_unlock(eb);
3362 flush_write_bio(epd);
3366 wait_on_extent_buffer_writeback(eb);
3367 btrfs_tree_lock(eb);
3368 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3370 btrfs_tree_unlock(eb);
3375 * We need to do this to prevent races with anyone who checks whether the eb is
3376 * under IO, since we can end up having no IO bits set for a short period
3379 spin_lock(&eb->refs_lock);
3380 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3381 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3382 spin_unlock(&eb->refs_lock);
3383 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3384 __percpu_counter_add(&fs_info->dirty_metadata_bytes,
3386 fs_info->dirty_metadata_batch);
3389 spin_unlock(&eb->refs_lock);
3392 btrfs_tree_unlock(eb);
3397 num_pages = num_extent_pages(eb->start, eb->len);
3398 for (i = 0; i < num_pages; i++) {
3399 struct page *p = extent_buffer_page(eb, i);
3401 if (!trylock_page(p)) {
3403 flush_write_bio(epd);
3413 static void end_extent_buffer_writeback(struct extent_buffer *eb)
3415 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3416 smp_mb__after_clear_bit();
3417 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3420 static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
3422 int uptodate = err == 0;
3423 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
3424 struct extent_buffer *eb;
3428 struct page *page = bvec->bv_page;
3431 eb = (struct extent_buffer *)page->private;
3433 done = atomic_dec_and_test(&eb->io_pages);
3435 if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
3436 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3437 ClearPageUptodate(page);
3441 end_page_writeback(page);
3446 end_extent_buffer_writeback(eb);
3447 } while (bvec >= bio->bi_io_vec);
3453 static int write_one_eb(struct extent_buffer *eb,
3454 struct btrfs_fs_info *fs_info,
3455 struct writeback_control *wbc,
3456 struct extent_page_data *epd)
3458 struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3459 u64 offset = eb->start;
3460 unsigned long i, num_pages;
3461 unsigned long bio_flags = 0;
3462 int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META;
3465 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3466 num_pages = num_extent_pages(eb->start, eb->len);
3467 atomic_set(&eb->io_pages, num_pages);
3468 if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
3469 bio_flags = EXTENT_BIO_TREE_LOG;
3471 for (i = 0; i < num_pages; i++) {
3472 struct page *p = extent_buffer_page(eb, i);
3474 clear_page_dirty_for_io(p);
3475 set_page_writeback(p);
3476 ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
3477 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3478 -1, end_bio_extent_buffer_writepage,
3479 0, epd->bio_flags, bio_flags);
3480 epd->bio_flags = bio_flags;
3482 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3484 if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3485 end_extent_buffer_writeback(eb);
3489 offset += PAGE_CACHE_SIZE;
3490 update_nr_written(p, wbc, 1);
3494 if (unlikely(ret)) {
3495 for (; i < num_pages; i++) {
3496 struct page *p = extent_buffer_page(eb, i);
3504 int btree_write_cache_pages(struct address_space *mapping,
3505 struct writeback_control *wbc)
3507 struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3508 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3509 struct extent_buffer *eb, *prev_eb = NULL;
3510 struct extent_page_data epd = {
3514 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3519 int nr_to_write_done = 0;
3520 struct pagevec pvec;
3523 pgoff_t end; /* Inclusive */
3527 pagevec_init(&pvec, 0);
3528 if (wbc->range_cyclic) {
3529 index = mapping->writeback_index; /* Start from prev offset */
3532 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3533 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3536 if (wbc->sync_mode == WB_SYNC_ALL)
3537 tag = PAGECACHE_TAG_TOWRITE;
3539 tag = PAGECACHE_TAG_DIRTY;
3541 if (wbc->sync_mode == WB_SYNC_ALL)
3542 tag_pages_for_writeback(mapping, index, end);
3543 while (!done && !nr_to_write_done && (index <= end) &&
3544 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3545 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3549 for (i = 0; i < nr_pages; i++) {
3550 struct page *page = pvec.pages[i];
3552 if (!PagePrivate(page))
3555 if (!wbc->range_cyclic && page->index > end) {
3560 spin_lock(&mapping->private_lock);
3561 if (!PagePrivate(page)) {
3562 spin_unlock(&mapping->private_lock);
3566 eb = (struct extent_buffer *)page->private;
3569 * Shouldn't happen and normally this would be a BUG_ON
3570 * but no sense in crashing the user's box for something
3571 * we can survive anyway.
3574 spin_unlock(&mapping->private_lock);
3579 if (eb == prev_eb) {
3580 spin_unlock(&mapping->private_lock);
3584 ret = atomic_inc_not_zero(&eb->refs);
3585 spin_unlock(&mapping->private_lock);
3590 ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3592 free_extent_buffer(eb);
3596 ret = write_one_eb(eb, fs_info, wbc, &epd);
3599 free_extent_buffer(eb);
3602 free_extent_buffer(eb);
3605 * the filesystem may choose to bump up nr_to_write.
3606 * We have to make sure to honor the new nr_to_write
3609 nr_to_write_done = wbc->nr_to_write <= 0;
3611 pagevec_release(&pvec);
3614 if (!scanned && !done) {
3616 * We hit the last page and there is more work to be done: wrap
3617 * back to the start of the file
3623 flush_write_bio(&epd);
3628 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
3629 * @mapping: address space structure to write
3630 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
3631 * @writepage: function called for each page
3632 * @data: data passed to writepage function
3634 * If a page is already under I/O, write_cache_pages() skips it, even
3635 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
3636 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
3637 * and msync() need to guarantee that all the data which was dirty at the time
3638 * the call was made get new I/O started against them. If wbc->sync_mode is
3639 * WB_SYNC_ALL then we were called for data integrity and we must wait for
3640 * existing IO to complete.
3642 static int extent_write_cache_pages(struct extent_io_tree *tree,
3643 struct address_space *mapping,
3644 struct writeback_control *wbc,
3645 writepage_t writepage, void *data,
3646 void (*flush_fn)(void *))
3648 struct inode *inode = mapping->host;
3651 int nr_to_write_done = 0;
3652 struct pagevec pvec;
3655 pgoff_t end; /* Inclusive */
3660 * We have to hold onto the inode so that ordered extents can do their
3661 * work when the IO finishes. The alternative to this is failing to add
3662 * an ordered extent if the igrab() fails there and that is a huge pain
3663 * to deal with, so instead just hold onto the inode throughout the
3664 * writepages operation. If it fails here we are freeing up the inode
3665 * anyway and we'd rather not waste our time writing out stuff that is
3666 * going to be truncated anyway.
3671 pagevec_init(&pvec, 0);
3672 if (wbc->range_cyclic) {
3673 index = mapping->writeback_index; /* Start from prev offset */
3676 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3677 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3680 if (wbc->sync_mode == WB_SYNC_ALL)
3681 tag = PAGECACHE_TAG_TOWRITE;
3683 tag = PAGECACHE_TAG_DIRTY;
3685 if (wbc->sync_mode == WB_SYNC_ALL)
3686 tag_pages_for_writeback(mapping, index, end);
3687 while (!done && !nr_to_write_done && (index <= end) &&
3688 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3689 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3693 for (i = 0; i < nr_pages; i++) {
3694 struct page *page = pvec.pages[i];
3697 * At this point we hold neither mapping->tree_lock nor
3698 * lock on the page itself: the page may be truncated or
3699 * invalidated (changing page->mapping to NULL), or even
3700 * swizzled back from swapper_space to tmpfs file
3703 if (!trylock_page(page)) {
3708 if (unlikely(page->mapping != mapping)) {
3713 if (!wbc->range_cyclic && page->index > end) {
3719 if (wbc->sync_mode != WB_SYNC_NONE) {
3720 if (PageWriteback(page))
3722 wait_on_page_writeback(page);
3725 if (PageWriteback(page) ||
3726 !clear_page_dirty_for_io(page)) {
3731 ret = (*writepage)(page, wbc, data);
3733 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
3741 * the filesystem may choose to bump up nr_to_write.
3742 * We have to make sure to honor the new nr_to_write
3745 nr_to_write_done = wbc->nr_to_write <= 0;
3747 pagevec_release(&pvec);
3750 if (!scanned && !done) {
3752 * We hit the last page and there is more work to be done: wrap
3753 * back to the start of the file
3759 btrfs_add_delayed_iput(inode);
3763 static void flush_epd_write_bio(struct extent_page_data *epd)
3772 ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags);
3773 BUG_ON(ret < 0); /* -ENOMEM */
3778 static noinline void flush_write_bio(void *data)
3780 struct extent_page_data *epd = data;
3781 flush_epd_write_bio(epd);
3784 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
3785 get_extent_t *get_extent,
3786 struct writeback_control *wbc)
3789 struct extent_page_data epd = {
3792 .get_extent = get_extent,
3794 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3798 ret = __extent_writepage(page, wbc, &epd);
3800 flush_epd_write_bio(&epd);
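/*
 * Illustrative sketch, not part of this file's call graph: how an
 * address_space ->writepage hook would drive extent_write_full_page().
 * "example_get_extent" is a stand-in callback name.
 */
static int __maybe_unused example_writepage(struct page *page,
					    struct writeback_control *wbc,
					    get_extent_t *example_get_extent)
{
	struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree;

	return extent_write_full_page(tree, page, example_get_extent, wbc);
}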
3804 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
3805 u64 start, u64 end, get_extent_t *get_extent,
3809 struct address_space *mapping = inode->i_mapping;
3811 unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
3814 struct extent_page_data epd = {
3817 .get_extent = get_extent,
3819 .sync_io = mode == WB_SYNC_ALL,
3822 struct writeback_control wbc_writepages = {
3824 .nr_to_write = nr_pages * 2,
3825 .range_start = start,
3826 .range_end = end + 1,
3829 while (start <= end) {
3830 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
3831 if (clear_page_dirty_for_io(page))
3832 ret = __extent_writepage(page, &wbc_writepages, &epd);
3834 if (tree->ops && tree->ops->writepage_end_io_hook)
3835 tree->ops->writepage_end_io_hook(page, start,
3836 start + PAGE_CACHE_SIZE - 1,
3840 page_cache_release(page);
3841 start += PAGE_CACHE_SIZE;
3844 flush_epd_write_bio(&epd);
3848 int extent_writepages(struct extent_io_tree *tree,
3849 struct address_space *mapping,
3850 get_extent_t *get_extent,
3851 struct writeback_control *wbc)
3854 struct extent_page_data epd = {
3857 .get_extent = get_extent,
3859 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3863 ret = extent_write_cache_pages(tree, mapping, wbc,
3864 __extent_writepage, &epd,
3866 flush_epd_write_bio(&epd);
3870 int extent_readpages(struct extent_io_tree *tree,
3871 struct address_space *mapping,
3872 struct list_head *pages, unsigned nr_pages,
3873 get_extent_t get_extent)
3875 struct bio *bio = NULL;
3877 unsigned long bio_flags = 0;
3878 struct page *pagepool[16];
3880 struct extent_map *em_cached = NULL;
3883 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
3884 page = list_entry(pages->prev, struct page, lru);
3886 prefetchw(&page->flags);
3887 list_del(&page->lru);
3888 if (add_to_page_cache_lru(page, mapping,
3889 page->index, GFP_NOFS)) {
3890 page_cache_release(page);
3894 pagepool[nr++] = page;
3895 if (nr < ARRAY_SIZE(pagepool))
3897 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
3898 &bio, 0, &bio_flags, READ);
3902 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
3903 &bio, 0, &bio_flags, READ);
3906 free_extent_map(em_cached);
3908 BUG_ON(!list_empty(pages));
3910 return submit_one_bio(READ, bio, 0, bio_flags);
3915 * basic invalidatepage code, this waits on any locked or writeback
3916 * ranges corresponding to the page, and then deletes any extent state
3917 * records from the tree
3919 int extent_invalidatepage(struct extent_io_tree *tree,
3920 struct page *page, unsigned long offset)
3922 struct extent_state *cached_state = NULL;
3923 u64 start = page_offset(page);
3924 u64 end = start + PAGE_CACHE_SIZE - 1;
3925 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
3927 start += ALIGN(offset, blocksize);
3931 lock_extent_bits(tree, start, end, 0, &cached_state);
3932 wait_on_page_writeback(page);
3933 clear_extent_bit(tree, start, end,
3934 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3935 EXTENT_DO_ACCOUNTING,
3936 1, 1, &cached_state, GFP_NOFS);
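/*
 * Illustrative sketch of an ->invalidatepage hook built on the helper above:
 * wait for IO on the affected range and drop the related extent state.  The
 * wiring shown here is an assumption about the caller, not code used in this
 * file.
 */
static void __maybe_unused example_invalidatepage(struct page *page,
						  unsigned long offset)
{
	struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree;

	extent_invalidatepage(tree, page, offset);
}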
3941 * a helper for releasepage, this tests for areas of the page that
3942 * are locked or under IO and drops the related state bits if it is safe
3945 static int try_release_extent_state(struct extent_map_tree *map,
3946 struct extent_io_tree *tree,
3947 struct page *page, gfp_t mask)
3949 u64 start = page_offset(page);
3950 u64 end = start + PAGE_CACHE_SIZE - 1;
3953 if (test_range_bit(tree, start, end,
3954 EXTENT_IOBITS, 0, NULL))
3957 if ((mask & GFP_NOFS) == GFP_NOFS)
3960 * at this point we can safely clear everything except the
3961 * locked bit and the nodatasum bit
3963 ret = clear_extent_bit(tree, start, end,
3964 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
3967 /* if clear_extent_bit failed for enomem reasons,
3968 * we can't allow the release to continue.
3979 * a helper for releasepage. As long as there are no locked extents
3980 * in the range corresponding to the page, both state records and extent
3981 * map records are removed
3983 int try_release_extent_mapping(struct extent_map_tree *map,
3984 struct extent_io_tree *tree, struct page *page,
3987 struct extent_map *em;
3988 u64 start = page_offset(page);
3989 u64 end = start + PAGE_CACHE_SIZE - 1;
3991 if ((mask & __GFP_WAIT) &&
3992 page->mapping->host->i_size > 16 * 1024 * 1024) {
3994 while (start <= end) {
3995 len = end - start + 1;
3996 write_lock(&map->lock);
3997 em = lookup_extent_mapping(map, start, len);
3999 write_unlock(&map->lock);
4002 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
4003 em->start != start) {
4004 write_unlock(&map->lock);
4005 free_extent_map(em);
4008 if (!test_range_bit(tree, em->start,
4009 extent_map_end(em) - 1,
4010 EXTENT_LOCKED | EXTENT_WRITEBACK,
4012 remove_extent_mapping(map, em);
4013 /* once for the rb tree */
4014 free_extent_map(em);
4016 start = extent_map_end(em);
4017 write_unlock(&map->lock);
4020 free_extent_map(em);
4023 return try_release_extent_state(map, tree, page, mask);
4027 * helper function for fiemap, which doesn't want to see any holes.
4028 * This maps until we find something past 'last'
4030 static struct extent_map *get_extent_skip_holes(struct inode *inode,
4033 get_extent_t *get_extent)
4035 u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
4036 struct extent_map *em;
4043 len = last - offset;
4046 len = ALIGN(len, sectorsize);
4047 em = get_extent(inode, NULL, 0, offset, len, 0);
4048 if (IS_ERR_OR_NULL(em))
4051 /* if this isn't a hole return it */
4052 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
4053 em->block_start != EXTENT_MAP_HOLE) {
4057 /* this is a hole, advance to the next extent */
4058 offset = extent_map_end(em);
4059 free_extent_map(em);
4066 static noinline int count_ext_ref(u64 inum, u64 offset, u64 root_id, void *ctx)
4068 unsigned long cnt = *((unsigned long *)ctx);
4071 *((unsigned long *)ctx) = cnt;
4073 /* Now we're sure that the extent is shared. */
4079 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4080 __u64 start, __u64 len, get_extent_t *get_extent)
4084 u64 max = start + len;
4088 u64 last_for_get_extent = 0;
4090 u64 isize = i_size_read(inode);
4091 struct btrfs_key found_key;
4092 struct extent_map *em = NULL;
4093 struct extent_state *cached_state = NULL;
4094 struct btrfs_path *path;
4095 struct btrfs_file_extent_item *item;
4100 unsigned long emflags;
4105 path = btrfs_alloc_path();
4108 path->leave_spinning = 1;
4110 start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
4111 len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
4114 * lookup the last file extent. We're not using i_size here
4115 * because there might be preallocation past i_size
4117 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
4118 path, btrfs_ino(inode), -1, 0);
4120 btrfs_free_path(path);
4125 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4126 struct btrfs_file_extent_item);
4127 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
4128 found_type = btrfs_key_type(&found_key);
4130 /* No extents, but there might be delalloc bits */
4131 if (found_key.objectid != btrfs_ino(inode) ||
4132 found_type != BTRFS_EXTENT_DATA_KEY) {
4133 /* have to trust i_size as the end */
4135 last_for_get_extent = isize;
4138 * remember the start of the last extent. There are a
4139 * bunch of different factors that go into the length of the
4140 * extent, so it's much less complex to remember where it started
4142 last = found_key.offset;
4143 last_for_get_extent = last + 1;
4145 btrfs_release_path(path);
4148 * we might have some extents allocated but more delalloc past those
4149 * extents. so, we trust isize unless the start of the last extent is
4154 last_for_get_extent = isize;
4157 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0,
4160 em = get_extent_skip_holes(inode, start, last_for_get_extent,
4170 u64 offset_in_extent = 0;
4172 /* break if the extent we found is outside the range */
4173 if (em->start >= max || extent_map_end(em) < off)
4177 * get_extent may return an extent that starts before our
4178 * requested range. We have to make sure the ranges
4179 * we return to fiemap always move forward and don't
4180 * overlap, so adjust the offsets here
4182 em_start = max(em->start, off);
4185 * record the offset from the start of the extent
4186 * for adjusting the disk offset below. Only do this if the
4187 * extent isn't compressed since our in ram offset may be past
4188 * what we have actually allocated on disk.
4190 if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4191 offset_in_extent = em_start - em->start;
4192 em_end = extent_map_end(em);
4193 em_len = em_end - em_start;
4194 emflags = em->flags;
4199 * bump off for our next call to get_extent
4201 off = extent_map_end(em);
4205 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
4207 flags |= FIEMAP_EXTENT_LAST;
4208 } else if (em->block_start == EXTENT_MAP_INLINE) {
4209 flags |= (FIEMAP_EXTENT_DATA_INLINE |
4210 FIEMAP_EXTENT_NOT_ALIGNED);
4211 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
4212 flags |= (FIEMAP_EXTENT_DELALLOC |
4213 FIEMAP_EXTENT_UNKNOWN);
4215 unsigned long ref_cnt = 0;
4217 disko = em->block_start + offset_in_extent;
4220 * As btrfs supports shared space, this information
4221 * can be exported to userspace tools via
4222 * flag FIEMAP_EXTENT_SHARED.
4224 ret = iterate_inodes_from_logical(
4226 BTRFS_I(inode)->root->fs_info,
4227 path, count_ext_ref, &ref_cnt);
4228 if (ret < 0 && ret != -ENOENT)
4232 flags |= FIEMAP_EXTENT_SHARED;
4234 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4235 flags |= FIEMAP_EXTENT_ENCODED;
4237 free_extent_map(em);
4239 if ((em_start >= last) || em_len == (u64)-1 ||
4240 (last == (u64)-1 && isize <= em_end)) {
4241 flags |= FIEMAP_EXTENT_LAST;
4245 /* now scan forward to see if this is really the last extent. */
4246 em = get_extent_skip_holes(inode, off, last_for_get_extent,
4253 flags |= FIEMAP_EXTENT_LAST;
4256 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
4262 free_extent_map(em);
4264 btrfs_free_path(path);
4265 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4266 &cached_state, GFP_NOFS);
4270 static void __free_extent_buffer(struct extent_buffer *eb)
4272 btrfs_leak_debug_del(&eb->leak_list);
4273 kmem_cache_free(extent_buffer_cache, eb);
4276 static int extent_buffer_under_io(struct extent_buffer *eb)
4278 return (atomic_read(&eb->io_pages) ||
4279 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4280 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4284 * Helper for releasing extent buffer page.
4286 static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
4287 unsigned long start_idx)
4289 unsigned long index;
4290 unsigned long num_pages;
4292 int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4294 BUG_ON(extent_buffer_under_io(eb));
4296 num_pages = num_extent_pages(eb->start, eb->len);
4297 index = start_idx + num_pages;
4298 if (start_idx >= index)
4303 page = extent_buffer_page(eb, index);
4304 if (page && mapped) {
4305 spin_lock(&page->mapping->private_lock);
4307 * We do this since we'll remove the pages after we've
4308 * removed the eb from the radix tree, so we could race
4309 * and have this page now attached to the new eb. So
4310 * only clear page_private if it's still connected to
4313 if (PagePrivate(page) &&
4314 page->private == (unsigned long)eb) {
4315 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4316 BUG_ON(PageDirty(page));
4317 BUG_ON(PageWriteback(page));
4319 * We need to make sure we haven't been attached
4322 ClearPagePrivate(page);
4323 set_page_private(page, 0);
4324 /* One for the page private */
4325 page_cache_release(page);
4327 spin_unlock(&page->mapping->private_lock);
4331 /* One for when we allocated the page */
4332 page_cache_release(page);
4334 } while (index != start_idx);
4338 * Helper for releasing the extent buffer.
4340 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4342 btrfs_release_extent_buffer_page(eb, 0);
4343 __free_extent_buffer(eb);
4346 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
4351 struct extent_buffer *eb = NULL;
4353 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
4360 rwlock_init(&eb->lock);
4361 atomic_set(&eb->write_locks, 0);
4362 atomic_set(&eb->read_locks, 0);
4363 atomic_set(&eb->blocking_readers, 0);
4364 atomic_set(&eb->blocking_writers, 0);
4365 atomic_set(&eb->spinning_readers, 0);
4366 atomic_set(&eb->spinning_writers, 0);
4367 eb->lock_nested = 0;
4368 init_waitqueue_head(&eb->write_lock_wq);
4369 init_waitqueue_head(&eb->read_lock_wq);
4371 btrfs_leak_debug_add(&eb->leak_list, &buffers);
4373 spin_lock_init(&eb->refs_lock);
4374 atomic_set(&eb->refs, 1);
4375 atomic_set(&eb->io_pages, 0);
4378 * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
4380 BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
4381 > MAX_INLINE_EXTENT_BUFFER_SIZE);
4382 BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
4387 struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
4391 struct extent_buffer *new;
4392 unsigned long num_pages = num_extent_pages(src->start, src->len);
4394 new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_NOFS);
4398 for (i = 0; i < num_pages; i++) {
4399 p = alloc_page(GFP_NOFS);
4401 btrfs_release_extent_buffer(new);
4404 attach_extent_buffer_page(new, p);
4405 WARN_ON(PageDirty(p));
4410 copy_extent_buffer(new, src, 0, 0, src->len);
4411 set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
4412 set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
4417 struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
4419 struct extent_buffer *eb;
4420 unsigned long num_pages = num_extent_pages(0, len);
4423 eb = __alloc_extent_buffer(NULL, start, len, GFP_NOFS);
4427 for (i = 0; i < num_pages; i++) {
4428 eb->pages[i] = alloc_page(GFP_NOFS);
4432 set_extent_buffer_uptodate(eb);
4433 btrfs_set_header_nritems(eb, 0);
4434 set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4439 __free_page(eb->pages[i - 1]);
4440 __free_extent_buffer(eb);
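/*
 * Illustrative sketch only: a dummy extent buffer is backed by private pages,
 * never enters the buffer radix tree and can be used as scratch space.  The
 * start, length and contents below are arbitrary example values.
 */
static void __maybe_unused dummy_extent_buffer_example(void)
{
	struct extent_buffer *eb;
	char data[8] = "example";

	eb = alloc_dummy_extent_buffer(0, 4096);
	if (!eb)
		return;

	/* pages were marked uptodate by alloc_dummy_extent_buffer() */
	write_extent_buffer(eb, data, 0, sizeof(data));
	free_extent_buffer(eb);
}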
4444 static void check_buffer_tree_ref(struct extent_buffer *eb)
4447 /* the ref bit is tricky. We have to make sure it is set
4448 * if we have the buffer dirty. Otherwise the
4449 * code to free a buffer can end up dropping a dirty
4452 * Once the ref bit is set, it won't go away while the
4453 * buffer is dirty or in writeback, and it also won't
4454 * go away while we have the reference count on the
4457 * We can't just set the ref bit without bumping the
4458 * ref on the eb because free_extent_buffer might
4459 * see the ref bit and try to clear it. If this happens
4460 * free_extent_buffer might end up dropping our original
4461 * ref by mistake and freeing the page before we are able
4462 * to add one more ref.
4464 * So bump the ref count first, then set the bit. If someone
4465 * beat us to it, drop the ref we added.
4467 refs = atomic_read(&eb->refs);
4468 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4471 spin_lock(&eb->refs_lock);
4472 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4473 atomic_inc(&eb->refs);
4474 spin_unlock(&eb->refs_lock);
4477 static void mark_extent_buffer_accessed(struct extent_buffer *eb)
4479 unsigned long num_pages, i;
4481 check_buffer_tree_ref(eb);
4483 num_pages = num_extent_pages(eb->start, eb->len);
4484 for (i = 0; i < num_pages; i++) {
4485 struct page *p = extent_buffer_page(eb, i);
4486 mark_page_accessed(p);
4490 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
4491 u64 start, unsigned long len)
4493 unsigned long num_pages = num_extent_pages(start, len);
4495 unsigned long index = start >> PAGE_CACHE_SHIFT;
4496 struct extent_buffer *eb;
4497 struct extent_buffer *exists = NULL;
4499 struct address_space *mapping = tree->mapping;
4504 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4505 if (eb && atomic_inc_not_zero(&eb->refs)) {
4507 mark_extent_buffer_accessed(eb);
4512 eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
4516 for (i = 0; i < num_pages; i++, index++) {
4517 p = find_or_create_page(mapping, index, GFP_NOFS);
4521 spin_lock(&mapping->private_lock);
4522 if (PagePrivate(p)) {
4524 * We could have already allocated an eb for this page
4525 * and attached one so lets see if we can get a ref on
4526 * the existing eb, and if we can we know it's good and
4527 * we can just return that one, else we know we can just
4528 * overwrite page->private.
4530 exists = (struct extent_buffer *)p->private;
4531 if (atomic_inc_not_zero(&exists->refs)) {
4532 spin_unlock(&mapping->private_lock);
4534 page_cache_release(p);
4535 mark_extent_buffer_accessed(exists);
4540 * Do this so attach doesn't complain and we need to
4541 * drop the ref the old guy had.
4543 ClearPagePrivate(p);
4544 WARN_ON(PageDirty(p));
4545 page_cache_release(p);
4547 attach_extent_buffer_page(eb, p);
4548 spin_unlock(&mapping->private_lock);
4549 WARN_ON(PageDirty(p));
4550 mark_page_accessed(p);
4552 if (!PageUptodate(p))
4556 * see below about how we avoid a nasty race with release page
4557 * and why we unlock later
4561 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4563 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
4567 spin_lock(&tree->buffer_lock);
4568 ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
4569 if (ret == -EEXIST) {
4570 exists = radix_tree_lookup(&tree->buffer,
4571 start >> PAGE_CACHE_SHIFT);
4572 if (!atomic_inc_not_zero(&exists->refs)) {
4573 spin_unlock(&tree->buffer_lock);
4574 radix_tree_preload_end();
4578 spin_unlock(&tree->buffer_lock);
4579 radix_tree_preload_end();
4580 mark_extent_buffer_accessed(exists);
4583 /* add one reference for the tree */
4584 check_buffer_tree_ref(eb);
4585 spin_unlock(&tree->buffer_lock);
4586 radix_tree_preload_end();
4589 * there is a race where release page may have
4590 * tried to find this extent buffer in the radix
4591 * but failed. It will tell the VM it is safe to
4592 * reclaim the page, and it will clear the page private bit.
4593 * We must make sure to set the page private bit properly
4594 * after the extent buffer is in the radix tree so
4595 * it doesn't get lost
4597 SetPageChecked(eb->pages[0]);
4598 for (i = 1; i < num_pages; i++) {
4599 p = extent_buffer_page(eb, i);
4600 ClearPageChecked(p);
4603 unlock_page(eb->pages[0]);
4607 for (i = 0; i < num_pages; i++) {
4609 unlock_page(eb->pages[i]);
4612 WARN_ON(!atomic_dec_and_test(&eb->refs));
4613 btrfs_release_extent_buffer(eb);
4617 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
4618 u64 start, unsigned long len)
4620 struct extent_buffer *eb;
4623 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
4624 if (eb && atomic_inc_not_zero(&eb->refs)) {
4626 mark_extent_buffer_accessed(eb);
4634 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4636 struct extent_buffer *eb =
4637 container_of(head, struct extent_buffer, rcu_head);
4639 __free_extent_buffer(eb);
4642 /* Expects to have eb->eb_lock already held */
4643 static int release_extent_buffer(struct extent_buffer *eb)
4645 WARN_ON(atomic_read(&eb->refs) == 0);
4646 if (atomic_dec_and_test(&eb->refs)) {
4647 if (test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags)) {
4648 spin_unlock(&eb->refs_lock);
4650 struct extent_io_tree *tree = eb->tree;
4652 spin_unlock(&eb->refs_lock);
4654 spin_lock(&tree->buffer_lock);
4655 radix_tree_delete(&tree->buffer,
4656 eb->start >> PAGE_CACHE_SHIFT);
4657 spin_unlock(&tree->buffer_lock);
4660 /* Should be safe to release our pages at this point */
4661 btrfs_release_extent_buffer_page(eb, 0);
4662 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4665 spin_unlock(&eb->refs_lock);
4670 void free_extent_buffer(struct extent_buffer *eb)
4678 refs = atomic_read(&eb->refs);
4681 old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
4686 spin_lock(&eb->refs_lock);
4687 if (atomic_read(&eb->refs) == 2 &&
4688 test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
4689 atomic_dec(&eb->refs);
4691 if (atomic_read(&eb->refs) == 2 &&
4692 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
4693 !extent_buffer_under_io(eb) &&
4694 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4695 atomic_dec(&eb->refs);
4698 * I know this is terrible, but it's temporary until we stop tracking
4699 * the uptodate bits and such for the extent buffers.
4701 release_extent_buffer(eb);
4704 void free_extent_buffer_stale(struct extent_buffer *eb)
4709 spin_lock(&eb->refs_lock);
4710 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
4712 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
4713 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4714 atomic_dec(&eb->refs);
4715 release_extent_buffer(eb);
4718 void clear_extent_buffer_dirty(struct extent_buffer *eb)
4721 unsigned long num_pages;
4724 num_pages = num_extent_pages(eb->start, eb->len);
4726 for (i = 0; i < num_pages; i++) {
4727 page = extent_buffer_page(eb, i);
4728 if (!PageDirty(page))
4732 WARN_ON(!PagePrivate(page));
4734 clear_page_dirty_for_io(page);
4735 spin_lock_irq(&page->mapping->tree_lock);
4736 if (!PageDirty(page)) {
4737 radix_tree_tag_clear(&page->mapping->page_tree,
4739 PAGECACHE_TAG_DIRTY);
4741 spin_unlock_irq(&page->mapping->tree_lock);
4742 ClearPageError(page);
4745 WARN_ON(atomic_read(&eb->refs) == 0);
4748 int set_extent_buffer_dirty(struct extent_buffer *eb)
4751 unsigned long num_pages;
4754 check_buffer_tree_ref(eb);
4756 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4758 num_pages = num_extent_pages(eb->start, eb->len);
4759 WARN_ON(atomic_read(&eb->refs) == 0);
4760 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
4762 for (i = 0; i < num_pages; i++)
4763 set_page_dirty(extent_buffer_page(eb, i));
4767 int clear_extent_buffer_uptodate(struct extent_buffer *eb)
4771 unsigned long num_pages;
4773 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4774 num_pages = num_extent_pages(eb->start, eb->len);
4775 for (i = 0; i < num_pages; i++) {
4776 page = extent_buffer_page(eb, i);
4778 ClearPageUptodate(page);
4783 int set_extent_buffer_uptodate(struct extent_buffer *eb)
4787 unsigned long num_pages;
4789 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4790 num_pages = num_extent_pages(eb->start, eb->len);
4791 for (i = 0; i < num_pages; i++) {
4792 page = extent_buffer_page(eb, i);
4793 SetPageUptodate(page);
4798 int extent_buffer_uptodate(struct extent_buffer *eb)
4800 return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
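/*
 * Illustrative sketch, loosely following the metadata read path: kick off IO
 * on all pages of a buffer, wait for it and report the aggregate result.
 * read_extent_buffer_pages() is declared in extent_io.h and defined below;
 * "example_get_extent" is a stand-in callback name.
 */
static int __maybe_unused example_read_buffer(struct extent_io_tree *tree,
					      struct extent_buffer *eb,
					      get_extent_t *example_get_extent)
{
	int ret;

	ret = read_extent_buffer_pages(tree, eb, eb->start, WAIT_COMPLETE,
				       example_get_extent, 0);
	if (ret)
		return ret;

	return extent_buffer_uptodate(eb) ? 0 : -EIO;
}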
4803 int read_extent_buffer_pages(struct extent_io_tree *tree,
4804 struct extent_buffer *eb, u64 start, int wait,
4805 get_extent_t *get_extent, int mirror_num)
4808 unsigned long start_i;
4812 int locked_pages = 0;
4813 int all_uptodate = 1;
4814 unsigned long num_pages;
4815 unsigned long num_reads = 0;
4816 struct bio *bio = NULL;
4817 unsigned long bio_flags = 0;
4819 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
4823 WARN_ON(start < eb->start);
4824 start_i = (start >> PAGE_CACHE_SHIFT) -
4825 (eb->start >> PAGE_CACHE_SHIFT);
4830 num_pages = num_extent_pages(eb->start, eb->len);
4831 for (i = start_i; i < num_pages; i++) {
4832 page = extent_buffer_page(eb, i);
4833 if (wait == WAIT_NONE) {
4834 if (!trylock_page(page))
4840 if (!PageUptodate(page)) {
4847 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4851 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
4852 eb->read_mirror = 0;
4853 atomic_set(&eb->io_pages, num_reads);
4854 for (i = start_i; i < num_pages; i++) {
4855 page = extent_buffer_page(eb, i);
4856 if (!PageUptodate(page)) {
4857 ClearPageError(page);
4858 err = __extent_read_full_page(tree, page,
4860 mirror_num, &bio_flags,
4870 err = submit_one_bio(READ | REQ_META, bio, mirror_num,
4876 if (ret || wait != WAIT_COMPLETE)
4879 for (i = start_i; i < num_pages; i++) {
4880 page = extent_buffer_page(eb, i);
4881 wait_on_page_locked(page);
4882 if (!PageUptodate(page))
4890 while (locked_pages > 0) {
4891 page = extent_buffer_page(eb, i);
4899 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
4900 unsigned long start,
4907 char *dst = (char *)dstv;
4908 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4909 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4911 WARN_ON(start > eb->len);
4912 WARN_ON(start + len > eb->start + eb->len);
4914 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
4917 page = extent_buffer_page(eb, i);
4919 cur = min(len, (PAGE_CACHE_SIZE - offset));
4920 kaddr = page_address(page);
4921 memcpy(dst, kaddr + offset, cur);
4930 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
4931 unsigned long min_len, char **map,
4932 unsigned long *map_start,
4933 unsigned long *map_len)
4935 size_t offset = start & (PAGE_CACHE_SIZE - 1);
4938 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4939 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4940 unsigned long end_i = (start_offset + start + min_len - 1) >>
4947 offset = start_offset;
4951 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
4954 if (start + min_len > eb->len) {
4955 WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
4957 eb->start, eb->len, start, min_len);
4961 p = extent_buffer_page(eb, i);
4962 kaddr = page_address(p);
4963 *map = kaddr + offset;
4964 *map_len = PAGE_CACHE_SIZE - offset;
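/*
 * Illustrative sketch of the pattern the setget helpers rely on: map the
 * region for direct access when it fits inside a single page, otherwise fall
 * back to the copying accessor.  The offset is an example value supplied by
 * the caller.
 */
static u8 __maybe_unused example_read_byte(struct extent_buffer *eb,
					   unsigned long offset)
{
	unsigned long map_start;
	unsigned long map_len;
	char *kaddr;
	u8 value;

	if (map_private_extent_buffer(eb, offset, sizeof(u8), &kaddr,
				      &map_start, &map_len) == 0)
		value = *(u8 *)(kaddr + offset - map_start);
	else
		read_extent_buffer(eb, &value, offset, sizeof(u8));

	return value;
}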
4968 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
4969 unsigned long start,
4976 char *ptr = (char *)ptrv;
4977 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
4978 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
4981 WARN_ON(start > eb->len);
4982 WARN_ON(start + len > eb->start + eb->len);
4984 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
4987 page = extent_buffer_page(eb, i);
4989 cur = min(len, (PAGE_CACHE_SIZE - offset));
4991 kaddr = page_address(page);
4992 ret = memcmp(ptr, kaddr + offset, cur);
5004 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
5005 unsigned long start, unsigned long len)
5011 char *src = (char *)srcv;
5012 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5013 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5015 WARN_ON(start > eb->len);
5016 WARN_ON(start + len > eb->start + eb->len);
5018 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
5021 page = extent_buffer_page(eb, i);
5022 WARN_ON(!PageUptodate(page));
5024 cur = min(len, PAGE_CACHE_SIZE - offset);
5025 kaddr = page_address(page);
5026 memcpy(kaddr + offset, src, cur);
5035 void memset_extent_buffer(struct extent_buffer *eb, char c,
5036 unsigned long start, unsigned long len)
5042 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
5043 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
5045 WARN_ON(start > eb->len);
5046 WARN_ON(start + len > eb->start + eb->len);
5048 offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
5051 page = extent_buffer_page(eb, i);
5052 WARN_ON(!PageUptodate(page));
5054 cur = min(len, PAGE_CACHE_SIZE - offset);
5055 kaddr = page_address(page);
5056 memset(kaddr + offset, c, cur);
5064 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
5065 unsigned long dst_offset, unsigned long src_offset,
5068 u64 dst_len = dst->len;
5073 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5074 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
5076 WARN_ON(src->len != dst_len);
5078 offset = (start_offset + dst_offset) &
5079 (PAGE_CACHE_SIZE - 1);
5082 page = extent_buffer_page(dst, i);
5083 WARN_ON(!PageUptodate(page));
5085 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
5087 kaddr = page_address(page);
5088 read_extent_buffer(src, kaddr + offset, src_offset, cur);
5097 static void move_pages(struct page *dst_page, struct page *src_page,
5098 unsigned long dst_off, unsigned long src_off,
5101 char *dst_kaddr = page_address(dst_page);
5102 if (dst_page == src_page) {
5103 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
5105 char *src_kaddr = page_address(src_page);
5106 char *p = dst_kaddr + dst_off + len;
5107 char *s = src_kaddr + src_off + len;
5114 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
5116 unsigned long distance = (src > dst) ? src - dst : dst - src;
5117 return distance < len;
5120 static void copy_pages(struct page *dst_page, struct page *src_page,
5121 unsigned long dst_off, unsigned long src_off,
5124 char *dst_kaddr = page_address(dst_page);
5126 int must_memmove = 0;
5128 if (dst_page != src_page) {
5129 src_kaddr = page_address(src_page);
5131 src_kaddr = dst_kaddr;
5132 if (areas_overlap(src_off, dst_off, len))
5137 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
5139 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
5142 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5143 unsigned long src_offset, unsigned long len)
5146 size_t dst_off_in_page;
5147 size_t src_off_in_page;
5148 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5149 unsigned long dst_i;
5150 unsigned long src_i;
5152 if (src_offset + len > dst->len) {
5153 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
5154 "len %lu dst len %lu\n", src_offset, len, dst->len);
5157 if (dst_offset + len > dst->len) {
5158 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
5159 "len %lu dst len %lu\n", dst_offset, len, dst->len);
5164 dst_off_in_page = (start_offset + dst_offset) &
5165 (PAGE_CACHE_SIZE - 1);
5166 src_off_in_page = (start_offset + src_offset) &
5167 (PAGE_CACHE_SIZE - 1);
5169 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
5170 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
5172 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
5174 cur = min_t(unsigned long, cur,
5175 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
5177 copy_pages(extent_buffer_page(dst, dst_i),
5178 extent_buffer_page(dst, src_i),
5179 dst_off_in_page, src_off_in_page, cur);
5187 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
5188 unsigned long src_offset, unsigned long len)
5191 size_t dst_off_in_page;
5192 size_t src_off_in_page;
5193 unsigned long dst_end = dst_offset + len - 1;
5194 unsigned long src_end = src_offset + len - 1;
5195 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
5196 unsigned long dst_i;
5197 unsigned long src_i;
5199 if (src_offset + len > dst->len) {
5200 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
5201 "len %lu len %lu\n", src_offset, len, dst->len);
5204 if (dst_offset + len > dst->len) {
5205 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
5206 "len %lu len %lu\n", dst_offset, len, dst->len);
5209 if (dst_offset < src_offset) {
5210 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
5214 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
5215 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
5217 dst_off_in_page = (start_offset + dst_end) &
5218 (PAGE_CACHE_SIZE - 1);
5219 src_off_in_page = (start_offset + src_end) &
5220 (PAGE_CACHE_SIZE - 1);
5222 cur = min_t(unsigned long, len, src_off_in_page + 1);
5223 cur = min(cur, dst_off_in_page + 1);
5224 move_pages(extent_buffer_page(dst, dst_i),
5225 extent_buffer_page(dst, src_i),
5226 dst_off_in_page - cur + 1,
5227 src_off_in_page - cur + 1, cur);
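/*
 * Illustrative sketch: opening a gap inside a buffer by shifting the tail
 * towards higher offsets, the same overlapping-copy pattern the item
 * insertion code relies on.  The caller is assumed to pass offsets and
 * lengths that stay within eb->len; the values are examples only.
 */
static void __maybe_unused example_open_gap(struct extent_buffer *eb,
					    unsigned long gap_off,
					    unsigned long gap_len,
					    unsigned long tail_len)
{
	/* overlapping src/dst ranges are handled with memmove semantics */
	memmove_extent_buffer(eb, gap_off + gap_len, gap_off, tail_len);
	/* the newly opened gap can then be cleared or filled */
	memset_extent_buffer(eb, 0, gap_off, gap_len);
}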
5235 int try_release_extent_buffer(struct page *page)
5237 struct extent_buffer *eb;
5240 * We need to make sure nobody is attaching this page to an eb right
5243 spin_lock(&page->mapping->private_lock);
5244 if (!PagePrivate(page)) {
5245 spin_unlock(&page->mapping->private_lock);
5249 eb = (struct extent_buffer *)page->private;
5253 * This is a little awful but should be ok, we need to make sure that
5254 * the eb doesn't disappear out from under us while we're looking at
5257 spin_lock(&eb->refs_lock);
5258 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
5259 spin_unlock(&eb->refs_lock);
5260 spin_unlock(&page->mapping->private_lock);
5263 spin_unlock(&page->mapping->private_lock);
5266 * If tree ref isn't set then we know the ref on this eb is a real ref,
5267 * so just return, this page will likely be freed soon anyway.
5269 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
5270 spin_unlock(&eb->refs_lock);
5274 return release_extent_buffer(eb);