Btrfs: implement the free space B-tree
fs/btrfs/extent-tree.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc()'s force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root, u64 bytenr,
                              u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_delayed_ref_node *node, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins,
                                     int no_quota);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve,
                                       int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num_bytes, int reserved);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED ||
                cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}
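
/*
 * Editorial note, not in the original source: a worked example of the
 * clamping above, with illustrative numbers.  With a superblock mirror at
 * 64M, a block group spanning [60M, 68M) and a stripe_len of 64K, the
 * stripe [64M, 64M + 64K) lies fully inside the group, so start = 64M and
 * len = 64K.  Had the group instead started at 64M + 32K, start would be
 * clamped up to the group start and len shortened to the 32K overlap.
 */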

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't
 * be used yet, since their free space will be released as soon as the
 * transaction commits.
 */
u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                       struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}
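
/*
 * Editorial illustration of the loop above, with made-up offsets: for a
 * range [0, 100) with pinned extents covering [10, 20) and [40, 50), the
 * loop adds the gaps [0, 10) and [20, 40) as free space, and the final
 * check adds the tail [50, 100), so total_added ends up as
 * 10 + 20 + 50 = 80 bytes.
 */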

static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret;

        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;

next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto out;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            rwsem_is_contended(&fs_info->commit_root_sem)) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->commit_root_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                mutex_lock(&caching_ctl->mutex);
                                down_read(&fs_info->commit_root_sem);
                                goto next;
                        }

                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto out;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < last) {
                        key.objectid = last;
                        key.offset = 0;
                        key.type = BTRFS_EXTENT_ITEM_KEY;

                        caching_ctl->progress = last;
                        btrfs_release_path(path);
                        goto next;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->nodesize;
                        else
                                last = key.objectid + key.offset;

                        if (total_found > CACHING_CTL_WAKE_UP) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

out:
        btrfs_free_path(path);
        return ret;
}

static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;

        mutex_lock(&caching_ctl->mutex);
        down_read(&fs_info->commit_root_sem);

        ret = load_extent_tree_free(caching_ctl);

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

        up_read(&fs_info->commit_root_sem);
        free_excluded_extents(fs_info->extent_root, block_group);
        mutex_unlock(&caching_ctl->mutex);

        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
                        caching_thread, NULL, NULL);

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it could happen in the
         * case where one thread starts to load the space cache info, and then
         * some other thread starts a transaction commit which tries to do an
         * allocation while the other thread is still loading the space cache
         * info.  The previous loop should have kept us from choosing this block
         * group, but if we've moved to the state where we will wait on caching
         * block groups we need to first check if we're doing a fast load here,
         * so we can wait for it to finish, otherwise we could end up allocating
         * from a block group whose cache gets evicted for one reason or
         * another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                mutex_lock(&caching_ctl->mutex);
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                        caching_ctl->progress = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                                cache->has_caching_ctl = 1;
                        }
                }
                spin_unlock(&cache->lock);
                mutex_unlock(&caching_ctl->mutex);

                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wakeup any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                        cache->has_caching_ctl = 1;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->commit_root_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->commit_root_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

        return ret;
}
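
/*
 * Editorial summary of the cache state transitions the function above
 * drives (not in the original source):
 *
 *   BTRFS_CACHE_NO -> BTRFS_CACHE_FAST
 *     -> BTRFS_CACHE_FINISHED      (space cache load succeeded, ret == 1)
 *     -> BTRFS_CACHE_NO            (fast load failed and load_cache_only)
 *     -> BTRFS_CACHE_STARTED       (fall back to the caching_thread)
 *          -> BTRFS_CACHE_FINISHED or BTRFS_CACHE_ERROR
 *                                  (set by caching_thread() above)
 */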

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * helper function to look up the reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. The head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags would be if all of
 * the delayed refs were processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different
         */
        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
                offset = root->nodesize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }

search_again:
        key.objectid = bytenr;
        key.offset = offset;
        if (metadata)
                key.type = BTRFS_METADATA_ITEM_KEY;
        else
                key.type = BTRFS_EXTENT_ITEM_KEY;

        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                if (path->slots[0]) {
                        path->slots[0]--;
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0]);
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == root->nodesize)
                                ret = 0;
                }
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and try
                         * again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto search_again;
                }
                spin_lock(&head->lock);
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                spin_unlock(&head->lock);
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}
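
/*
 * Editorial note: the refcount returned above is the sum of the on-disk
 * reference count and head->node.ref_mod from any pending delayed ref
 * head, i.e. the value the extent item would hold once the queued
 * delayed refs are run.
 */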

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Actually full back refs are generic, and can
 * be used in all cases where implicit back refs are used. The major
 * shortcoming of full back refs is their overhead. Every time a tree block
 * gets COWed, we have to update the back refs entry for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are inherited by the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is a hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used and
 * the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist only of a key. The key offset for the implicit back refs is the
 * objectid of the block's owner tree. The key offset for the full back
 * refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required. This information is stored in
 * the tree block info structure.
 */
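
/*
 * Editorial sketch of the four back ref key shapes described above (the
 * exact construction lives in lookup_extent_data_ref() and
 * lookup_tree_block_ref() below):
 *
 *   data extent, implicit:
 *     (extent bytenr, BTRFS_EXTENT_DATA_REF_KEY,
 *      hash(root objectid, inode objectid, file offset))
 *   data extent, full:
 *     (extent bytenr, BTRFS_SHARED_DATA_REF_KEY, parent block bytenr)
 *   tree block, implicit:
 *     (block bytenr, BTRFS_TREE_BLOCK_REF_KEY, owner root objectid)
 *   tree block, full:
 *     (block bytenr, BTRFS_SHARED_BLOCK_REF_KEY, parent block bytenr)
 */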

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}
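
/*
 * Editorial example (mirrors lookup_extent_data_ref() below): the hash is
 * used as the key offset for an implicit data back ref, so different
 * (root, owner, offset) tuples can collide and callers must still compare
 * the ref fields with match_extent_data_ref():
 *
 *	key.type = BTRFS_EXTENT_DATA_REF_KEY;
 *	key.offset = hash_extent_data_ref(root_objectid, owner, offset);
 */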

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}
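
/*
 * Editorial note: the -EEXIST loop above handles collisions in the hashed
 * implicit ref key by linear probing -- key.offset is incremented until
 * either an item matching (root, owner, offset) is found or an empty slot
 * is successfully inserted.
 */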
1267
1268 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1269                                            struct btrfs_root *root,
1270                                            struct btrfs_path *path,
1271                                            int refs_to_drop, int *last_ref)
1272 {
1273         struct btrfs_key key;
1274         struct btrfs_extent_data_ref *ref1 = NULL;
1275         struct btrfs_shared_data_ref *ref2 = NULL;
1276         struct extent_buffer *leaf;
1277         u32 num_refs = 0;
1278         int ret = 0;
1279
1280         leaf = path->nodes[0];
1281         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1282
1283         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1284                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1285                                       struct btrfs_extent_data_ref);
1286                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1287         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1288                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1289                                       struct btrfs_shared_data_ref);
1290                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1291 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1292         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1293                 struct btrfs_extent_ref_v0 *ref0;
1294                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1295                                       struct btrfs_extent_ref_v0);
1296                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1297 #endif
1298         } else {
1299                 BUG();
1300         }
1301
1302         BUG_ON(num_refs < refs_to_drop);
1303         num_refs -= refs_to_drop;
1304
1305         if (num_refs == 0) {
1306                 ret = btrfs_del_item(trans, root, path);
1307                 *last_ref = 1;
1308         } else {
1309                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1310                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1311                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1312                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1313 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1314                 else {
1315                         struct btrfs_extent_ref_v0 *ref0;
1316                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1317                                         struct btrfs_extent_ref_v0);
1318                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1319                 }
1320 #endif
1321                 btrfs_mark_buffer_dirty(leaf);
1322         }
1323         return ret;
1324 }
1325
1326 static noinline u32 extent_data_ref_count(struct btrfs_path *path,
1327                                           struct btrfs_extent_inline_ref *iref)
1328 {
1329         struct btrfs_key key;
1330         struct extent_buffer *leaf;
1331         struct btrfs_extent_data_ref *ref1;
1332         struct btrfs_shared_data_ref *ref2;
1333         u32 num_refs = 0;
1334
1335         leaf = path->nodes[0];
1336         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1337         if (iref) {
1338                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1339                     BTRFS_EXTENT_DATA_REF_KEY) {
1340                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1341                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1342                 } else {
1343                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1344                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1345                 }
1346         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1347                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1348                                       struct btrfs_extent_data_ref);
1349                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1350         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1351                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1352                                       struct btrfs_shared_data_ref);
1353                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1354 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1355         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1356                 struct btrfs_extent_ref_v0 *ref0;
1357                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1358                                       struct btrfs_extent_ref_v0);
1359                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1360 #endif
1361         } else {
1362                 WARN_ON(1);
1363         }
1364         return num_refs;
1365 }
1366
1367 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1368                                           struct btrfs_root *root,
1369                                           struct btrfs_path *path,
1370                                           u64 bytenr, u64 parent,
1371                                           u64 root_objectid)
1372 {
1373         struct btrfs_key key;
1374         int ret;
1375
1376         key.objectid = bytenr;
1377         if (parent) {
1378                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1379                 key.offset = parent;
1380         } else {
1381                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1382                 key.offset = root_objectid;
1383         }
1384
1385         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1386         if (ret > 0)
1387                 ret = -ENOENT;
1388 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1389         if (ret == -ENOENT && parent) {
1390                 btrfs_release_path(path);
1391                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1392                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1393                 if (ret > 0)
1394                         ret = -ENOENT;
1395         }
1396 #endif
1397         return ret;
1398 }
1399
1400 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1401                                           struct btrfs_root *root,
1402                                           struct btrfs_path *path,
1403                                           u64 bytenr, u64 parent,
1404                                           u64 root_objectid)
1405 {
1406         struct btrfs_key key;
1407         int ret;
1408
1409         key.objectid = bytenr;
1410         if (parent) {
1411                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1412                 key.offset = parent;
1413         } else {
1414                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1415                 key.offset = root_objectid;
1416         }
1417
1418         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1419         btrfs_release_path(path);
1420         return ret;
1421 }
1422
1423 static inline int extent_ref_type(u64 parent, u64 owner)
1424 {
1425         int type;
1426         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1427                 if (parent > 0)
1428                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1429                 else
1430                         type = BTRFS_TREE_BLOCK_REF_KEY;
1431         } else {
1432                 if (parent > 0)
1433                         type = BTRFS_SHARED_DATA_REF_KEY;
1434                 else
1435                         type = BTRFS_EXTENT_DATA_REF_KEY;
1436         }
1437         return type;
1438 }
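
/*
 * Illustration only (hypothetical helper, not part of the original file):
 * how the four back ref key types fall out of (parent, owner).  Tree
 * blocks have an owner below BTRFS_FIRST_FREE_OBJECTID (the owner is the
 * block's level); data extents use the owning inode number.  A non-zero
 * parent means the extent is reachable through a shared tree block.
 */
static void __maybe_unused extent_ref_type_examples(void)
{
        /* tree block referenced directly by a root */
        WARN_ON(extent_ref_type(0, 1) != BTRFS_TREE_BLOCK_REF_KEY);
        /* tree block referenced through a shared parent node */
        WARN_ON(extent_ref_type(4096, 1) != BTRFS_SHARED_BLOCK_REF_KEY);
        /* data extent referenced by (root, inode, offset) */
        WARN_ON(extent_ref_type(0, BTRFS_FIRST_FREE_OBJECTID) !=
                BTRFS_EXTENT_DATA_REF_KEY);
        /* data extent referenced through a shared leaf */
        WARN_ON(extent_ref_type(4096, BTRFS_FIRST_FREE_OBJECTID) !=
                BTRFS_SHARED_DATA_REF_KEY);
}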
1439
1440 static int find_next_key(struct btrfs_path *path, int level,
1441                          struct btrfs_key *key)
1442
1443 {
1444         for (; level < BTRFS_MAX_LEVEL; level++) {
1445                 if (!path->nodes[level])
1446                         break;
1447                 if (path->slots[level] + 1 >=
1448                     btrfs_header_nritems(path->nodes[level]))
1449                         continue;
1450                 if (level == 0)
1451                         btrfs_item_key_to_cpu(path->nodes[level], key,
1452                                               path->slots[level] + 1);
1453                 else
1454                         btrfs_node_key_to_cpu(path->nodes[level], key,
1455                                               path->slots[level] + 1);
1456                 return 0;
1457         }
1458         return 1;
1459 }
1460
1461 /*
1462  * Look for an inline back ref. If the back ref is found, *ref_ret is set
1463  * to the address of the inline back ref, and 0 is returned.
1464  *
1465  * If the back ref isn't found, *ref_ret is set to the address where it
1466  * should be inserted, and -ENOENT is returned.
1467  *
1468  * If insert is true and there are too many inline back refs, the path
1469  * points to the extent item, and -EAGAIN is returned.
1470  *
1471  * NOTE: inline back refs are ordered in the same way that back ref
1472  *       items in the tree are ordered.
1473  */
1474 static noinline_for_stack
1475 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1476                                  struct btrfs_root *root,
1477                                  struct btrfs_path *path,
1478                                  struct btrfs_extent_inline_ref **ref_ret,
1479                                  u64 bytenr, u64 num_bytes,
1480                                  u64 parent, u64 root_objectid,
1481                                  u64 owner, u64 offset, int insert)
1482 {
1483         struct btrfs_key key;
1484         struct extent_buffer *leaf;
1485         struct btrfs_extent_item *ei;
1486         struct btrfs_extent_inline_ref *iref;
1487         u64 flags;
1488         u64 item_size;
1489         unsigned long ptr;
1490         unsigned long end;
1491         int extra_size;
1492         int type;
1493         int want;
1494         int ret;
1495         int err = 0;
1496         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1497                                                  SKINNY_METADATA);
1498
1499         key.objectid = bytenr;
1500         key.type = BTRFS_EXTENT_ITEM_KEY;
1501         key.offset = num_bytes;
1502
1503         want = extent_ref_type(parent, owner);
1504         if (insert) {
1505                 extra_size = btrfs_extent_inline_ref_size(want);
1506                 path->keep_locks = 1;
1507         } else
1508                 extra_size = -1;
1509
1510         /*
1511          * Owner is our parent level, so we can just add one to get the level
1512          * for the block we are interested in.
1513          */
1514         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1515                 key.type = BTRFS_METADATA_ITEM_KEY;
1516                 key.offset = owner;
1517         }
1518
1519 again:
1520         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1521         if (ret < 0) {
1522                 err = ret;
1523                 goto out;
1524         }
1525
1526         /*
1527          * We may be a newly converted file system which still has the old fat
1528          * extent entries for metadata, so try and see if we have one of those.
1529          */
1530         if (ret > 0 && skinny_metadata) {
1531                 skinny_metadata = false;
1532                 if (path->slots[0]) {
1533                         path->slots[0]--;
1534                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1535                                               path->slots[0]);
1536                         if (key.objectid == bytenr &&
1537                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1538                             key.offset == num_bytes)
1539                                 ret = 0;
1540                 }
1541                 if (ret) {
1542                         key.objectid = bytenr;
1543                         key.type = BTRFS_EXTENT_ITEM_KEY;
1544                         key.offset = num_bytes;
1545                         btrfs_release_path(path);
1546                         goto again;
1547                 }
1548         }
1549
1550         if (ret && !insert) {
1551                 err = -ENOENT;
1552                 goto out;
1553         } else if (WARN_ON(ret)) {
1554                 err = -EIO;
1555                 goto out;
1556         }
1557
1558         leaf = path->nodes[0];
1559         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1560 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1561         if (item_size < sizeof(*ei)) {
1562                 if (!insert) {
1563                         err = -ENOENT;
1564                         goto out;
1565                 }
1566                 ret = convert_extent_item_v0(trans, root, path, owner,
1567                                              extra_size);
1568                 if (ret < 0) {
1569                         err = ret;
1570                         goto out;
1571                 }
1572                 leaf = path->nodes[0];
1573                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1574         }
1575 #endif
1576         BUG_ON(item_size < sizeof(*ei));
1577
1578         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1579         flags = btrfs_extent_flags(leaf, ei);
1580
1581         ptr = (unsigned long)(ei + 1);
1582         end = (unsigned long)ei + item_size;
1583
1584         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1585                 ptr += sizeof(struct btrfs_tree_block_info);
1586                 BUG_ON(ptr > end);
1587         }
1588
1589         err = -ENOENT;
1590         while (1) {
1591                 if (ptr >= end) {
1592                         WARN_ON(ptr > end);
1593                         break;
1594                 }
1595                 iref = (struct btrfs_extent_inline_ref *)ptr;
1596                 type = btrfs_extent_inline_ref_type(leaf, iref);
1597                 if (want < type)
1598                         break;
1599                 if (want > type) {
1600                         ptr += btrfs_extent_inline_ref_size(type);
1601                         continue;
1602                 }
1603
1604                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1605                         struct btrfs_extent_data_ref *dref;
1606                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1607                         if (match_extent_data_ref(leaf, dref, root_objectid,
1608                                                   owner, offset)) {
1609                                 err = 0;
1610                                 break;
1611                         }
1612                         if (hash_extent_data_ref_item(leaf, dref) <
1613                             hash_extent_data_ref(root_objectid, owner, offset))
1614                                 break;
1615                 } else {
1616                         u64 ref_offset;
1617                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1618                         if (parent > 0) {
1619                                 if (parent == ref_offset) {
1620                                         err = 0;
1621                                         break;
1622                                 }
1623                                 if (ref_offset < parent)
1624                                         break;
1625                         } else {
1626                                 if (root_objectid == ref_offset) {
1627                                         err = 0;
1628                                         break;
1629                                 }
1630                                 if (ref_offset < root_objectid)
1631                                         break;
1632                         }
1633                 }
1634                 ptr += btrfs_extent_inline_ref_size(type);
1635         }
1636         if (err == -ENOENT && insert) {
1637                 if (item_size + extra_size >=
1638                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1639                         err = -EAGAIN;
1640                         goto out;
1641                 }
1642                 /*
1643                  * To add a new inline back ref, we have to make sure
1644                  * that there is no corresponding back ref item.
1645                  * For simplicity, we just do not add a new inline back
1646                  * ref if there is any kind of item for this block.
1647                  */
1648                 if (find_next_key(path, 0, &key) == 0 &&
1649                     key.objectid == bytenr &&
1650                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1651                         err = -EAGAIN;
1652                         goto out;
1653                 }
1654         }
1655         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1656 out:
1657         if (insert) {
1658                 path->keep_locks = 0;
1659                 btrfs_unlock_up_safe(path, 1);
1660         }
1661         return err;
1662 }
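
/*
 * For illustration only: a hypothetical helper (not used by anything in
 * this file) that walks the same packed inline ref area the loop in
 * lookup_inline_extent_backref() scans, counting the inline back refs of
 * a plain data extent item.  The tree_block_info / skinny metadata
 * handling from above is omitted for brevity.
 */
static int __maybe_unused count_inline_refs_sketch(struct extent_buffer *leaf,
                                                   int slot)
{
        struct btrfs_extent_item *ei;
        struct btrfs_extent_inline_ref *iref;
        unsigned long ptr;
        unsigned long end;
        int count = 0;

        ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
        ptr = (unsigned long)(ei + 1);
        end = (unsigned long)ei + btrfs_item_size_nr(leaf, slot);
        while (ptr < end) {
                iref = (struct btrfs_extent_inline_ref *)ptr;
                count++;
                /* refs are packed back to back, sized by their type */
                ptr += btrfs_extent_inline_ref_size(
                                btrfs_extent_inline_ref_type(leaf, iref));
        }
        return count;
}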
1663
1664 /*
1665  * helper to add a new inline back ref
1666  */
1667 static noinline_for_stack
1668 void setup_inline_extent_backref(struct btrfs_root *root,
1669                                  struct btrfs_path *path,
1670                                  struct btrfs_extent_inline_ref *iref,
1671                                  u64 parent, u64 root_objectid,
1672                                  u64 owner, u64 offset, int refs_to_add,
1673                                  struct btrfs_delayed_extent_op *extent_op)
1674 {
1675         struct extent_buffer *leaf;
1676         struct btrfs_extent_item *ei;
1677         unsigned long ptr;
1678         unsigned long end;
1679         unsigned long item_offset;
1680         u64 refs;
1681         int size;
1682         int type;
1683
1684         leaf = path->nodes[0];
1685         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1686         item_offset = (unsigned long)iref - (unsigned long)ei;
1687
1688         type = extent_ref_type(parent, owner);
1689         size = btrfs_extent_inline_ref_size(type);
1690
1691         btrfs_extend_item(root, path, size);
1692
1693         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1694         refs = btrfs_extent_refs(leaf, ei);
1695         refs += refs_to_add;
1696         btrfs_set_extent_refs(leaf, ei, refs);
1697         if (extent_op)
1698                 __run_delayed_extent_op(extent_op, leaf, ei);
1699
1700         ptr = (unsigned long)ei + item_offset;
1701         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1702         if (ptr < end - size)
1703                 memmove_extent_buffer(leaf, ptr + size, ptr,
1704                                       end - size - ptr);
1705
1706         iref = (struct btrfs_extent_inline_ref *)ptr;
1707         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1708         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1709                 struct btrfs_extent_data_ref *dref;
1710                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1711                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1712                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1713                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1714                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1715         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1716                 struct btrfs_shared_data_ref *sref;
1717                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1718                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1719                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1720         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1721                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1722         } else {
1723                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1724         }
1725         btrfs_mark_buffer_dirty(leaf);
1726 }
1727
1728 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1729                                  struct btrfs_root *root,
1730                                  struct btrfs_path *path,
1731                                  struct btrfs_extent_inline_ref **ref_ret,
1732                                  u64 bytenr, u64 num_bytes, u64 parent,
1733                                  u64 root_objectid, u64 owner, u64 offset)
1734 {
1735         int ret;
1736
1737         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1738                                            bytenr, num_bytes, parent,
1739                                            root_objectid, owner, offset, 0);
1740         if (ret != -ENOENT)
1741                 return ret;
1742
1743         btrfs_release_path(path);
1744         *ref_ret = NULL;
1745
1746         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1747                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1748                                             root_objectid);
1749         } else {
1750                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1751                                              root_objectid, owner, offset);
1752         }
1753         return ret;
1754 }
1755
1756 /*
1757  * helper to update/remove an inline back ref
1758  */
1759 static noinline_for_stack
1760 void update_inline_extent_backref(struct btrfs_root *root,
1761                                   struct btrfs_path *path,
1762                                   struct btrfs_extent_inline_ref *iref,
1763                                   int refs_to_mod,
1764                                   struct btrfs_delayed_extent_op *extent_op,
1765                                   int *last_ref)
1766 {
1767         struct extent_buffer *leaf;
1768         struct btrfs_extent_item *ei;
1769         struct btrfs_extent_data_ref *dref = NULL;
1770         struct btrfs_shared_data_ref *sref = NULL;
1771         unsigned long ptr;
1772         unsigned long end;
1773         u32 item_size;
1774         int size;
1775         int type;
1776         u64 refs;
1777
1778         leaf = path->nodes[0];
1779         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1780         refs = btrfs_extent_refs(leaf, ei);
1781         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1782         refs += refs_to_mod;
1783         btrfs_set_extent_refs(leaf, ei, refs);
1784         if (extent_op)
1785                 __run_delayed_extent_op(extent_op, leaf, ei);
1786
1787         type = btrfs_extent_inline_ref_type(leaf, iref);
1788
1789         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1790                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1791                 refs = btrfs_extent_data_ref_count(leaf, dref);
1792         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1793                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1794                 refs = btrfs_shared_data_ref_count(leaf, sref);
1795         } else {
1796                 refs = 1;
1797                 BUG_ON(refs_to_mod != -1);
1798         }
1799
1800         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1801         refs += refs_to_mod;
1802
1803         if (refs > 0) {
1804                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1805                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1806                 else
1807                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1808         } else {
1809                 *last_ref = 1;
1810                 size =  btrfs_extent_inline_ref_size(type);
1811                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1812                 ptr = (unsigned long)iref;
1813                 end = (unsigned long)ei + item_size;
1814                 if (ptr + size < end)
1815                         memmove_extent_buffer(leaf, ptr, ptr + size,
1816                                               end - ptr - size);
1817                 item_size -= size;
1818                 btrfs_truncate_item(root, path, item_size, 1);
1819         }
1820         btrfs_mark_buffer_dirty(leaf);
1821 }
1822
1823 static noinline_for_stack
1824 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1825                                  struct btrfs_root *root,
1826                                  struct btrfs_path *path,
1827                                  u64 bytenr, u64 num_bytes, u64 parent,
1828                                  u64 root_objectid, u64 owner,
1829                                  u64 offset, int refs_to_add,
1830                                  struct btrfs_delayed_extent_op *extent_op)
1831 {
1832         struct btrfs_extent_inline_ref *iref;
1833         int ret;
1834
1835         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1836                                            bytenr, num_bytes, parent,
1837                                            root_objectid, owner, offset, 1);
1838         if (ret == 0) {
1839                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1840                 update_inline_extent_backref(root, path, iref,
1841                                              refs_to_add, extent_op, NULL);
1842         } else if (ret == -ENOENT) {
1843                 setup_inline_extent_backref(root, path, iref, parent,
1844                                             root_objectid, owner, offset,
1845                                             refs_to_add, extent_op);
1846                 ret = 0;
1847         }
1848         return ret;
1849 }
1850
1851 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1852                                  struct btrfs_root *root,
1853                                  struct btrfs_path *path,
1854                                  u64 bytenr, u64 parent, u64 root_objectid,
1855                                  u64 owner, u64 offset, int refs_to_add)
1856 {
1857         int ret;
1858         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1859                 BUG_ON(refs_to_add != 1);
1860                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1861                                             parent, root_objectid);
1862         } else {
1863                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1864                                              parent, root_objectid,
1865                                              owner, offset, refs_to_add);
1866         }
1867         return ret;
1868 }
1869
1870 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1871                                  struct btrfs_root *root,
1872                                  struct btrfs_path *path,
1873                                  struct btrfs_extent_inline_ref *iref,
1874                                  int refs_to_drop, int is_data, int *last_ref)
1875 {
1876         int ret = 0;
1877
1878         BUG_ON(!is_data && refs_to_drop != 1);
1879         if (iref) {
1880                 update_inline_extent_backref(root, path, iref,
1881                                              -refs_to_drop, NULL, last_ref);
1882         } else if (is_data) {
1883                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1884                                              last_ref);
1885         } else {
1886                 *last_ref = 1;
1887                 ret = btrfs_del_item(trans, root, path);
1888         }
1889         return ret;
1890 }
1891
1892 #define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))
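
/*
 * For illustration (hypothetical helper): in_range() checks the half-open
 * interval [first, first + len), so the first byte is in range and the
 * byte at first + len is not.
 */
static void __maybe_unused in_range_examples(void)
{
        WARN_ON(!in_range(4096, 4096, 512));    /* start is included */
        WARN_ON(in_range(4608, 4096, 512));     /* end is excluded */
}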
1893 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
1894                                u64 *discarded_bytes)
1895 {
1896         int j, ret = 0;
1897         u64 bytes_left, end;
1898         u64 aligned_start = ALIGN(start, 1 << 9);
1899
1900         if (WARN_ON(start != aligned_start)) {
1901                 len -= aligned_start - start;
1902                 len = round_down(len, 1 << 9);
1903                 start = aligned_start;
1904         }
1905
1906         *discarded_bytes = 0;
1907
1908         if (!len)
1909                 return 0;
1910
1911         end = start + len;
1912         bytes_left = len;
1913
1914         /* Skip any superblocks on this device. */
1915         for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
1916                 u64 sb_start = btrfs_sb_offset(j);
1917                 u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
1918                 u64 size = sb_start - start;
1919
1920                 if (!in_range(sb_start, start, bytes_left) &&
1921                     !in_range(sb_end, start, bytes_left) &&
1922                     !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
1923                         continue;
1924
1925                 /*
1926                  * Superblock spans beginning of range.  Adjust start and
1927                  * try again.
1928                  */
1929                 if (sb_start <= start) {
1930                         start += sb_end - start;
1931                         if (start > end) {
1932                                 bytes_left = 0;
1933                                 break;
1934                         }
1935                         bytes_left = end - start;
1936                         continue;
1937                 }
1938
1939                 if (size) {
1940                         ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
1941                                                    GFP_NOFS, 0);
1942                         if (!ret)
1943                                 *discarded_bytes += size;
1944                         else if (ret != -EOPNOTSUPP)
1945                                 return ret;
1946                 }
1947
1948                 start = sb_end;
1949                 if (start > end) {
1950                         bytes_left = 0;
1951                         break;
1952                 }
1953                 bytes_left = end - start;
1954         }
1955
1956         if (bytes_left) {
1957                 ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
1958                                            GFP_NOFS, 0);
1959                 if (!ret)
1960                         *discarded_bytes += bytes_left;
1961         }
1962         return ret;
1963 }
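
/*
 * Worked example for the superblock skipping above (numbers assumed: the
 * primary superblock mirror sits at BTRFS_SUPER_INFO_OFFSET = 64MiB and is
 * BTRFS_SUPER_INFO_SIZE = 4KiB long).  Discarding [64MiB - 1MiB, 64MiB + 1MiB)
 * first issues a discard for the 1MiB in front of the superblock
 * (size == sb_start - start), then restarts at sb_end = 64MiB + 4KiB, and
 * the final blkdev_issue_discard() covers the remaining 1MiB - 4KiB.
 */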
1964
1965 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1966                          u64 num_bytes, u64 *actual_bytes)
1967 {
1968         int ret;
1969         u64 discarded_bytes = 0;
1970         struct btrfs_bio *bbio = NULL;
1971
1972
1973         /* Tell the block device(s) that the sectors can be discarded */
1974         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1975                               bytenr, &num_bytes, &bbio, 0);
1976         /* Error condition is -ENOMEM */
1977         if (!ret) {
1978                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1979                 int i;
1980
1981
1982                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1983                         u64 bytes;
1984                         if (!stripe->dev->can_discard)
1985                                 continue;
1986
1987                         ret = btrfs_issue_discard(stripe->dev->bdev,
1988                                                   stripe->physical,
1989                                                   stripe->length,
1990                                                   &bytes);
1991                         if (!ret)
1992                                 discarded_bytes += bytes;
1993                         else if (ret != -EOPNOTSUPP)
1994                                 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
1995
1996                         /*
1997                          * In case we get back EOPNOTSUPP for some reason,
1998                          * just ignore the return value so we don't screw up
1999                          * people calling discard_extent.
2000                          */
2001                         ret = 0;
2002                 }
2003                 btrfs_put_bbio(bbio);
2004         }
2005
2006         if (actual_bytes)
2007                 *actual_bytes = discarded_bytes;
2008
2009
2010         if (ret == -EOPNOTSUPP)
2011                 ret = 0;
2012         return ret;
2013 }
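
/*
 * Usage sketch (hypothetical caller, for illustration): this is the shape
 * of a free-space trimming loop body; the real callers live in the fitrim
 * ioctl / free space trimming code.
 */
static int __maybe_unused discard_one_range_sketch(struct btrfs_root *root,
                                                   u64 start, u64 len)
{
        u64 trimmed = 0;
        int ret;

        /* maps the logical range to physical stripes and discards each */
        ret = btrfs_discard_extent(root, start, len, &trimmed);
        if (!ret)
                pr_debug("discarded %llu of %llu bytes\n", trimmed, len);
        return ret;
}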
2014
2015 /* Can return -ENOMEM */
2016 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2017                          struct btrfs_root *root,
2018                          u64 bytenr, u64 num_bytes, u64 parent,
2019                          u64 root_objectid, u64 owner, u64 offset,
2020                          int no_quota)
2021 {
2022         int ret;
2023         struct btrfs_fs_info *fs_info = root->fs_info;
2024
2025         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
2026                root_objectid == BTRFS_TREE_LOG_OBJECTID);
2027
2028         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2029                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
2030                                         num_bytes,
2031                                         parent, root_objectid, (int)owner,
2032                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
2033         } else {
2034                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
2035                                         num_bytes,
2036                                         parent, root_objectid, owner, offset,
2037                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
2038         }
2039         return ret;
2040 }
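
/*
 * Usage sketch (hypothetical helper, for illustration): adding one more
 * reference to a data extent, e.g. after copying a file extent item to a
 * new location.  parent == 0 means the new ref is keyed by
 * (root, inode, offset) rather than by the leaf holding the file extent
 * item; the actual extent tree update happens later, when the delayed ref
 * is run.
 */
static int __maybe_unused add_one_data_ref_sketch(struct btrfs_trans_handle *trans,
                                                  struct btrfs_root *root,
                                                  u64 bytenr, u64 num_bytes,
                                                  u64 ino, u64 file_offset)
{
        return btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
                                    root->root_key.objectid, ino,
                                    file_offset, 0);
}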
2041
2042 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2043                                   struct btrfs_root *root,
2044                                   struct btrfs_delayed_ref_node *node,
2045                                   u64 parent, u64 root_objectid,
2046                                   u64 owner, u64 offset, int refs_to_add,
2047                                   struct btrfs_delayed_extent_op *extent_op)
2048 {
2049         struct btrfs_fs_info *fs_info = root->fs_info;
2050         struct btrfs_path *path;
2051         struct extent_buffer *leaf;
2052         struct btrfs_extent_item *item;
2053         struct btrfs_key key;
2054         u64 bytenr = node->bytenr;
2055         u64 num_bytes = node->num_bytes;
2056         u64 refs;
2057         int ret;
2058         int no_quota = node->no_quota;
2059
2060         path = btrfs_alloc_path();
2061         if (!path)
2062                 return -ENOMEM;
2063
2064         if (!is_fstree(root_objectid) || !root->fs_info->quota_enabled)
2065                 no_quota = 1;
2066
2067         path->reada = 1;
2068         path->leave_spinning = 1;
2069         /* this will set up the path even if it fails to insert the back ref */
2070         ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
2071                                            bytenr, num_bytes, parent,
2072                                            root_objectid, owner, offset,
2073                                            refs_to_add, extent_op);
2074         if ((ret < 0 && ret != -EAGAIN) || !ret)
2075                 goto out;
2076
2077         /*
2078          * Ok we had -EAGAIN which means we didn't have space to insert an
2079          * inline extent ref, so just update the reference count and add a
2080          * normal backref.
2081          */
2082         leaf = path->nodes[0];
2083         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2084         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2085         refs = btrfs_extent_refs(leaf, item);
2086         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2087         if (extent_op)
2088                 __run_delayed_extent_op(extent_op, leaf, item);
2089
2090         btrfs_mark_buffer_dirty(leaf);
2091         btrfs_release_path(path);
2092
2093         path->reada = 1;
2094         path->leave_spinning = 1;
2095         /* now insert the actual backref */
2096         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2097                                     path, bytenr, parent, root_objectid,
2098                                     owner, offset, refs_to_add);
2099         if (ret)
2100                 btrfs_abort_transaction(trans, root, ret);
2101 out:
2102         btrfs_free_path(path);
2103         return ret;
2104 }
2105
2106 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2107                                 struct btrfs_root *root,
2108                                 struct btrfs_delayed_ref_node *node,
2109                                 struct btrfs_delayed_extent_op *extent_op,
2110                                 int insert_reserved)
2111 {
2112         int ret = 0;
2113         struct btrfs_delayed_data_ref *ref;
2114         struct btrfs_key ins;
2115         u64 parent = 0;
2116         u64 ref_root = 0;
2117         u64 flags = 0;
2118
2119         ins.objectid = node->bytenr;
2120         ins.offset = node->num_bytes;
2121         ins.type = BTRFS_EXTENT_ITEM_KEY;
2122
2123         ref = btrfs_delayed_node_to_data_ref(node);
2124         trace_run_delayed_data_ref(node, ref, node->action);
2125
2126         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2127                 parent = ref->parent;
2128         ref_root = ref->root;
2129
2130         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2131                 if (extent_op)
2132                         flags |= extent_op->flags_to_set;
2133                 ret = alloc_reserved_file_extent(trans, root,
2134                                                  parent, ref_root, flags,
2135                                                  ref->objectid, ref->offset,
2136                                                  &ins, node->ref_mod);
2137         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2138                 ret = __btrfs_inc_extent_ref(trans, root, node, parent,
2139                                              ref_root, ref->objectid,
2140                                              ref->offset, node->ref_mod,
2141                                              extent_op);
2142         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2143                 ret = __btrfs_free_extent(trans, root, node, parent,
2144                                           ref_root, ref->objectid,
2145                                           ref->offset, node->ref_mod,
2146                                           extent_op);
2147         } else {
2148                 BUG();
2149         }
2150         return ret;
2151 }
2152
2153 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2154                                     struct extent_buffer *leaf,
2155                                     struct btrfs_extent_item *ei)
2156 {
2157         u64 flags = btrfs_extent_flags(leaf, ei);
2158         if (extent_op->update_flags) {
2159                 flags |= extent_op->flags_to_set;
2160                 btrfs_set_extent_flags(leaf, ei, flags);
2161         }
2162
2163         if (extent_op->update_key) {
2164                 struct btrfs_tree_block_info *bi;
2165                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2166                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2167                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2168         }
2169 }
2170
2171 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2172                                  struct btrfs_root *root,
2173                                  struct btrfs_delayed_ref_node *node,
2174                                  struct btrfs_delayed_extent_op *extent_op)
2175 {
2176         struct btrfs_key key;
2177         struct btrfs_path *path;
2178         struct btrfs_extent_item *ei;
2179         struct extent_buffer *leaf;
2180         u32 item_size;
2181         int ret;
2182         int err = 0;
2183         int metadata = !extent_op->is_data;
2184
2185         if (trans->aborted)
2186                 return 0;
2187
2188         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2189                 metadata = 0;
2190
2191         path = btrfs_alloc_path();
2192         if (!path)
2193                 return -ENOMEM;
2194
2195         key.objectid = node->bytenr;
2196
2197         if (metadata) {
2198                 key.type = BTRFS_METADATA_ITEM_KEY;
2199                 key.offset = extent_op->level;
2200         } else {
2201                 key.type = BTRFS_EXTENT_ITEM_KEY;
2202                 key.offset = node->num_bytes;
2203         }
2204
2205 again:
2206         path->reada = 1;
2207         path->leave_spinning = 1;
2208         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2209                                 path, 0, 1);
2210         if (ret < 0) {
2211                 err = ret;
2212                 goto out;
2213         }
2214         if (ret > 0) {
2215                 if (metadata) {
2216                         if (path->slots[0] > 0) {
2217                                 path->slots[0]--;
2218                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2219                                                       path->slots[0]);
2220                                 if (key.objectid == node->bytenr &&
2221                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2222                                     key.offset == node->num_bytes)
2223                                         ret = 0;
2224                         }
2225                         if (ret > 0) {
2226                                 btrfs_release_path(path);
2227                                 metadata = 0;
2228
2229                                 key.objectid = node->bytenr;
2230                                 key.offset = node->num_bytes;
2231                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2232                                 goto again;
2233                         }
2234                 } else {
2235                         err = -EIO;
2236                         goto out;
2237                 }
2238         }
2239
2240         leaf = path->nodes[0];
2241         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2242 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2243         if (item_size < sizeof(*ei)) {
2244                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2245                                              path, (u64)-1, 0);
2246                 if (ret < 0) {
2247                         err = ret;
2248                         goto out;
2249                 }
2250                 leaf = path->nodes[0];
2251                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2252         }
2253 #endif
2254         BUG_ON(item_size < sizeof(*ei));
2255         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2256         __run_delayed_extent_op(extent_op, leaf, ei);
2257
2258         btrfs_mark_buffer_dirty(leaf);
2259 out:
2260         btrfs_free_path(path);
2261         return err;
2262 }
2263
2264 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2265                                 struct btrfs_root *root,
2266                                 struct btrfs_delayed_ref_node *node,
2267                                 struct btrfs_delayed_extent_op *extent_op,
2268                                 int insert_reserved)
2269 {
2270         int ret = 0;
2271         struct btrfs_delayed_tree_ref *ref;
2272         struct btrfs_key ins;
2273         u64 parent = 0;
2274         u64 ref_root = 0;
2275         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2276                                                  SKINNY_METADATA);
2277
2278         ref = btrfs_delayed_node_to_tree_ref(node);
2279         trace_run_delayed_tree_ref(node, ref, node->action);
2280
2281         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2282                 parent = ref->parent;
2283         ref_root = ref->root;
2284
2285         ins.objectid = node->bytenr;
2286         if (skinny_metadata) {
2287                 ins.offset = ref->level;
2288                 ins.type = BTRFS_METADATA_ITEM_KEY;
2289         } else {
2290                 ins.offset = node->num_bytes;
2291                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2292         }
2293
2294         BUG_ON(node->ref_mod != 1);
2295         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2296                 BUG_ON(!extent_op || !extent_op->update_flags);
2297                 ret = alloc_reserved_tree_block(trans, root,
2298                                                 parent, ref_root,
2299                                                 extent_op->flags_to_set,
2300                                                 &extent_op->key,
2301                                                 ref->level, &ins,
2302                                                 node->no_quota);
2303         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2304                 ret = __btrfs_inc_extent_ref(trans, root, node,
2305                                              parent, ref_root,
2306                                              ref->level, 0, 1,
2307                                              extent_op);
2308         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2309                 ret = __btrfs_free_extent(trans, root, node,
2310                                           parent, ref_root,
2311                                           ref->level, 0, 1, extent_op);
2312         } else {
2313                 BUG();
2314         }
2315         return ret;
2316 }
2317
2318 /* helper function to actually process a single delayed ref entry */
2319 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2320                                struct btrfs_root *root,
2321                                struct btrfs_delayed_ref_node *node,
2322                                struct btrfs_delayed_extent_op *extent_op,
2323                                int insert_reserved)
2324 {
2325         int ret = 0;
2326
2327         if (trans->aborted) {
2328                 if (insert_reserved)
2329                         btrfs_pin_extent(root, node->bytenr,
2330                                          node->num_bytes, 1);
2331                 return 0;
2332         }
2333
2334         if (btrfs_delayed_ref_is_head(node)) {
2335                 struct btrfs_delayed_ref_head *head;
2336                 /*
2337                  * we've hit the end of the chain and we were supposed
2338                  * to insert this extent into the tree.  But it got
2339                  * deleted before we ever needed to insert it, so all
2340                  * we have to do is clean up the accounting
2341                  */
2342                 BUG_ON(extent_op);
2343                 head = btrfs_delayed_node_to_head(node);
2344                 trace_run_delayed_ref_head(node, head, node->action);
2345
2346                 if (insert_reserved) {
2347                         btrfs_pin_extent(root, node->bytenr,
2348                                          node->num_bytes, 1);
2349                         if (head->is_data) {
2350                                 ret = btrfs_del_csums(trans, root,
2351                                                       node->bytenr,
2352                                                       node->num_bytes);
2353                         }
2354                 }
2355                 return ret;
2356         }
2357
2358         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2359             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2360                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2361                                            insert_reserved);
2362         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2363                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2364                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2365                                            insert_reserved);
2366         else
2367                 BUG();
2368         return ret;
2369 }
2370
2371 static inline struct btrfs_delayed_ref_node *
2372 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2373 {
2374         struct btrfs_delayed_ref_node *ref;
2375
2376         if (list_empty(&head->ref_list))
2377                 return NULL;
2378
2379         /*
2380          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2381          * This is to prevent a ref count from going down to zero, which deletes
2382          * the extent item from the extent tree, when there still are references
2383          * to add, which would fail because they would not find the extent item.
2384          */
2385         list_for_each_entry(ref, &head->ref_list, list) {
2386                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2387                         return ref;
2388         }
2389
2390         return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
2391                           list);
2392 }
2393
2394 /*
2395  * Returns 0 on success or if called with an already aborted transaction.
2396  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2397  */
2398 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2399                                              struct btrfs_root *root,
2400                                              unsigned long nr)
2401 {
2402         struct btrfs_delayed_ref_root *delayed_refs;
2403         struct btrfs_delayed_ref_node *ref;
2404         struct btrfs_delayed_ref_head *locked_ref = NULL;
2405         struct btrfs_delayed_extent_op *extent_op;
2406         struct btrfs_fs_info *fs_info = root->fs_info;
2407         ktime_t start = ktime_get();
2408         int ret;
2409         unsigned long count = 0;
2410         unsigned long actual_count = 0;
2411         int must_insert_reserved = 0;
2412
2413         delayed_refs = &trans->transaction->delayed_refs;
2414         while (1) {
2415                 if (!locked_ref) {
2416                         if (count >= nr)
2417                                 break;
2418
2419                         spin_lock(&delayed_refs->lock);
2420                         locked_ref = btrfs_select_ref_head(trans);
2421                         if (!locked_ref) {
2422                                 spin_unlock(&delayed_refs->lock);
2423                                 break;
2424                         }
2425
2426                         /* grab the lock that says we are going to process
2427                          * all the refs for this head */
2428                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2429                         spin_unlock(&delayed_refs->lock);
2430                         /*
2431                          * we may have dropped the spin lock to get the head
2432                          * mutex lock, and that might have given someone else
2433                          * time to free the head.  If that's true, it has been
2434                          * removed from our list and we can move on.
2435                          */
2436                         if (ret == -EAGAIN) {
2437                                 locked_ref = NULL;
2438                                 count++;
2439                                 continue;
2440                         }
2441                 }
2442
2443                 spin_lock(&locked_ref->lock);
2444
2445                 /*
2446                  * locked_ref is the head node, so we have to go one
2447                  * node back for any delayed ref updates
2448                  */
2449                 ref = select_delayed_ref(locked_ref);
2450
2451                 if (ref && ref->seq &&
2452                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2453                         spin_unlock(&locked_ref->lock);
2454                         btrfs_delayed_ref_unlock(locked_ref);
2455                         spin_lock(&delayed_refs->lock);
2456                         locked_ref->processing = 0;
2457                         delayed_refs->num_heads_ready++;
2458                         spin_unlock(&delayed_refs->lock);
2459                         locked_ref = NULL;
2460                         cond_resched();
2461                         count++;
2462                         continue;
2463                 }
2464
2465                 /*
2466                  * record the must insert reserved flag before we
2467                  * drop the spin lock.
2468                  */
2469                 must_insert_reserved = locked_ref->must_insert_reserved;
2470                 locked_ref->must_insert_reserved = 0;
2471
2472                 extent_op = locked_ref->extent_op;
2473                 locked_ref->extent_op = NULL;
2474
2475                 if (!ref) {
2476
2477
2478                         /* All delayed refs have been processed, go ahead
2479                          * and send the head node to run_one_delayed_ref,
2480                          * so that any accounting fixes can happen.
2481                          */
2482                         ref = &locked_ref->node;
2483
2484                         if (extent_op && must_insert_reserved) {
2485                                 btrfs_free_delayed_extent_op(extent_op);
2486                                 extent_op = NULL;
2487                         }
2488
2489                         if (extent_op) {
2490                                 spin_unlock(&locked_ref->lock);
2491                                 ret = run_delayed_extent_op(trans, root,
2492                                                             ref, extent_op);
2493                                 btrfs_free_delayed_extent_op(extent_op);
2494
2495                                 if (ret) {
2496                                         /*
2497                                          * Need to reset must_insert_reserved if
2498                                          * there was an error so the abort stuff
2499                                          * can clean up the reserved space
2500                                          * properly.
2501                                          */
2502                                         if (must_insert_reserved)
2503                                                 locked_ref->must_insert_reserved = 1;
2504                                         locked_ref->processing = 0;
2505                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2506                                         btrfs_delayed_ref_unlock(locked_ref);
2507                                         return ret;
2508                                 }
2509                                 continue;
2510                         }
2511
2512                         /*
2513                          * Need to drop our head ref lock and re-acquire the
2514                          * delayed ref lock and then re-check to make sure
2515                          * nobody got added.
2516                          */
2517                         spin_unlock(&locked_ref->lock);
2518                         spin_lock(&delayed_refs->lock);
2519                         spin_lock(&locked_ref->lock);
2520                         if (!list_empty(&locked_ref->ref_list) ||
2521                             locked_ref->extent_op) {
2522                                 spin_unlock(&locked_ref->lock);
2523                                 spin_unlock(&delayed_refs->lock);
2524                                 continue;
2525                         }
2526                         ref->in_tree = 0;
2527                         delayed_refs->num_heads--;
2528                         rb_erase(&locked_ref->href_node,
2529                                  &delayed_refs->href_root);
2530                         spin_unlock(&delayed_refs->lock);
2531                 } else {
2532                         actual_count++;
2533                         ref->in_tree = 0;
2534                         list_del(&ref->list);
2535                 }
2536                 atomic_dec(&delayed_refs->num_entries);
2537
2538                 if (!btrfs_delayed_ref_is_head(ref)) {
2539                         /*
2540                          * when we play the delayed ref, also correct the
2541                          * ref_mod on head
2542                          */
2543                         switch (ref->action) {
2544                         case BTRFS_ADD_DELAYED_REF:
2545                         case BTRFS_ADD_DELAYED_EXTENT:
2546                                 locked_ref->node.ref_mod -= ref->ref_mod;
2547                                 break;
2548                         case BTRFS_DROP_DELAYED_REF:
2549                                 locked_ref->node.ref_mod += ref->ref_mod;
2550                                 break;
2551                         default:
2552                                 WARN_ON(1);
2553                         }
2554                 }
2555                 spin_unlock(&locked_ref->lock);
2556
2557                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2558                                           must_insert_reserved);
2559
2560                 btrfs_free_delayed_extent_op(extent_op);
2561                 if (ret) {
2562                         locked_ref->processing = 0;
2563                         btrfs_delayed_ref_unlock(locked_ref);
2564                         btrfs_put_delayed_ref(ref);
2565                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2566                         return ret;
2567                 }
2568
2569                 /*
2570                  * If this node is a head, that means all the refs in this head
2571                  * have been dealt with, and we will pick the next head to deal
2572                  * with, so we must unlock the head and drop it from the cluster
2573                  * list before we release it.
2574                  */
2575                 if (btrfs_delayed_ref_is_head(ref)) {
2576                         if (locked_ref->is_data &&
2577                             locked_ref->total_ref_mod < 0) {
2578                                 spin_lock(&delayed_refs->lock);
2579                                 delayed_refs->pending_csums -= ref->num_bytes;
2580                                 spin_unlock(&delayed_refs->lock);
2581                         }
2582                         btrfs_delayed_ref_unlock(locked_ref);
2583                         locked_ref = NULL;
2584                 }
2585                 btrfs_put_delayed_ref(ref);
2586                 count++;
2587                 cond_resched();
2588         }
2589
2590         /*
2591          * We don't want to include ref heads since we can have empty ref heads
2592          * and those would drastically skew our runtime down, as we just do
2593          * accounting, no actual extent tree updates.
2594          */
2595         if (actual_count > 0) {
2596                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2597                 u64 avg;
2598
2599                 /*
2600                  * We weigh the current average higher than our current runtime
2601                  * to avoid large swings in the average.
2602                  */
2603                 spin_lock(&delayed_refs->lock);
2604                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2605                 fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
2606                 spin_unlock(&delayed_refs->lock);
2607         }
2608         return 0;
2609 }
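
/*
 * The average maintained above is an exponentially weighted moving average
 * with weight 1/4 on the new sample:
 *
 *      avg_new = (3 * avg_old + runtime) / 4
 *
 * e.g. an old average of 4000ns and an 8000ns run give
 * (3 * 4000 + 8000) / 4 = 5000ns, so one slow batch only moves the
 * average a quarter of the way toward it.
 */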
2610
2611 #ifdef SCRAMBLE_DELAYED_REFS
2612 /*
2613  * Normally delayed refs get processed in ascending bytenr order. This
2614  * correlates in most cases to the order added. To expose dependencies on this
2615  * order, we start to process the tree in the middle instead of the beginning
2616  */
2617 static u64 find_middle(struct rb_root *root)
2618 {
2619         struct rb_node *n = root->rb_node;
2620         struct btrfs_delayed_ref_node *entry;
2621         int alt = 1;
2622         u64 middle;
2623         u64 first = 0, last = 0;
2624
2625         n = rb_first(root);
2626         if (n) {
2627                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2628                 first = entry->bytenr;
2629         }
2630         n = rb_last(root);
2631         if (n) {
2632                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2633                 last = entry->bytenr;
2634         }
2635         n = root->rb_node;
2636
2637         while (n) {
2638                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2639                 WARN_ON(!entry->in_tree);
2640
2641                 middle = entry->bytenr;
2642
2643                 if (alt)
2644                         n = n->rb_left;
2645                 else
2646                         n = n->rb_right;
2647
2648                 alt = 1 - alt;
2649         }
2650         return middle;
2651 }
2652 #endif
2653
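/*
 * Rough estimate of how many extent tree leaves updating @heads delayed ref
 * heads could dirty: one extent item plus one inline ref each, plus a tree
 * block info when skinny metadata isn't enabled, rounded into leaves.
 */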
2654 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2655 {
2656         u64 num_bytes;
2657
2658         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2659                              sizeof(struct btrfs_extent_inline_ref));
2660         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2661                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2662
        /*
         * We don't ever fill up leaves all the way, so callers double this
         * estimate to be closer to what we're really going to want to use.
         */
2667         return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2668 }
2669
/*
 * Takes the number of bytes to be checksummed and figures out how many
 * leaves it would require to store the csums for that many bytes.
 */
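/*
 * E.g. (a rough sketch, assuming a 4K sectorsize, a 16K nodesize and
 * crc32c's 4-byte csums): a bit under 16K of leaf space / 4 bytes gives
 * roughly 4000 csums per leaf; 1GiB of data needs 1GiB / 4K = 262144 csums,
 * i.e. about 65 leaves after rounding up.
 */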
2674 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
2675 {
2676         u64 csum_size;
2677         u64 num_csums_per_leaf;
2678         u64 num_csums;
2679
2680         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
2681         num_csums_per_leaf = div64_u64(csum_size,
2682                         (u64)btrfs_super_csum_size(root->fs_info->super_copy));
2683         num_csums = div64_u64(csum_bytes, root->sectorsize);
2684         num_csums += num_csums_per_leaf - 1;
2685         num_csums = div64_u64(num_csums, num_csums_per_leaf);
2686         return num_csums;
2687 }
2688
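/*
 * Estimate the metadata space that running all currently ready delayed ref
 * heads would consume (extent tree updates, csum deletions and dirty block
 * group items) and compare it against the global reserve.  Returns 1 if the
 * reserve looks too small to safely run them, 0 otherwise.
 */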
2689 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2690                                        struct btrfs_root *root)
2691 {
2692         struct btrfs_block_rsv *global_rsv;
2693         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2694         u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2695         u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
2696         u64 num_bytes, num_dirty_bgs_bytes;
2697         int ret = 0;
2698
2699         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2700         num_heads = heads_to_leaves(root, num_heads);
2701         if (num_heads > 1)
2702                 num_bytes += (num_heads - 1) * root->nodesize;
2703         num_bytes <<= 1;
2704         num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
2705         num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
2706                                                              num_dirty_bgs);
2707         global_rsv = &root->fs_info->global_block_rsv;
2708
2709         /*
         * If we can't allocate any more chunks, let's make sure we have
         * _lots_ of wiggle room, since running delayed refs can create more
         * delayed refs.
2712          */
2713         if (global_rsv->space_info->full) {
2714                 num_dirty_bgs_bytes <<= 1;
2715                 num_bytes <<= 1;
2716         }
2717
2718         spin_lock(&global_rsv->lock);
2719         if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2720                 ret = 1;
2721         spin_unlock(&global_rsv->lock);
2722         return ret;
2723 }
2724
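/*
 * Estimate, from the average cost of one delayed ref, how long running the
 * whole backlog would take.  Returns 1 if that is a second or more of work,
 * 2 if it is at least half a second, and otherwise falls back to the global
 * reserve check above, so callers can throttle themselves.
 */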
2725 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2726                                        struct btrfs_root *root)
2727 {
2728         struct btrfs_fs_info *fs_info = root->fs_info;
2729         u64 num_entries =
2730                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2731         u64 avg_runtime;
2732         u64 val;
2733
2734         smp_mb();
2735         avg_runtime = fs_info->avg_delayed_ref_runtime;
        val = num_entries * avg_runtime;
        if (val >= NSEC_PER_SEC)
                return 1;
2739         if (val >= NSEC_PER_SEC / 2)
2740                 return 2;
2741
2742         return btrfs_check_space_for_delayed_refs(trans, root);
2743 }
2744
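/*
 * Work item for running delayed refs from a helper thread.  When 'sync' is
 * set, the submitter waits on 'wait' and frees the struct itself; otherwise
 * the worker frees it when it is done.
 */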
2745 struct async_delayed_refs {
2746         struct btrfs_root *root;
2747         int count;
2748         int error;
2749         int sync;
2750         struct completion wait;
2751         struct btrfs_work work;
2752 };
2753
2754 static void delayed_ref_async_start(struct btrfs_work *work)
2755 {
2756         struct async_delayed_refs *async;
2757         struct btrfs_trans_handle *trans;
2758         int ret;
2759
2760         async = container_of(work, struct async_delayed_refs, work);
2761
2762         trans = btrfs_join_transaction(async->root);
2763         if (IS_ERR(trans)) {
2764                 async->error = PTR_ERR(trans);
2765                 goto done;
2766         }
2767
2768         /*
         * trans->sync means that when we call end_transaction, we won't
2770          * wait on delayed refs
2771          */
2772         trans->sync = true;
2773         ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2774         if (ret)
2775                 async->error = ret;
2776
2777         ret = btrfs_end_transaction(trans, async->root);
2778         if (ret && !async->error)
2779                 async->error = ret;
2780 done:
2781         if (async->sync)
2782                 complete(&async->wait);
2783         else
2784                 kfree(async);
2785 }
2786
2787 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2788                                  unsigned long count, int wait)
2789 {
2790         struct async_delayed_refs *async;
2791         int ret;
2792
2793         async = kmalloc(sizeof(*async), GFP_NOFS);
2794         if (!async)
2795                 return -ENOMEM;
2796
2797         async->root = root->fs_info->tree_root;
2798         async->count = count;
2799         async->error = 0;
2800         if (wait)
2801                 async->sync = 1;
2802         else
2803                 async->sync = 0;
2804         init_completion(&async->wait);
2805
2806         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2807                         delayed_ref_async_start, NULL, NULL);
2808
2809         btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2810
2811         if (wait) {
2812                 wait_for_completion(&async->wait);
2813                 ret = async->error;
2814                 kfree(async);
2815                 return ret;
2816         }
2817         return 0;
2818 }
2819
2820 /*
2821  * this starts processing the delayed reference count updates and
2822  * extent insertions we have queued up so far.  count can be
2823  * 0, which means to process everything in the tree at the start
2824  * of the run (but not newly added entries), or it can be some target
2825  * number you'd like to process.
2826  *
2827  * Returns 0 on success or if called with an aborted transaction
2828  * Returns <0 on error and aborts the transaction
2829  */
2830 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2831                            struct btrfs_root *root, unsigned long count)
2832 {
2833         struct rb_node *node;
2834         struct btrfs_delayed_ref_root *delayed_refs;
2835         struct btrfs_delayed_ref_head *head;
2836         int ret;
2837         int run_all = count == (unsigned long)-1;
2838         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
2839
2840         /* We'll clean this up in btrfs_cleanup_transaction */
2841         if (trans->aborted)
2842                 return 0;
2843
2844         if (root == root->fs_info->extent_root)
2845                 root = root->fs_info->tree_root;
2846
2847         delayed_refs = &trans->transaction->delayed_refs;
2848         if (count == 0)
2849                 count = atomic_read(&delayed_refs->num_entries) * 2;
2850
2851 again:
2852 #ifdef SCRAMBLE_DELAYED_REFS
2853         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2854 #endif
2855         trans->can_flush_pending_bgs = false;
2856         ret = __btrfs_run_delayed_refs(trans, root, count);
2857         if (ret < 0) {
2858                 btrfs_abort_transaction(trans, root, ret);
2859                 return ret;
2860         }
2861
2862         if (run_all) {
2863                 if (!list_empty(&trans->new_bgs))
2864                         btrfs_create_pending_block_groups(trans, root);
2865
2866                 spin_lock(&delayed_refs->lock);
2867                 node = rb_first(&delayed_refs->href_root);
2868                 if (!node) {
2869                         spin_unlock(&delayed_refs->lock);
2870                         goto out;
2871                 }
2872                 count = (unsigned long)-1;
2873
2874                 while (node) {
2875                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2876                                         href_node);
2877                         if (btrfs_delayed_ref_is_head(&head->node)) {
2878                                 struct btrfs_delayed_ref_node *ref;
2879
2880                                 ref = &head->node;
2881                                 atomic_inc(&ref->refs);
2882
2883                                 spin_unlock(&delayed_refs->lock);
2884                                 /*
2885                                  * Mutex was contended, block until it's
2886                                  * released and try again
2887                                  */
2888                                 mutex_lock(&head->mutex);
2889                                 mutex_unlock(&head->mutex);
2890
2891                                 btrfs_put_delayed_ref(ref);
2892                                 cond_resched();
2893                                 goto again;
2894                         } else {
2895                                 WARN_ON(1);
2896                         }
2897                         node = rb_next(node);
2898                 }
2899                 spin_unlock(&delayed_refs->lock);
2900                 cond_resched();
2901                 goto again;
2902         }
2903 out:
2904         assert_qgroups_uptodate(trans);
2905         trans->can_flush_pending_bgs = can_flush_pending_bgs;
2906         return 0;
2907 }
2908
2909 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2910                                 struct btrfs_root *root,
2911                                 u64 bytenr, u64 num_bytes, u64 flags,
2912                                 int level, int is_data)
2913 {
2914         struct btrfs_delayed_extent_op *extent_op;
2915         int ret;
2916
2917         extent_op = btrfs_alloc_delayed_extent_op();
2918         if (!extent_op)
2919                 return -ENOMEM;
2920
2921         extent_op->flags_to_set = flags;
2922         extent_op->update_flags = 1;
2923         extent_op->update_key = 0;
2924         extent_op->is_data = is_data ? 1 : 0;
2925         extent_op->level = level;
2926
2927         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2928                                           num_bytes, extent_op);
2929         if (ret)
2930                 btrfs_free_delayed_extent_op(extent_op);
2931         return ret;
2932 }
2933
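/*
 * Scan the delayed refs queued against @bytenr for a data ref belonging to
 * anything other than @root/@objectid/@offset.  Returns 1 if such a cross
 * reference exists, 0 if not, and -EAGAIN if the ref head was contended and
 * the caller should retry.
 */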
2934 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2935                                       struct btrfs_root *root,
2936                                       struct btrfs_path *path,
2937                                       u64 objectid, u64 offset, u64 bytenr)
2938 {
2939         struct btrfs_delayed_ref_head *head;
2940         struct btrfs_delayed_ref_node *ref;
2941         struct btrfs_delayed_data_ref *data_ref;
2942         struct btrfs_delayed_ref_root *delayed_refs;
2943         int ret = 0;
2944
2945         delayed_refs = &trans->transaction->delayed_refs;
2946         spin_lock(&delayed_refs->lock);
2947         head = btrfs_find_delayed_ref_head(trans, bytenr);
2948         if (!head) {
2949                 spin_unlock(&delayed_refs->lock);
2950                 return 0;
2951         }
2952
2953         if (!mutex_trylock(&head->mutex)) {
2954                 atomic_inc(&head->node.refs);
2955                 spin_unlock(&delayed_refs->lock);
2956
2957                 btrfs_release_path(path);
2958
2959                 /*
2960                  * Mutex was contended, block until it's released and let
2961                  * caller try again
2962                  */
2963                 mutex_lock(&head->mutex);
2964                 mutex_unlock(&head->mutex);
2965                 btrfs_put_delayed_ref(&head->node);
2966                 return -EAGAIN;
2967         }
2968         spin_unlock(&delayed_refs->lock);
2969
2970         spin_lock(&head->lock);
2971         list_for_each_entry(ref, &head->ref_list, list) {
2972                 /* If it's a shared ref we know a cross reference exists */
2973                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2974                         ret = 1;
2975                         break;
2976                 }
2977
2978                 data_ref = btrfs_delayed_node_to_data_ref(ref);
2979
2980                 /*
2981                  * If our ref doesn't match the one we're currently looking at
2982                  * then we have a cross reference.
2983                  */
2984                 if (data_ref->root != root->root_key.objectid ||
2985                     data_ref->objectid != objectid ||
2986                     data_ref->offset != offset) {
2987                         ret = 1;
2988                         break;
2989                 }
2990         }
2991         spin_unlock(&head->lock);
2992         mutex_unlock(&head->mutex);
2993         return ret;
2994 }
2995
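/*
 * Check the committed extent tree for cross references to the extent at
 * @bytenr.  Returns 0 only if the extent item carries a single inline data
 * ref that exactly matches @root/@objectid/@offset and the extent is newer
 * than the root's last snapshot; returns 1 or -ENOENT when a cross
 * reference exists or cannot be ruled out.
 */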
2996 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2997                                         struct btrfs_root *root,
2998                                         struct btrfs_path *path,
2999                                         u64 objectid, u64 offset, u64 bytenr)
3000 {
3001         struct btrfs_root *extent_root = root->fs_info->extent_root;
3002         struct extent_buffer *leaf;
3003         struct btrfs_extent_data_ref *ref;
3004         struct btrfs_extent_inline_ref *iref;
3005         struct btrfs_extent_item *ei;
3006         struct btrfs_key key;
3007         u32 item_size;
3008         int ret;
3009
3010         key.objectid = bytenr;
3011         key.offset = (u64)-1;
3012         key.type = BTRFS_EXTENT_ITEM_KEY;
3013
3014         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3015         if (ret < 0)
3016                 goto out;
3017         BUG_ON(ret == 0); /* Corruption */
3018
3019         ret = -ENOENT;
3020         if (path->slots[0] == 0)
3021                 goto out;
3022
3023         path->slots[0]--;
3024         leaf = path->nodes[0];
3025         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3026
3027         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
3028                 goto out;
3029
3030         ret = 1;
3031         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3032 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3033         if (item_size < sizeof(*ei)) {
3034                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3035                 goto out;
3036         }
3037 #endif
3038         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3039
3040         if (item_size != sizeof(*ei) +
3041             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3042                 goto out;
3043
3044         if (btrfs_extent_generation(leaf, ei) <=
3045             btrfs_root_last_snapshot(&root->root_item))
3046                 goto out;
3047
3048         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3049         if (btrfs_extent_inline_ref_type(leaf, iref) !=
3050             BTRFS_EXTENT_DATA_REF_KEY)
3051                 goto out;
3052
3053         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3054         if (btrfs_extent_refs(leaf, ei) !=
3055             btrfs_extent_data_ref_count(leaf, ref) ||
3056             btrfs_extent_data_ref_root(leaf, ref) !=
3057             root->root_key.objectid ||
3058             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3059             btrfs_extent_data_ref_offset(leaf, ref) != offset)
3060                 goto out;
3061
3062         ret = 0;
3063 out:
3064         return ret;
3065 }
3066
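/*
 * Combined check of the committed extent tree and the delayed ref queue.
 * Returns 0 when @root/@objectid/@offset holds the only reference to the
 * extent at @bytenr, a positive value when a cross reference exists or
 * can't be ruled out, and a negative errno (including -ENOENT when neither
 * place knows of the extent) otherwise.
 */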
3067 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3068                           struct btrfs_root *root,
3069                           u64 objectid, u64 offset, u64 bytenr)
3070 {
3071         struct btrfs_path *path;
3072         int ret;
3073         int ret2;
3074
3075         path = btrfs_alloc_path();
3076         if (!path)
                return -ENOMEM;
3078
3079         do {
3080                 ret = check_committed_ref(trans, root, path, objectid,
3081                                           offset, bytenr);
3082                 if (ret && ret != -ENOENT)
3083                         goto out;
3084
3085                 ret2 = check_delayed_ref(trans, root, path, objectid,
3086                                          offset, bytenr);
3087         } while (ret2 == -EAGAIN);
3088
3089         if (ret2 && ret2 != -ENOENT) {
3090                 ret = ret2;
3091                 goto out;
3092         }
3093
3094         if (ret != -ENOENT || ret2 != -ENOENT)
3095                 ret = 0;
3096 out:
3097         btrfs_free_path(path);
3098         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3099                 WARN_ON(ret > 0);
3100         return ret;
3101 }
3102
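/*
 * Add (inc=1) or drop (inc=0) one reference for everything @buf points to:
 * for a leaf, each regular (non-inline, non-hole) file extent; for a node,
 * each child block.  @full_backref selects whether the refs are recorded
 * against @buf itself as parent or against the owning root.
 */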
3103 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3104                            struct btrfs_root *root,
3105                            struct extent_buffer *buf,
3106                            int full_backref, int inc)
3107 {
3108         u64 bytenr;
3109         u64 num_bytes;
3110         u64 parent;
3111         u64 ref_root;
3112         u32 nritems;
3113         struct btrfs_key key;
3114         struct btrfs_file_extent_item *fi;
3115         int i;
3116         int level;
3117         int ret = 0;
3118         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3119                             u64, u64, u64, u64, u64, u64, int);
3120
3122         if (btrfs_test_is_dummy_root(root))
3123                 return 0;
3124
3125         ref_root = btrfs_header_owner(buf);
3126         nritems = btrfs_header_nritems(buf);
3127         level = btrfs_header_level(buf);
3128
3129         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3130                 return 0;
3131
3132         if (inc)
3133                 process_func = btrfs_inc_extent_ref;
3134         else
3135                 process_func = btrfs_free_extent;
3136
3137         if (full_backref)
3138                 parent = buf->start;
3139         else
3140                 parent = 0;
3141
3142         for (i = 0; i < nritems; i++) {
3143                 if (level == 0) {
3144                         btrfs_item_key_to_cpu(buf, &key, i);
3145                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3146                                 continue;
3147                         fi = btrfs_item_ptr(buf, i,
3148                                             struct btrfs_file_extent_item);
3149                         if (btrfs_file_extent_type(buf, fi) ==
3150                             BTRFS_FILE_EXTENT_INLINE)
3151                                 continue;
3152                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3153                         if (bytenr == 0)
3154                                 continue;
3155
3156                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3157                         key.offset -= btrfs_file_extent_offset(buf, fi);
3158                         ret = process_func(trans, root, bytenr, num_bytes,
3159                                            parent, ref_root, key.objectid,
3160                                            key.offset, 1);
3161                         if (ret)
3162                                 goto fail;
3163                 } else {
3164                         bytenr = btrfs_node_blockptr(buf, i);
3165                         num_bytes = root->nodesize;
3166                         ret = process_func(trans, root, bytenr, num_bytes,
3167                                            parent, ref_root, level - 1, 0,
3168                                            1);
3169                         if (ret)
3170                                 goto fail;
3171                 }
3172         }
3173         return 0;
3174 fail:
3175         return ret;
3176 }
3177
3178 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3179                   struct extent_buffer *buf, int full_backref)
3180 {
3181         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3182 }
3183
3184 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3185                   struct extent_buffer *buf, int full_backref)
3186 {
3187         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3188 }
3189
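/*
 * Copy the in-memory block group item of @cache back over its item in the
 * extent tree.  Returns -ENOENT if the item isn't in the tree yet, which
 * can happen while the block group still sits on a transaction's list of
 * new block groups.
 */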
3190 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3191                                  struct btrfs_root *root,
3192                                  struct btrfs_path *path,
3193                                  struct btrfs_block_group_cache *cache)
3194 {
3195         int ret;
3196         struct btrfs_root *extent_root = root->fs_info->extent_root;
3197         unsigned long bi;
3198         struct extent_buffer *leaf;
3199
3200         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3201         if (ret) {
3202                 if (ret > 0)
3203                         ret = -ENOENT;
3204                 goto fail;
3205         }
3206
3207         leaf = path->nodes[0];
3208         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3209         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3210         btrfs_mark_buffer_dirty(leaf);
3211 fail:
3212         btrfs_release_path(path);
3213         return ret;
3215 }
3216
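/*
 * Return (with a reference held) the block group that follows @cache in
 * bytenr order, or NULL when there is none.  @cache's reference is always
 * dropped; if @cache was removed from the cache tree in the meantime, a
 * full lookup starting at the next bytenr is done instead.
 */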
3217 static struct btrfs_block_group_cache *
3218 next_block_group(struct btrfs_root *root,
3219                  struct btrfs_block_group_cache *cache)
3220 {
3221         struct rb_node *node;
3222
3223         spin_lock(&root->fs_info->block_group_cache_lock);
3224
3225         /* If our block group was removed, we need a full search. */
3226         if (RB_EMPTY_NODE(&cache->cache_node)) {
3227                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3228
3229                 spin_unlock(&root->fs_info->block_group_cache_lock);
3230                 btrfs_put_block_group(cache);
3231                 cache = btrfs_lookup_first_block_group(root->fs_info,
3232                                                        next_bytenr);
3233                 return cache;
3234         }
3235         node = rb_next(&cache->cache_node);
3236         btrfs_put_block_group(cache);
3237         if (node) {
3238                 cache = rb_entry(node, struct btrfs_block_group_cache,
3239                                  cache_node);
3240                 btrfs_get_block_group(cache);
3241         } else
3242                 cache = NULL;
3243         spin_unlock(&root->fs_info->block_group_cache_lock);
3244         return cache;
3245 }
3246
3247 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3248                             struct btrfs_trans_handle *trans,
3249                             struct btrfs_path *path)
3250 {
3251         struct btrfs_root *root = block_group->fs_info->tree_root;
3252         struct inode *inode = NULL;
3253         u64 alloc_hint = 0;
3254         int dcs = BTRFS_DC_ERROR;
3255         u64 num_pages = 0;
3256         int retries = 0;
3257         int ret = 0;
3258
3259         /*
         * If this block group is smaller than 100 megs, don't bother caching
         * the block group.
3262          */
3263         if (block_group->key.offset < (100 * 1024 * 1024)) {
3264                 spin_lock(&block_group->lock);
3265                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3266                 spin_unlock(&block_group->lock);
3267                 return 0;
3268         }
3269
3270         if (trans->aborted)
3271                 return 0;
3272 again:
3273         inode = lookup_free_space_inode(root, block_group, path);
3274         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3275                 ret = PTR_ERR(inode);
3276                 btrfs_release_path(path);
3277                 goto out;
3278         }
3279
3280         if (IS_ERR(inode)) {
3281                 BUG_ON(retries);
3282                 retries++;
3283
3284                 if (block_group->ro)
3285                         goto out_free;
3286
3287                 ret = create_free_space_inode(root, trans, block_group, path);
3288                 if (ret)
3289                         goto out_free;
3290                 goto again;
3291         }
3292
        /* We've already set up this transaction, go ahead and exit */
3294         if (block_group->cache_generation == trans->transid &&
3295             i_size_read(inode)) {
3296                 dcs = BTRFS_DC_SETUP;
3297                 goto out_put;
3298         }
3299
3300         /*
3301          * We want to set the generation to 0, that way if anything goes wrong
3302          * from here on out we know not to trust this cache when we load up next
3303          * time.
3304          */
3305         BTRFS_I(inode)->generation = 0;
3306         ret = btrfs_update_inode(trans, root, inode);
3307         if (ret) {
3308                 /*
                 * Theoretically we could recover from this: simply set the
                 * super cache generation to 0 so we know to invalidate the
                 * cache.  But then we'd have to keep track of the block
                 * groups that fail this way, so we'd know we _have_ to reset
                 * this cache before the next commit or risk reading a stale
                 * cache.  To limit our exposure to horrible edge cases,
                 * let's just abort the transaction; this only happens in
                 * really bad situations anyway.
3317                  */
3318                 btrfs_abort_transaction(trans, root, ret);
3319                 goto out_put;
3320         }
3321         WARN_ON(ret);
3322
3323         if (i_size_read(inode) > 0) {
3324                 ret = btrfs_check_trunc_cache_free_space(root,
3325                                         &root->fs_info->global_block_rsv);
3326                 if (ret)
3327                         goto out_put;
3328
3329                 ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
3330                 if (ret)
3331                         goto out_put;
3332         }
3333
3334         spin_lock(&block_group->lock);
3335         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3336             !btrfs_test_opt(root, SPACE_CACHE)) {
3337                 /*
                 * don't bother trying to write stuff out _if_
                 * a) we're not cached, or
                 * b) we're mounted with the nospace_cache option.
3341                  */
3342                 dcs = BTRFS_DC_WRITTEN;
3343                 spin_unlock(&block_group->lock);
3344                 goto out_put;
3345         }
3346         spin_unlock(&block_group->lock);
3347
3348         /*
3349          * Try to preallocate enough space based on how big the block group is.
3350          * Keep in mind this has to include any pinned space which could end up
3351          * taking up quite a bit since it's not folded into the other space
3352          * cache.
3353          */
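        /*
         * E.g. (a sketch, assuming 4K pages): a hypothetical 1GiB block
         * group gives div_u64(1GiB, 256MiB) = 4, so 4 * 16 = 64 pages,
         * i.e. 256K preallocated for the cache.
         */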
3354         num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
3355         if (!num_pages)
3356                 num_pages = 1;
3357
3358         num_pages *= 16;
3359         num_pages *= PAGE_CACHE_SIZE;
3360
3361         ret = btrfs_check_data_free_space(inode, num_pages, num_pages);
3362         if (ret)
3363                 goto out_put;
3364
3365         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3366                                               num_pages, num_pages,
3367                                               &alloc_hint);
3368         if (!ret)
3369                 dcs = BTRFS_DC_SETUP;
3370         btrfs_free_reserved_data_space(inode, num_pages);
3371
3372 out_put:
3373         iput(inode);
3374 out_free:
3375         btrfs_release_path(path);
3376 out:
3377         spin_lock(&block_group->lock);
3378         if (!ret && dcs == BTRFS_DC_SETUP)
3379                 block_group->cache_generation = trans->transid;
3380         block_group->disk_cache_state = dcs;
3381         spin_unlock(&block_group->lock);
3382
3383         return ret;
3384 }
3385
3386 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3387                             struct btrfs_root *root)
3388 {
3389         struct btrfs_block_group_cache *cache, *tmp;
3390         struct btrfs_transaction *cur_trans = trans->transaction;
3391         struct btrfs_path *path;
3392
3393         if (list_empty(&cur_trans->dirty_bgs) ||
3394             !btrfs_test_opt(root, SPACE_CACHE))
3395                 return 0;
3396
3397         path = btrfs_alloc_path();
3398         if (!path)
3399                 return -ENOMEM;
3400
3401         /* Could add new block groups, use _safe just in case */
3402         list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3403                                  dirty_list) {
3404                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3405                         cache_save_setup(cache, trans, path);
3406         }
3407
3408         btrfs_free_path(path);
3409         return 0;
3410 }
3411
3412 /*
3413  * transaction commit does final block group cache writeback during a
3414  * critical section where nothing is allowed to change the FS.  This is
3415  * required in order for the cache to actually match the block group,
3416  * but can introduce a lot of latency into the commit.
3417  *
3418  * So, btrfs_start_dirty_block_groups is here to kick off block group
3419  * cache IO.  There's a chance we'll have to redo some of it if the
3420  * block group changes again during the commit, but it greatly reduces
3421  * the commit latency by getting rid of the easy block groups while
3422  * we're still allowing others to join the commit.
3423  */
3424 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
3425                                    struct btrfs_root *root)
3426 {
3427         struct btrfs_block_group_cache *cache;
3428         struct btrfs_transaction *cur_trans = trans->transaction;
3429         int ret = 0;
3430         int should_put;
3431         struct btrfs_path *path = NULL;
3432         LIST_HEAD(dirty);
3433         struct list_head *io = &cur_trans->io_bgs;
3434         int num_started = 0;
3435         int loops = 0;
3436
3437         spin_lock(&cur_trans->dirty_bgs_lock);
3438         if (list_empty(&cur_trans->dirty_bgs)) {
3439                 spin_unlock(&cur_trans->dirty_bgs_lock);
3440                 return 0;
3441         }
3442         list_splice_init(&cur_trans->dirty_bgs, &dirty);
3443         spin_unlock(&cur_trans->dirty_bgs_lock);
3444
3445 again:
3446         /*
3447          * make sure all the block groups on our dirty list actually
3448          * exist
3449          */
3450         btrfs_create_pending_block_groups(trans, root);
3451
3452         if (!path) {
3453                 path = btrfs_alloc_path();
3454                 if (!path)
3455                         return -ENOMEM;
3456         }
3457
3458         /*
         * cache_write_mutex is here only to save us from a balance or the
         * automatic removal of empty block groups deleting this block group
         * while we are writing out its cache.
3462          */
3463         mutex_lock(&trans->transaction->cache_write_mutex);
3464         while (!list_empty(&dirty)) {
3465                 cache = list_first_entry(&dirty,
3466                                          struct btrfs_block_group_cache,
3467                                          dirty_list);
3468                 /*
3469                  * this can happen if something re-dirties a block
3470                  * group that is already under IO.  Just wait for it to
3471                  * finish and then do it all again
3472                  */
3473                 if (!list_empty(&cache->io_list)) {
3474                         list_del_init(&cache->io_list);
3475                         btrfs_wait_cache_io(root, trans, cache,
3476                                             &cache->io_ctl, path,
3477                                             cache->key.objectid);
3478                         btrfs_put_block_group(cache);
3479                 }
3480
3482                 /*
3483                  * btrfs_wait_cache_io uses the cache->dirty_list to decide
3484                  * if it should update the cache_state.  Don't delete
3485                  * until after we wait.
3486                  *
3487                  * Since we're not running in the commit critical section
3488                  * we need the dirty_bgs_lock to protect from update_block_group
3489                  */
3490                 spin_lock(&cur_trans->dirty_bgs_lock);
3491                 list_del_init(&cache->dirty_list);
3492                 spin_unlock(&cur_trans->dirty_bgs_lock);
3493
3494                 should_put = 1;
3495
3496                 cache_save_setup(cache, trans, path);
3497
3498                 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3499                         cache->io_ctl.inode = NULL;
3500                         ret = btrfs_write_out_cache(root, trans, cache, path);
3501                         if (ret == 0 && cache->io_ctl.inode) {
3502                                 num_started++;
3503                                 should_put = 0;
3504
3505                                 /*
3506                                  * the cache_write_mutex is protecting
3507                                  * the io_list
3508                                  */
3509                                 list_add_tail(&cache->io_list, io);
3510                         } else {
3511                                 /*
3512                                  * if we failed to write the cache, the
3513                                  * generation will be bad and life goes on
3514                                  */
3515                                 ret = 0;
3516                         }
3517                 }
3518                 if (!ret) {
3519                         ret = write_one_cache_group(trans, root, path, cache);
3520                         /*
3521                          * Our block group might still be attached to the list
3522                          * of new block groups in the transaction handle of some
3523                          * other task (struct btrfs_trans_handle->new_bgs). This
3524                          * means its block group item isn't yet in the extent
3525                          * tree. If this happens ignore the error, as we will
3526                          * try again later in the critical section of the
3527                          * transaction commit.
3528                          */
3529                         if (ret == -ENOENT) {
3530                                 ret = 0;
3531                                 spin_lock(&cur_trans->dirty_bgs_lock);
3532                                 if (list_empty(&cache->dirty_list)) {
3533                                         list_add_tail(&cache->dirty_list,
3534                                                       &cur_trans->dirty_bgs);
3535                                         btrfs_get_block_group(cache);
3536                                 }
3537                                 spin_unlock(&cur_trans->dirty_bgs_lock);
3538                         } else if (ret) {
3539                                 btrfs_abort_transaction(trans, root, ret);
3540                         }
3541                 }
3542
                /* if it's not on the io list, we need to put the block group */
3544                 if (should_put)
3545                         btrfs_put_block_group(cache);
3546
3547                 if (ret)
3548                         break;
3549
3550                 /*
3551                  * Avoid blocking other tasks for too long. It might even save
3552                  * us from writing caches for block groups that are going to be
3553                  * removed.
3554                  */
3555                 mutex_unlock(&trans->transaction->cache_write_mutex);
3556                 mutex_lock(&trans->transaction->cache_write_mutex);
3557         }
3558         mutex_unlock(&trans->transaction->cache_write_mutex);
3559
3560         /*
3561          * go through delayed refs for all the stuff we've just kicked off
3562          * and then loop back (just once)
3563          */
3564         ret = btrfs_run_delayed_refs(trans, root, 0);
3565         if (!ret && loops == 0) {
3566                 loops++;
3567                 spin_lock(&cur_trans->dirty_bgs_lock);
3568                 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3569                 /*
3570                  * dirty_bgs_lock protects us from concurrent block group
3571                  * deletes too (not just cache_write_mutex).
3572                  */
3573                 if (!list_empty(&dirty)) {
3574                         spin_unlock(&cur_trans->dirty_bgs_lock);
3575                         goto again;
3576                 }
3577                 spin_unlock(&cur_trans->dirty_bgs_lock);
3578         }
3579
3580         btrfs_free_path(path);
3581         return ret;
3582 }
3583
3584 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3585                                    struct btrfs_root *root)
3586 {
3587         struct btrfs_block_group_cache *cache;
3588         struct btrfs_transaction *cur_trans = trans->transaction;
3589         int ret = 0;
3590         int should_put;
3591         struct btrfs_path *path;
3592         struct list_head *io = &cur_trans->io_bgs;
3593         int num_started = 0;
3594
3595         path = btrfs_alloc_path();
3596         if (!path)
3597                 return -ENOMEM;
3598
3599         /*
3600          * We don't need the lock here since we are protected by the transaction
3601          * commit.  We want to do the cache_save_setup first and then run the
3602          * delayed refs to make sure we have the best chance at doing this all
3603          * in one shot.
3604          */
3605         while (!list_empty(&cur_trans->dirty_bgs)) {
3606                 cache = list_first_entry(&cur_trans->dirty_bgs,
3607                                          struct btrfs_block_group_cache,
3608                                          dirty_list);
3609
3610                 /*
3611                  * this can happen if cache_save_setup re-dirties a block
3612                  * group that is already under IO.  Just wait for it to
3613                  * finish and then do it all again
3614                  */
3615                 if (!list_empty(&cache->io_list)) {
3616                         list_del_init(&cache->io_list);
3617                         btrfs_wait_cache_io(root, trans, cache,
3618                                             &cache->io_ctl, path,
3619                                             cache->key.objectid);
3620                         btrfs_put_block_group(cache);
3621                 }
3622
3623                 /*
3624                  * don't remove from the dirty list until after we've waited
3625                  * on any pending IO
3626                  */
3627                 list_del_init(&cache->dirty_list);
3628                 should_put = 1;
3629
3630                 cache_save_setup(cache, trans, path);
3631
3632                 if (!ret)
3633                         ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
3634
3635                 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3636                         cache->io_ctl.inode = NULL;
3637                         ret = btrfs_write_out_cache(root, trans, cache, path);
3638                         if (ret == 0 && cache->io_ctl.inode) {
3639                                 num_started++;
3640                                 should_put = 0;
3641                                 list_add_tail(&cache->io_list, io);
3642                         } else {
3643                                 /*
3644                                  * if we failed to write the cache, the
3645                                  * generation will be bad and life goes on
3646                                  */
3647                                 ret = 0;
3648                         }
3649                 }
3650                 if (!ret) {
3651                         ret = write_one_cache_group(trans, root, path, cache);
3652                         if (ret)
3653                                 btrfs_abort_transaction(trans, root, ret);
3654                 }
3655
                /* if it's not on the io list, we need to put the block group */
3657                 if (should_put)
3658                         btrfs_put_block_group(cache);
3659         }
3660
3661         while (!list_empty(io)) {
3662                 cache = list_first_entry(io, struct btrfs_block_group_cache,
3663                                          io_list);
3664                 list_del_init(&cache->io_list);
3665                 btrfs_wait_cache_io(root, trans, cache,
3666                                     &cache->io_ctl, path, cache->key.objectid);
3667                 btrfs_put_block_group(cache);
3668         }
3669
3670         btrfs_free_path(path);
3671         return ret;
3672 }
3673
3674 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3675 {
3676         struct btrfs_block_group_cache *block_group;
3677         int readonly = 0;
3678
3679         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3680         if (!block_group || block_group->ro)
3681                 readonly = 1;
3682         if (block_group)
3683                 btrfs_put_block_group(block_group);
3684         return readonly;
3685 }
3686
3687 static const char *alloc_name(u64 flags)
3688 {
3689         switch (flags) {
3690         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3691                 return "mixed";
3692         case BTRFS_BLOCK_GROUP_METADATA:
3693                 return "metadata";
3694         case BTRFS_BLOCK_GROUP_DATA:
3695                 return "data";
3696         case BTRFS_BLOCK_GROUP_SYSTEM:
3697                 return "system";
3698         default:
3699                 WARN_ON(1);
3700                 return "invalid-combination";
        }
3702 }
3703
3704 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3705                              u64 total_bytes, u64 bytes_used,
3706                              struct btrfs_space_info **space_info)
3707 {
3708         struct btrfs_space_info *found;
3709         int i;
3710         int factor;
3711         int ret;
3712
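        /*
         * DUP, RAID1 and RAID10 keep two copies of every byte, so the raw
         * disk usage is twice the logical usage.
         */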
3713         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3714                      BTRFS_BLOCK_GROUP_RAID10))
3715                 factor = 2;
3716         else
3717                 factor = 1;
3718
3719         found = __find_space_info(info, flags);
3720         if (found) {
3721                 spin_lock(&found->lock);
3722                 found->total_bytes += total_bytes;
3723                 found->disk_total += total_bytes * factor;
3724                 found->bytes_used += bytes_used;
3725                 found->disk_used += bytes_used * factor;
3726                 if (total_bytes > 0)
3727                         found->full = 0;
3728                 spin_unlock(&found->lock);
3729                 *space_info = found;
3730                 return 0;
3731         }
3732         found = kzalloc(sizeof(*found), GFP_NOFS);
3733         if (!found)
3734                 return -ENOMEM;
3735
3736         ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3737         if (ret) {
3738                 kfree(found);
3739                 return ret;
3740         }
3741
3742         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3743                 INIT_LIST_HEAD(&found->block_groups[i]);
3744         init_rwsem(&found->groups_sem);
3745         spin_lock_init(&found->lock);
3746         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3747         found->total_bytes = total_bytes;
3748         found->disk_total = total_bytes * factor;
3749         found->bytes_used = bytes_used;
3750         found->disk_used = bytes_used * factor;
3751         found->bytes_pinned = 0;
3752         found->bytes_reserved = 0;
3753         found->bytes_readonly = 0;
3754         found->bytes_may_use = 0;
3755         found->full = 0;
3756         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3757         found->chunk_alloc = 0;
3758         found->flush = 0;
3759         init_waitqueue_head(&found->wait);
3760         INIT_LIST_HEAD(&found->ro_bgs);
3761
3762         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3763                                     info->space_info_kobj, "%s",
3764                                     alloc_name(found->flags));
3765         if (ret) {
3766                 kfree(found);
3767                 return ret;
3768         }
3769
3770         *space_info = found;
3771         list_add_rcu(&found->list, &info->space_info);
3772         if (flags & BTRFS_BLOCK_GROUP_DATA)
3773                 info->data_sinfo = found;
3774
3775         return ret;
3776 }
3777
3778 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3779 {
3780         u64 extra_flags = chunk_to_extended(flags) &
3781                                 BTRFS_EXTENDED_PROFILE_MASK;
3782
3783         write_seqlock(&fs_info->profiles_lock);
3784         if (flags & BTRFS_BLOCK_GROUP_DATA)
3785                 fs_info->avail_data_alloc_bits |= extra_flags;
3786         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3787                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3788         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3789                 fs_info->avail_system_alloc_bits |= extra_flags;
3790         write_sequnlock(&fs_info->profiles_lock);
3791 }
3792
3793 /*
3794  * returns target flags in extended format or 0 if restripe for this
3795  * chunk_type is not in progress
3796  *
3797  * should be called with either volume_mutex or balance_lock held
3798  */
3799 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3800 {
3801         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3802         u64 target = 0;
3803
3804         if (!bctl)
3805                 return 0;
3806
3807         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3808             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3809                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3810         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3811                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3812                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3813         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3814                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3815                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3816         }
3817
3818         return target;
3819 }
3820
3821 /*
3822  * @flags: available profiles in extended format (see ctree.h)
3823  *
3824  * Returns reduced profile in chunk format.  If profile changing is in
3825  * progress (either running or paused) picks the target profile (if it's
3826  * already available), otherwise falls back to plain reducing.
3827  */
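/*
 * E.g. if both the RAID1 and RAID0 bits are set and enough devices are
 * present, the most redundant profile wins: the RAID6..RAID0 ladder below
 * keeps only RAID1, and the result is converted back to chunk format.
 */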
3828 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3829 {
3830         u64 num_devices = root->fs_info->fs_devices->rw_devices;
3831         u64 target;
3832         u64 tmp;
3833
3834         /*
3835          * see if restripe for this chunk_type is in progress, if so
3836          * try to reduce to the target profile
3837          */
3838         spin_lock(&root->fs_info->balance_lock);
3839         target = get_restripe_target(root->fs_info, flags);
3840         if (target) {
3841                 /* pick target profile only if it's already available */
3842                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3843                         spin_unlock(&root->fs_info->balance_lock);
3844                         return extended_to_chunk(target);
3845                 }
3846         }
3847         spin_unlock(&root->fs_info->balance_lock);
3848
3849         /* First, mask out the RAID levels which aren't possible */
3850         if (num_devices == 1)
3851                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3852                            BTRFS_BLOCK_GROUP_RAID5);
3853         if (num_devices < 3)
3854                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3855         if (num_devices < 4)
3856                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3857
3858         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3859                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3860                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3861         flags &= ~tmp;
3862
3863         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3864                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3865         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3866                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3867         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3868                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3869         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3870                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3871         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3872                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3873
3874         return extended_to_chunk(flags | tmp);
3875 }
3876
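/*
 * Fold the currently available profile bits for the block group type into
 * @orig_flags, sampling under the profiles seqlock and retrying if a writer
 * raced with us, then reduce the result to what the current device count
 * can actually satisfy.
 */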
3877 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
3878 {
3879         unsigned seq;
3880         u64 flags;
3881
3882         do {
3883                 flags = orig_flags;
3884                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3885
3886                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3887                         flags |= root->fs_info->avail_data_alloc_bits;
3888                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3889                         flags |= root->fs_info->avail_system_alloc_bits;
3890                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3891                         flags |= root->fs_info->avail_metadata_alloc_bits;
3892         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3893
3894         return btrfs_reduce_alloc_profile(root, flags);
3895 }
3896
3897 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3898 {
3899         u64 flags;
3900         u64 ret;
3901
3902         if (data)
3903                 flags = BTRFS_BLOCK_GROUP_DATA;
3904         else if (root == root->fs_info->chunk_root)
3905                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3906         else
3907                 flags = BTRFS_BLOCK_GROUP_METADATA;
3908
3909         ret = get_alloc_profile(root, flags);
3910         return ret;
3911 }
3912
3913 /*
 * This will check the space info that the inode allocates from to make sure
 * we have enough space for @bytes.
3916  */
3917 int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes)
3918 {
3919         struct btrfs_space_info *data_sinfo;
3920         struct btrfs_root *root = BTRFS_I(inode)->root;
3921         struct btrfs_fs_info *fs_info = root->fs_info;
3922         u64 used;
3923         int ret = 0;
3924         int need_commit = 2;
3925         int have_pinned_space;
3926
3927         /* make sure bytes are sectorsize aligned */
3928         bytes = ALIGN(bytes, root->sectorsize);
3929
3930         if (btrfs_is_free_space_inode(inode)) {
3931                 need_commit = 0;
3932                 ASSERT(current->journal_info);
3933         }
3934
3935         data_sinfo = fs_info->data_sinfo;
3936         if (!data_sinfo)
3937                 goto alloc;
3938
3939 again:
3940         /* make sure we have enough space to handle the data first */
3941         spin_lock(&data_sinfo->lock);
3942         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3943                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3944                 data_sinfo->bytes_may_use;
3945
3946         if (used + bytes > data_sinfo->total_bytes) {
3947                 struct btrfs_trans_handle *trans;
3948
3949                 /*
3950                  * if we don't have enough free bytes in this space then we need
3951                  * to alloc a new chunk.
3952                  */
3953                 if (!data_sinfo->full) {
3954                         u64 alloc_target;
3955
3956                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3957                         spin_unlock(&data_sinfo->lock);
3958 alloc:
3959                         alloc_target = btrfs_get_alloc_profile(root, 1);
3960                         /*
                         * It is ugly that we don't call a nolock join
                         * transaction for the free space inode case here.
                         * But it is safe because we only do the data space
                         * reservation for the free space cache in a
                         * transaction context; the common join transaction
                         * just increases the counter of the current
                         * transaction handle and doesn't try to acquire the
                         * trans_lock of the fs.
3969                          */
3970                         trans = btrfs_join_transaction(root);
3971                         if (IS_ERR(trans))
3972                                 return PTR_ERR(trans);
3973
3974                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3975                                              alloc_target,
3976                                              CHUNK_ALLOC_NO_FORCE);
3977                         btrfs_end_transaction(trans, root);
3978                         if (ret < 0) {
3979                                 if (ret != -ENOSPC)
3980                                         return ret;
3981                                 else {
3982                                         have_pinned_space = 1;
3983                                         goto commit_trans;
3984                                 }
3985                         }
3986
3987                         if (!data_sinfo)
3988                                 data_sinfo = fs_info->data_sinfo;
3989
3990                         goto again;
3991                 }
3992
3993                 /*
3994                  * If we don't have enough pinned space to deal with this
3995                  * allocation, and no removed chunk in current transaction,
3996                  * don't bother committing the transaction.
3997                  */
3998                 have_pinned_space = percpu_counter_compare(
3999                         &data_sinfo->total_bytes_pinned,
4000                         used + bytes - data_sinfo->total_bytes);
4001                 spin_unlock(&data_sinfo->lock);
4002
4003                 /* commit the current transaction and try again */
4004 commit_trans:
4005                 if (need_commit &&
4006                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
4007                         need_commit--;
4008
4009                         if (need_commit > 0)
4010                                 btrfs_wait_ordered_roots(fs_info, -1);
4011
4012                         trans = btrfs_join_transaction(root);
4013                         if (IS_ERR(trans))
4014                                 return PTR_ERR(trans);
4015                         if (have_pinned_space >= 0 ||
4016                             trans->transaction->have_free_bgs ||
4017                             need_commit > 0) {
4018                                 ret = btrfs_commit_transaction(trans, root);
4019                                 if (ret)
4020                                         return ret;
4021                                 /*
4022                                  * make sure that all running delayed iputs
4023                                  * are done
4024                                  */
4025                                 down_write(&root->fs_info->delayed_iput_sem);
4026                                 up_write(&root->fs_info->delayed_iput_sem);
4027                                 goto again;
4028                         } else {
4029                                 btrfs_end_transaction(trans, root);
4030                         }
4031                 }
4032
4033                 trace_btrfs_space_reservation(root->fs_info,
4034                                               "space_info:enospc",
4035                                               data_sinfo->flags, bytes, 1);
4036                 return -ENOSPC;
4037         }
4038         ret = btrfs_qgroup_reserve(root, write_bytes);
4039         if (ret)
4040                 goto out;
4041         data_sinfo->bytes_may_use += bytes;
4042         trace_btrfs_space_reservation(root->fs_info, "space_info",
4043                                       data_sinfo->flags, bytes, 1);
4044 out:
4045         spin_unlock(&data_sinfo->lock);
4046
4047         return ret;
4048 }
4049
4050 /*
4051  * Called if we need to clear a data reservation for this inode.
4052  */
4053 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
4054 {
4055         struct btrfs_root *root = BTRFS_I(inode)->root;
4056         struct btrfs_space_info *data_sinfo;
4057
4058         /* make sure bytes are sectorsize aligned */
4059         bytes = ALIGN(bytes, root->sectorsize);
4060
4061         data_sinfo = root->fs_info->data_sinfo;
4062         spin_lock(&data_sinfo->lock);
4063         WARN_ON(data_sinfo->bytes_may_use < bytes);
4064         data_sinfo->bytes_may_use -= bytes;
4065         trace_btrfs_space_reservation(root->fs_info, "space_info",
4066                                       data_sinfo->flags, bytes, 0);
4067         spin_unlock(&data_sinfo->lock);
4068 }
4069
4070 static void force_metadata_allocation(struct btrfs_fs_info *info)
4071 {
4072         struct list_head *head = &info->space_info;
4073         struct btrfs_space_info *found;
4074
4075         rcu_read_lock();
4076         list_for_each_entry_rcu(found, head, list) {
4077                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4078                         found->force_alloc = CHUNK_ALLOC_FORCE;
4079         }
4080         rcu_read_unlock();
4081 }
4082
4083 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4084 {
4085         return (global->size << 1);
4086 }
4087
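/*
 * Decide whether a new chunk should be allocated for @sinfo.  A sketch of
 * the policy as implemented below: CHUNK_ALLOC_FORCE always allocates,
 * CHUNK_ALLOC_LIMITED tries to keep about 1% of the FS size free, and
 * otherwise we allocate once roughly 80% of the existing space is used.
 */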
4088 static int should_alloc_chunk(struct btrfs_root *root,
4089                               struct btrfs_space_info *sinfo, int force)
4090 {
4091         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4092         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
4093         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
4094         u64 thresh;
4095
4096         if (force == CHUNK_ALLOC_FORCE)
4097                 return 1;
4098
4099         /*
4100          * We need to take into account the global rsv because for all intents
4101          * and purposes it's used space.  Don't worry about locking the
4102          * global_rsv, it doesn't change except when the transaction commits.
4103          */
4104         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
4105                 num_allocated += calc_global_rsv_need_space(global_rsv);
4106
4107         /*
4108          * in limited mode, we want to have some free space up to
4109          * about 1% of the FS size.
4110          */
4111         if (force == CHUNK_ALLOC_LIMITED) {
4112                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
4113                 thresh = max_t(u64, 64 * 1024 * 1024,
4114                                div_factor_fine(thresh, 1));
4115
4116                 if (num_bytes - num_allocated < thresh)
4117                         return 1;
4118         }
4119
4120         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
4121                 return 0;
4122         return 1;
4123 }
4124
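/*
 * Return the number of devices whose device items need updating when a
 * chunk of the given profile @type is allocated or removed: all rw
 * devices for striped profiles, 2 for RAID1, 1 for DUP/single.
 */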
4125 static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
4126 {
4127         u64 num_dev;
4128
4129         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4130                     BTRFS_BLOCK_GROUP_RAID0 |
4131                     BTRFS_BLOCK_GROUP_RAID5 |
4132                     BTRFS_BLOCK_GROUP_RAID6))
4133                 num_dev = root->fs_info->fs_devices->rw_devices;
4134         else if (type & BTRFS_BLOCK_GROUP_RAID1)
4135                 num_dev = 2;
4136         else
4137                 num_dev = 1;    /* DUP or single */
4138
4139         return num_dev;
4140 }
4141
4142 /*
4143  * Reserve space in the system space info necessary for updating device
4144  * items and adding or removing a chunk item of profile @type, allocating
4145  * a new system chunk first if there isn't enough left.
4146  */
4147 void check_system_chunk(struct btrfs_trans_handle *trans,
4148                         struct btrfs_root *root,
4149                         u64 type)
4150 {
4151         struct btrfs_space_info *info;
4152         u64 left;
4153         u64 thresh;
4154         int ret = 0;
4155         u64 num_devs;
4156
4157         /*
4158          * Needed because we can end up allocating a system chunk and need an
4159          * atomic and race-free space reservation in the chunk block reserve.
4160          */
4161         ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));
4162
4163         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4164         spin_lock(&info->lock);
4165         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
4166                 info->bytes_reserved - info->bytes_readonly -
4167                 info->bytes_may_use;
4168         spin_unlock(&info->lock);
4169
4170         num_devs = get_profile_num_devs(root, type);
4171
4172         /* num_devs device items to update and 1 chunk item to add or remove */
4173         thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
4174                 btrfs_calc_trans_metadata_size(root, 1);
4175
4176         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
4177                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
4178                         left, thresh, type);
4179                 dump_space_info(info, 0, 0);
4180         }
4181
4182         if (left < thresh) {
4183                 u64 flags;
4184
4185                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
4186                 /*
4187                  * Ignore failure to create system chunk. We might end up not
4188          * needing it, as we might not need to COW all nodes/leaves from
4189                  * the paths we visit in the chunk tree (they were already COWed
4190                  * or created in the current transaction for example).
4191                  */
4192                 ret = btrfs_alloc_chunk(trans, root, flags);
4193         }
4194
4195         if (!ret) {
4196                 ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
4197                                           &root->fs_info->chunk_block_rsv,
4198                                           thresh, BTRFS_RESERVE_NO_FLUSH);
4199                 if (!ret)
4200                         trans->chunk_bytes_reserved += thresh;
4201         }
4202 }
4203
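/*
 * Try to allocate a chunk with the given profile @flags.  Returns 1 if a
 * chunk was allocated, 0 if no allocation was needed, -ENOSPC if the
 * space info is full or we re-entered from the same transaction handle,
 * and other negative errnos on failure.
 */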
4204 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
4205                           struct btrfs_root *extent_root, u64 flags, int force)
4206 {
4207         struct btrfs_space_info *space_info;
4208         struct btrfs_fs_info *fs_info = extent_root->fs_info;
4209         int wait_for_alloc = 0;
4210         int ret = 0;
4211
4212         /* Don't re-enter if we're already allocating a chunk */
4213         if (trans->allocating_chunk)
4214                 return -ENOSPC;
4215
4216         space_info = __find_space_info(extent_root->fs_info, flags);
4217         if (!space_info) {
4218                 ret = update_space_info(extent_root->fs_info, flags,
4219                                         0, 0, &space_info);
4220                 BUG_ON(ret); /* -ENOMEM */
4221         }
4222         BUG_ON(!space_info); /* Logic error */
4223
4224 again:
4225         spin_lock(&space_info->lock);
4226         if (force < space_info->force_alloc)
4227                 force = space_info->force_alloc;
4228         if (space_info->full) {
4229                 if (should_alloc_chunk(extent_root, space_info, force))
4230                         ret = -ENOSPC;
4231                 else
4232                         ret = 0;
4233                 spin_unlock(&space_info->lock);
4234                 return ret;
4235         }
4236
4237         if (!should_alloc_chunk(extent_root, space_info, force)) {
4238                 spin_unlock(&space_info->lock);
4239                 return 0;
4240         } else if (space_info->chunk_alloc) {
4241                 wait_for_alloc = 1;
4242         } else {
4243                 space_info->chunk_alloc = 1;
4244         }
4245
4246         spin_unlock(&space_info->lock);
4247
4248         mutex_lock(&fs_info->chunk_mutex);
4249
4250         /*
4251          * The chunk_mutex is held throughout the entirety of a chunk
4252          * allocation, so once we've acquired the chunk_mutex we know that the
4253          * other guy is done and we need to recheck and see if we should
4254          * allocate.
4255          */
4256         if (wait_for_alloc) {
4257                 mutex_unlock(&fs_info->chunk_mutex);
4258                 wait_for_alloc = 0;
4259                 goto again;
4260         }
4261
4262         trans->allocating_chunk = true;
4263
4264         /*
4265          * If we have mixed data/metadata chunks we want to make sure we keep
4266          * allocating mixed chunks instead of individual chunks.
4267          */
4268         if (btrfs_mixed_space_info(space_info))
4269                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4270
4271         /*
4272          * if we're doing a data chunk, go ahead and make sure that
4273          * we keep a reasonable number of metadata chunks allocated in the
4274          * FS as well.
4275          */
4276         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4277                 fs_info->data_chunk_allocations++;
4278                 if (!(fs_info->data_chunk_allocations %
4279                       fs_info->metadata_ratio))
4280                         force_metadata_allocation(fs_info);
4281         }
4282
4283         /*
4284          * Check if we have enough space in SYSTEM chunk because we may need
4285          * to update devices.
4286          */
4287         check_system_chunk(trans, extent_root, flags);
4288
4289         ret = btrfs_alloc_chunk(trans, extent_root, flags);
4290         trans->allocating_chunk = false;
4291
4292         spin_lock(&space_info->lock);
4293         if (ret < 0 && ret != -ENOSPC)
4294                 goto out;
4295         if (ret)
4296                 space_info->full = 1;
4297         else
4298                 ret = 1;
4299
4300         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4301 out:
4302         space_info->chunk_alloc = 0;
4303         spin_unlock(&space_info->lock);
4304         mutex_unlock(&fs_info->chunk_mutex);
4305         /*
4306          * When we allocate a new chunk we reserve space in the chunk block
4307          * reserve to make sure we can COW nodes/leaves in the chunk tree or
4308          * add new nodes/leaves to it if we end up needing to do it when
4309          * inserting the chunk item and updating device items as part of the
4310          * second phase of chunk allocation, performed by
4311          * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4312          * large number of new block groups to create in our transaction
4313          * handle's new_bgs list to avoid exhausting the chunk block reserve
4314          * in extreme cases - like having a single transaction create many new
4315          * block groups when starting to write out the free space caches of all
4316          * the block groups that were made dirty during the lifetime of the
4317          * transaction.
4318          */
4319         if (trans->can_flush_pending_bgs &&
4320             trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
4321                 btrfs_create_pending_block_groups(trans, trans->root);
4322                 btrfs_trans_release_chunk_metadata(trans);
4323         }
4324         return ret;
4325 }
4326
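/*
 * Decide whether a reservation of @bytes may overcommit the space info.
 * The free device space is scaled down by the raid profile and by how
 * much we are willing to risk given the allowed flush level; see the
 * comments below for the exact factors.
 */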
4327 static int can_overcommit(struct btrfs_root *root,
4328                           struct btrfs_space_info *space_info, u64 bytes,
4329                           enum btrfs_reserve_flush_enum flush)
4330 {
4331         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4332         u64 profile = btrfs_get_alloc_profile(root, 0);
4333         u64 space_size;
4334         u64 avail;
4335         u64 used;
4336
4337         used = space_info->bytes_used + space_info->bytes_reserved +
4338                 space_info->bytes_pinned + space_info->bytes_readonly;
4339
4340         /*
4341          * We only want to allow over committing if we have lots of actual space
4342          * free, but if we don't have enough space to handle the global reserve
4343          * space then we could end up having a real enospc problem when trying
4344          * to allocate a chunk or some other such important allocation.
4345          */
4346         spin_lock(&global_rsv->lock);
4347         space_size = calc_global_rsv_need_space(global_rsv);
4348         spin_unlock(&global_rsv->lock);
4349         if (used + space_size >= space_info->total_bytes)
4350                 return 0;
4351
4352         used += space_info->bytes_may_use;
4353
4354         spin_lock(&root->fs_info->free_chunk_lock);
4355         avail = root->fs_info->free_chunk_space;
4356         spin_unlock(&root->fs_info->free_chunk_lock);
4357
4358         /*
4359          * If we have dup, raid1 or raid10 then only half of the free
4360          * space is actually usable.  For raid56, the space info used
4361          * doesn't include the parity drive, so we don't have to
4362          * change the math.
4363          */
4364         if (profile & (BTRFS_BLOCK_GROUP_DUP |
4365                        BTRFS_BLOCK_GROUP_RAID1 |
4366                        BTRFS_BLOCK_GROUP_RAID10))
4367                 avail >>= 1;
4368
4369         /*
4370          * If we aren't flushing all things, let us overcommit up to
4371          * half of the space. If we can flush everything, don't let us
4372          * overcommit too much, only up to 1/8 of the space.
4373          */
4374         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4375                 avail >>= 3;
4376         else
4377                 avail >>= 1;
4378
4379         if (used + bytes < space_info->total_bytes + avail)
4380                 return 1;
4381         return 0;
4382 }
4383
4384 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4385                                          unsigned long nr_pages, int nr_items)
4386 {
4387         struct super_block *sb = root->fs_info->sb;
4388
4389         if (down_read_trylock(&sb->s_umount)) {
4390                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4391                 up_read(&sb->s_umount);
4392         } else {
4393                 /*
4394                  * We needn't worry about the filesystem going from r/w to
4395                  * r/o even though we don't acquire the ->s_umount mutex,
4396                  * because the filesystem should guarantee that the delalloc
4397                  * inode list is empty after the filesystem becomes read-only
4398                  * (all dirty pages have been written to disk).
4399                  */
4400                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4401                 if (!current->journal_info)
4402                         btrfs_wait_ordered_roots(root->fs_info, nr_items);
4403         }
4404 }
4405
4406 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4407 {
4408         u64 bytes;
4409         int nr;
4410
4411         bytes = btrfs_calc_trans_metadata_size(root, 1);
4412         nr = (int)div64_u64(to_reclaim, bytes);
4413         if (!nr)
4414                 nr = 1;
4415         return nr;
4416 }
4417
4418 #define EXTENT_SIZE_PER_ITEM    (256 * 1024)
4419
4420 /*
4421  * shrink metadata reservation for delalloc
4422  */
4423 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4424                             bool wait_ordered)
4425 {
4426         struct btrfs_block_rsv *block_rsv;
4427         struct btrfs_space_info *space_info;
4428         struct btrfs_trans_handle *trans;
4429         u64 delalloc_bytes;
4430         u64 max_reclaim;
4431         long time_left;
4432         unsigned long nr_pages;
4433         int loops;
4434         int items;
4435         enum btrfs_reserve_flush_enum flush;
4436
4437         /* Calc the number of items we need to flush for this reservation */
4438         items = calc_reclaim_items_nr(root, to_reclaim);
4439         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4440
4441         trans = (struct btrfs_trans_handle *)current->journal_info;
4442         block_rsv = &root->fs_info->delalloc_block_rsv;
4443         space_info = block_rsv->space_info;
4444
4445         delalloc_bytes = percpu_counter_sum_positive(
4446                                                 &root->fs_info->delalloc_bytes);
4447         if (delalloc_bytes == 0) {
4448                 if (trans)
4449                         return;
4450                 if (wait_ordered)
4451                         btrfs_wait_ordered_roots(root->fs_info, items);
4452                 return;
4453         }
4454
4455         loops = 0;
4456         while (delalloc_bytes && loops < 3) {
4457                 max_reclaim = min(delalloc_bytes, to_reclaim);
4458                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4459                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4460                 /*
4461                  * We need to wait for the async pages to actually start before
4462                  * we do anything.
4463                  */
4464                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4465                 if (!max_reclaim)
4466                         goto skip_async;
4467
4468                 if (max_reclaim <= nr_pages)
4469                         max_reclaim = 0;
4470                 else
4471                         max_reclaim -= nr_pages;
4472
4473                 wait_event(root->fs_info->async_submit_wait,
4474                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4475                            (int)max_reclaim);
4476 skip_async:
4477                 if (!trans)
4478                         flush = BTRFS_RESERVE_FLUSH_ALL;
4479                 else
4480                         flush = BTRFS_RESERVE_NO_FLUSH;
4481                 spin_lock(&space_info->lock);
4482                 if (can_overcommit(root, space_info, orig, flush)) {
4483                         spin_unlock(&space_info->lock);
4484                         break;
4485                 }
4486                 spin_unlock(&space_info->lock);
4487
4488                 loops++;
4489                 if (wait_ordered && !trans) {
4490                         btrfs_wait_ordered_roots(root->fs_info, items);
4491                 } else {
4492                         time_left = schedule_timeout_killable(1);
4493                         if (time_left)
4494                                 break;
4495                 }
4496                 delalloc_bytes = percpu_counter_sum_positive(
4497                                                 &root->fs_info->delalloc_bytes);
4498         }
4499 }
4500
4501 /**
4502  * may_commit_transaction - possibly commit the transaction if it's ok to
4503  * @root - the root we're allocating for
4504  * @space_info - the space_info we're allocating from
4505  * @bytes - the number of bytes we want to reserve
4506  * @force - force the commit
4507  * This will check to make sure that committing the transaction will actually
4508  * get us somewhere and then commit the transaction if it does.  Otherwise it
4509  * will return -ENOSPC.
4510  */
4511 static int may_commit_transaction(struct btrfs_root *root,
4512                                   struct btrfs_space_info *space_info,
4513                                   u64 bytes, int force)
4514 {
4515         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4516         struct btrfs_trans_handle *trans;
4517
4518         trans = (struct btrfs_trans_handle *)current->journal_info;
4519         if (trans)
4520                 return -EAGAIN;
4521
4522         if (force)
4523                 goto commit;
4524
4525         /* See if there is enough pinned space to make this reservation */
4526         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4527                                    bytes) >= 0)
4528                 goto commit;
4529
4530         /*
4531          * See if there is some space in the delayed insertion reservation for
4532          * this reservation.
4533          */
4534         if (space_info != delayed_rsv->space_info)
4535                 return -ENOSPC;
4536
4537         spin_lock(&delayed_rsv->lock);
4538         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4539                                    bytes - delayed_rsv->size) >= 0) {
4540                 spin_unlock(&delayed_rsv->lock);
4541                 return -ENOSPC;
4542         }
4543         spin_unlock(&delayed_rsv->lock);
4544
4545 commit:
4546         trans = btrfs_join_transaction(root);
4547         if (IS_ERR(trans))
4548                 return -ENOSPC;
4549
4550         return btrfs_commit_transaction(trans, root);
4551 }
4552
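/*
 * The flushing states, tried by flush_space() in order of increasing
 * cost until enough space has been reclaimed.
 */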
4553 enum flush_state {
4554         FLUSH_DELAYED_ITEMS_NR  =       1,
4555         FLUSH_DELAYED_ITEMS     =       2,
4556         FLUSH_DELALLOC          =       3,
4557         FLUSH_DELALLOC_WAIT     =       4,
4558         ALLOC_CHUNK             =       5,
4559         COMMIT_TRANS            =       6,
4560 };
4561
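/*
 * Run a single reclaim step for @space_info according to @state: run
 * delayed items, flush delalloc, allocate a chunk, or commit the
 * transaction.  Returns 0 on success or a negative errno.
 */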
4562 static int flush_space(struct btrfs_root *root,
4563                        struct btrfs_space_info *space_info, u64 num_bytes,
4564                        u64 orig_bytes, int state)
4565 {
4566         struct btrfs_trans_handle *trans;
4567         int nr;
4568         int ret = 0;
4569
4570         switch (state) {
4571         case FLUSH_DELAYED_ITEMS_NR:
4572         case FLUSH_DELAYED_ITEMS:
4573                 if (state == FLUSH_DELAYED_ITEMS_NR)
4574                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4575                 else
4576                         nr = -1;
4577
4578                 trans = btrfs_join_transaction(root);
4579                 if (IS_ERR(trans)) {
4580                         ret = PTR_ERR(trans);
4581                         break;
4582                 }
4583                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4584                 btrfs_end_transaction(trans, root);
4585                 break;
4586         case FLUSH_DELALLOC:
4587         case FLUSH_DELALLOC_WAIT:
4588                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4589                                 state == FLUSH_DELALLOC_WAIT);
4590                 break;
4591         case ALLOC_CHUNK:
4592                 trans = btrfs_join_transaction(root);
4593                 if (IS_ERR(trans)) {
4594                         ret = PTR_ERR(trans);
4595                         break;
4596                 }
4597                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4598                                      btrfs_get_alloc_profile(root, 0),
4599                                      CHUNK_ALLOC_NO_FORCE);
4600                 btrfs_end_transaction(trans, root);
4601                 if (ret == -ENOSPC)
4602                         ret = 0;
4603                 break;
4604         case COMMIT_TRANS:
4605                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4606                 break;
4607         default:
4608                 ret = -ENOSPC;
4609                 break;
4610         }
4611
4612         return ret;
4613 }
4614
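/*
 * Estimate how much metadata space the async reclaim worker should try
 * to free: zero if we can already overcommit, otherwise the amount used
 * beyond ~90-95% of the total space, capped by what is reclaimable.
 */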
4615 static inline u64
4616 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4617                                  struct btrfs_space_info *space_info)
4618 {
4619         u64 used;
4620         u64 expected;
4621         u64 to_reclaim;
4622
4623         to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
4624                                 16 * 1024 * 1024);
4625         spin_lock(&space_info->lock);
4626         if (can_overcommit(root, space_info, to_reclaim,
4627                            BTRFS_RESERVE_FLUSH_ALL)) {
4628                 to_reclaim = 0;
4629                 goto out;
4630         }
4631
4632         used = space_info->bytes_used + space_info->bytes_reserved +
4633                space_info->bytes_pinned + space_info->bytes_readonly +
4634                space_info->bytes_may_use;
4635         if (can_overcommit(root, space_info, 1024 * 1024,
4636                            BTRFS_RESERVE_FLUSH_ALL))
4637                 expected = div_factor_fine(space_info->total_bytes, 95);
4638         else
4639                 expected = div_factor_fine(space_info->total_bytes, 90);
4640
4641         if (used > expected)
4642                 to_reclaim = used - expected;
4643         else
4644                 to_reclaim = 0;
4645         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4646                                      space_info->bytes_reserved);
4647 out:
4648         spin_unlock(&space_info->lock);
4649
4650         return to_reclaim;
4651 }
4652
4653 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4654                                         struct btrfs_fs_info *fs_info, u64 used)
4655 {
4656         u64 thresh = div_factor_fine(space_info->total_bytes, 98);
4657
4658         /* If we're just plain full then async reclaim just slows us down. */
4659         if (space_info->bytes_used >= thresh)
4660                 return 0;
4661
4662         return (used >= thresh && !btrfs_fs_closing(fs_info) &&
4663                 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4664 }
4665
4666 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4667                                        struct btrfs_fs_info *fs_info,
4668                                        int flush_state)
4669 {
4670         u64 used;
4671
4672         spin_lock(&space_info->lock);
4673         /*
4674          * We've run out of space and have not freed any space via flush_space,
4675          * so don't bother doing async reclaim.
4676          */
4677         if (flush_state > COMMIT_TRANS && space_info->full) {
4678                 spin_unlock(&space_info->lock);
4679                 return 0;
4680         }
4681
4682         used = space_info->bytes_used + space_info->bytes_reserved +
4683                space_info->bytes_pinned + space_info->bytes_readonly +
4684                space_info->bytes_may_use;
4685         if (need_do_async_reclaim(space_info, fs_info, used)) {
4686                 spin_unlock(&space_info->lock);
4687                 return 1;
4688         }
4689         spin_unlock(&space_info->lock);
4690
4691         return 0;
4692 }
4693
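/*
 * Background work item that steps through the flush states to reclaim
 * metadata space until the pressure on the metadata space info eases.
 */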
4694 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4695 {
4696         struct btrfs_fs_info *fs_info;
4697         struct btrfs_space_info *space_info;
4698         u64 to_reclaim;
4699         int flush_state;
4700
4701         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4702         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4703
4704         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4705                                                       space_info);
4706         if (!to_reclaim)
4707                 return;
4708
4709         flush_state = FLUSH_DELAYED_ITEMS_NR;
4710         do {
4711                 flush_space(fs_info->fs_root, space_info, to_reclaim,
4712                             to_reclaim, flush_state);
4713                 flush_state++;
4714                 if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4715                                                  flush_state))
4716                         return;
4717         } while (flush_state < COMMIT_TRANS);
4718 }
4719
4720 void btrfs_init_async_reclaim_work(struct work_struct *work)
4721 {
4722         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4723 }
4724
4725 /**
4726  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4727  * @root - the root we're allocating for
4728  * @block_rsv - the block_rsv we're allocating for
4729  * @orig_bytes - the number of bytes we want
4730  * @flush - whether or not we can flush to make our reservation
4731  *
4732  * This will reserve orig_bytes number of bytes from the space info associated
4733  * with the block_rsv.  If there is not enough space it will make an attempt to
4734  * flush out space to make room.  It will do this by flushing delalloc if
4735  * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
4736  * then no attempts to regain reservations will be made and this will fail if
4737  * there is not enough space already.
4738  */
4739 static int reserve_metadata_bytes(struct btrfs_root *root,
4740                                   struct btrfs_block_rsv *block_rsv,
4741                                   u64 orig_bytes,
4742                                   enum btrfs_reserve_flush_enum flush)
4743 {
4744         struct btrfs_space_info *space_info = block_rsv->space_info;
4745         u64 used;
4746         u64 num_bytes = orig_bytes;
4747         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4748         int ret = 0;
4749         bool flushing = false;
4750
4751 again:
4752         ret = 0;
4753         spin_lock(&space_info->lock);
4754         /*
4755          * We only want to wait if somebody other than us is flushing and we
4756          * are actually allowed to flush all things.
4757          */
4758         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4759                space_info->flush) {
4760                 spin_unlock(&space_info->lock);
4761                 /*
4762                  * If we have a trans handle we can't wait because the flusher
4763                  * may have to commit the transaction, which would mean we would
4764                  * deadlock since we are waiting for the flusher to finish, but
4765                  * hold the current transaction open.
4766                  */
4767                 if (current->journal_info)
4768                         return -EAGAIN;
4769                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4770                 /* Must have been killed, return */
4771                 if (ret)
4772                         return -EINTR;
4773
4774                 spin_lock(&space_info->lock);
4775         }
4776
4777         ret = -ENOSPC;
4778         used = space_info->bytes_used + space_info->bytes_reserved +
4779                 space_info->bytes_pinned + space_info->bytes_readonly +
4780                 space_info->bytes_may_use;
4781
4782         /*
4783          * The idea here is that if we've not already over-reserved the
4784          * space info then we can go ahead and save our reservation first
4785          * and then start flushing if we need to.  Otherwise, if we've
4786          * already overcommitted, let's start flushing stuff first and then
4787          * come back and try to make our reservation.
4788          */
4789         if (used <= space_info->total_bytes) {
4790                 if (used + orig_bytes <= space_info->total_bytes) {
4791                         space_info->bytes_may_use += orig_bytes;
4792                         trace_btrfs_space_reservation(root->fs_info,
4793                                 "space_info", space_info->flags, orig_bytes, 1);
4794                         ret = 0;
4795                 } else {
4796                         /*
4797                          * Ok, set num_bytes to orig_bytes since we aren't
4798                          * overcommitted, this way we only try to reclaim what
4799                          * we need.
4800                          */
4801                         num_bytes = orig_bytes;
4802                 }
4803         } else {
4804                 /*
4805                  * Ok, we're overcommitted; set num_bytes to the overcommitted
4806                  * amount plus the amount of bytes that we need for this
4807                  * reservation.
4808                  */
4809                 num_bytes = used - space_info->total_bytes +
4810                         (orig_bytes * 2);
4811         }
4812
4813         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4814                 space_info->bytes_may_use += orig_bytes;
4815                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4816                                               space_info->flags, orig_bytes,
4817                                               1);
4818                 ret = 0;
4819         }
4820
4821         /*
4822          * Couldn't make our reservation, save our place so while we're trying
4823          * to reclaim space we can actually use it instead of somebody else
4824          * stealing it from us.
4825          *
4826          * We make the other tasks wait for the flush only when we can flush
4827          * all things.
4828          */
4829         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4830                 flushing = true;
4831                 space_info->flush = 1;
4832         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
4833                 used += orig_bytes;
4834                 /*
4835                  * We will do the space reservation dance during log replay,
4836                  * which means we won't have fs_info->fs_root set, so don't do
4837                  * the async reclaim as we will panic.
4838                  */
4839                 if (!root->fs_info->log_root_recovering &&
4840                     need_do_async_reclaim(space_info, root->fs_info, used) &&
4841                     !work_busy(&root->fs_info->async_reclaim_work))
4842                         queue_work(system_unbound_wq,
4843                                    &root->fs_info->async_reclaim_work);
4844         }
4845         spin_unlock(&space_info->lock);
4846
4847         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4848                 goto out;
4849
4850         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4851                           flush_state);
4852         flush_state++;
4853
4854         /*
4855          * If we are FLUSH_LIMIT, we can't flush delalloc, or a deadlock
4856          * could happen. So skip the delalloc flushing states.
4857          */
4858         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4859             (flush_state == FLUSH_DELALLOC ||
4860              flush_state == FLUSH_DELALLOC_WAIT))
4861                 flush_state = ALLOC_CHUNK;
4862
4863         if (!ret)
4864                 goto again;
4865         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4866                  flush_state < COMMIT_TRANS)
4867                 goto again;
4868         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4869                  flush_state <= COMMIT_TRANS)
4870                 goto again;
4871
4872 out:
4873         if (ret == -ENOSPC &&
4874             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4875                 struct btrfs_block_rsv *global_rsv =
4876                         &root->fs_info->global_block_rsv;
4877
4878                 if (block_rsv != global_rsv &&
4879                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4880                         ret = 0;
4881         }
4882         if (ret == -ENOSPC)
4883                 trace_btrfs_space_reservation(root->fs_info,
4884                                               "space_info:enospc",
4885                                               space_info->flags, orig_bytes, 1);
4886         if (flushing) {
4887                 spin_lock(&space_info->lock);
4888                 space_info->flush = 0;
4889                 wake_up_all(&space_info->wait);
4890                 spin_unlock(&space_info->lock);
4891         }
4892         return ret;
4893 }
4894
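/*
 * Pick the block reserve to charge for a tree block allocation: the
 * transaction's reserve for COW-able roots and the csum/uuid special
 * cases, otherwise the root's own reserve, otherwise the empty reserve.
 */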
4895 static struct btrfs_block_rsv *get_block_rsv(
4896                                         const struct btrfs_trans_handle *trans,
4897                                         const struct btrfs_root *root)
4898 {
4899         struct btrfs_block_rsv *block_rsv = NULL;
4900
4901         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4902                 block_rsv = trans->block_rsv;
4903
4904         if (root == root->fs_info->csum_root && trans->adding_csums)
4905                 block_rsv = trans->block_rsv;
4906
4907         if (root == root->fs_info->uuid_root)
4908                 block_rsv = trans->block_rsv;
4909
4910         if (!block_rsv)
4911                 block_rsv = root->block_rsv;
4912
4913         if (!block_rsv)
4914                 block_rsv = &root->fs_info->empty_block_rsv;
4915
4916         return block_rsv;
4917 }
4918
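/*
 * Consume @num_bytes of reserved space from @block_rsv.  Returns 0 on
 * success or -ENOSPC if the reserve doesn't hold enough.
 */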
4919 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4920                                u64 num_bytes)
4921 {
4922         int ret = -ENOSPC;
4923         spin_lock(&block_rsv->lock);
4924         if (block_rsv->reserved >= num_bytes) {
4925                 block_rsv->reserved -= num_bytes;
4926                 if (block_rsv->reserved < block_rsv->size)
4927                         block_rsv->full = 0;
4928                 ret = 0;
4929         }
4930         spin_unlock(&block_rsv->lock);
4931         return ret;
4932 }
4933
4934 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4935                                 u64 num_bytes, int update_size)
4936 {
4937         spin_lock(&block_rsv->lock);
4938         block_rsv->reserved += num_bytes;
4939         if (update_size)
4940                 block_rsv->size += num_bytes;
4941         else if (block_rsv->reserved >= block_rsv->size)
4942                 block_rsv->full = 1;
4943         spin_unlock(&block_rsv->lock);
4944 }
4945
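/*
 * Move @num_bytes from the global reserve to @dest, but only if the
 * global reserve would still hold at least @min_factor tenths of its
 * size afterwards.
 */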
4946 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4947                              struct btrfs_block_rsv *dest, u64 num_bytes,
4948                              int min_factor)
4949 {
4950         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4951         u64 min_bytes;
4952
4953         if (global_rsv->space_info != dest->space_info)
4954                 return -ENOSPC;
4955
4956         spin_lock(&global_rsv->lock);
4957         min_bytes = div_factor(global_rsv->size, min_factor);
4958         if (global_rsv->reserved < min_bytes + num_bytes) {
4959                 spin_unlock(&global_rsv->lock);
4960                 return -ENOSPC;
4961         }
4962         global_rsv->reserved -= num_bytes;
4963         if (global_rsv->reserved < global_rsv->size)
4964                 global_rsv->full = 0;
4965         spin_unlock(&global_rsv->lock);
4966
4967         block_rsv_add_bytes(dest, num_bytes, 1);
4968         return 0;
4969 }
4970
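/*
 * Shrink @block_rsv by @num_bytes ((u64)-1 means the whole size).  Any
 * excess reservation is used to refill @dest, and whatever is left over
 * is returned to the space info's bytes_may_use.
 */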
4971 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4972                                     struct btrfs_block_rsv *block_rsv,
4973                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4974 {
4975         struct btrfs_space_info *space_info = block_rsv->space_info;
4976
4977         spin_lock(&block_rsv->lock);
4978         if (num_bytes == (u64)-1)
4979                 num_bytes = block_rsv->size;
4980         block_rsv->size -= num_bytes;
4981         if (block_rsv->reserved >= block_rsv->size) {
4982                 num_bytes = block_rsv->reserved - block_rsv->size;
4983                 block_rsv->reserved = block_rsv->size;
4984                 block_rsv->full = 1;
4985         } else {
4986                 num_bytes = 0;
4987         }
4988         spin_unlock(&block_rsv->lock);
4989
4990         if (num_bytes > 0) {
4991                 if (dest) {
4992                         spin_lock(&dest->lock);
4993                         if (!dest->full) {
4994                                 u64 bytes_to_add;
4995
4996                                 bytes_to_add = dest->size - dest->reserved;
4997                                 bytes_to_add = min(num_bytes, bytes_to_add);
4998                                 dest->reserved += bytes_to_add;
4999                                 if (dest->reserved >= dest->size)
5000                                         dest->full = 1;
5001                                 num_bytes -= bytes_to_add;
5002                         }
5003                         spin_unlock(&dest->lock);
5004                 }
5005                 if (num_bytes) {
5006                         spin_lock(&space_info->lock);
5007                         space_info->bytes_may_use -= num_bytes;
5008                         trace_btrfs_space_reservation(fs_info, "space_info",
5009                                         space_info->flags, num_bytes, 0);
5010                         spin_unlock(&space_info->lock);
5011                 }
5012         }
5013 }
5014
5015 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
5016                                    struct btrfs_block_rsv *dst, u64 num_bytes)
5017 {
5018         int ret;
5019
5020         ret = block_rsv_use_bytes(src, num_bytes);
5021         if (ret)
5022                 return ret;
5023
5024         block_rsv_add_bytes(dst, num_bytes, 1);
5025         return 0;
5026 }
5027
5028 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
5029 {
5030         memset(rsv, 0, sizeof(*rsv));
5031         spin_lock_init(&rsv->lock);
5032         rsv->type = type;
5033 }
5034
5035 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
5036                                               unsigned short type)
5037 {
5038         struct btrfs_block_rsv *block_rsv;
5039         struct btrfs_fs_info *fs_info = root->fs_info;
5040
5041         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
5042         if (!block_rsv)
5043                 return NULL;
5044
5045         btrfs_init_block_rsv(block_rsv, type);
5046         block_rsv->space_info = __find_space_info(fs_info,
5047                                                   BTRFS_BLOCK_GROUP_METADATA);
5048         return block_rsv;
5049 }
5050
5051 void btrfs_free_block_rsv(struct btrfs_root *root,
5052                           struct btrfs_block_rsv *rsv)
5053 {
5054         if (!rsv)
5055                 return;
5056         btrfs_block_rsv_release(root, rsv, (u64)-1);
5057         kfree(rsv);
5058 }
5059
5060 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
5061 {
5062         kfree(rsv);
5063 }
5064
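/*
 * Reserve @num_bytes of metadata space and add it to @block_rsv, growing
 * the reserve's size accordingly.
 */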
5065 int btrfs_block_rsv_add(struct btrfs_root *root,
5066                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
5067                         enum btrfs_reserve_flush_enum flush)
5068 {
5069         int ret;
5070
5071         if (num_bytes == 0)
5072                 return 0;
5073
5074         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5075         if (!ret) {
5076                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
5077                 return 0;
5078         }
5079
5080         return ret;
5081 }
5082
5083 int btrfs_block_rsv_check(struct btrfs_root *root,
5084                           struct btrfs_block_rsv *block_rsv, int min_factor)
5085 {
5086         u64 num_bytes = 0;
5087         int ret = -ENOSPC;
5088
5089         if (!block_rsv)
5090                 return 0;
5091
5092         spin_lock(&block_rsv->lock);
5093         num_bytes = div_factor(block_rsv->size, min_factor);
5094         if (block_rsv->reserved >= num_bytes)
5095                 ret = 0;
5096         spin_unlock(&block_rsv->lock);
5097
5098         return ret;
5099 }
5100
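/*
 * Top up @block_rsv so that it holds at least @min_reserved bytes,
 * reserving only the missing part.  Unlike btrfs_block_rsv_add() this
 * does not grow the reserve's size.
 */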
5101 int btrfs_block_rsv_refill(struct btrfs_root *root,
5102                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5103                            enum btrfs_reserve_flush_enum flush)
5104 {
5105         u64 num_bytes = 0;
5106         int ret = -ENOSPC;
5107
5108         if (!block_rsv)
5109                 return 0;
5110
5111         spin_lock(&block_rsv->lock);
5112         num_bytes = min_reserved;
5113         if (block_rsv->reserved >= num_bytes)
5114                 ret = 0;
5115         else
5116                 num_bytes -= block_rsv->reserved;
5117         spin_unlock(&block_rsv->lock);
5118
5119         if (!ret)
5120                 return 0;
5121
5122         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5123         if (!ret) {
5124                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
5125                 return 0;
5126         }
5127
5128         return ret;
5129 }
5130
5131 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
5132                             struct btrfs_block_rsv *dst_rsv,
5133                             u64 num_bytes)
5134 {
5135         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5136 }
5137
5138 void btrfs_block_rsv_release(struct btrfs_root *root,
5139                              struct btrfs_block_rsv *block_rsv,
5140                              u64 num_bytes)
5141 {
5142         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5143         if (global_rsv == block_rsv ||
5144             block_rsv->space_info != global_rsv->space_info)
5145                 global_rsv = NULL;
5146         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
5147                                 num_bytes);
5148 }
5149
5150 /*
5151  * Helper to calculate the size of the global block reservation.
5152  * The desired value is the sum of the space used by the extent tree,
5153  * the checksum tree and the root tree.
5154  */
5155 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
5156 {
5157         struct btrfs_space_info *sinfo;
5158         u64 num_bytes;
5159         u64 meta_used;
5160         u64 data_used;
5161         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
5162
5163         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
5164         spin_lock(&sinfo->lock);
5165         data_used = sinfo->bytes_used;
5166         spin_unlock(&sinfo->lock);
5167
5168         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5169         spin_lock(&sinfo->lock);
5170         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
5171                 data_used = 0;
5172         meta_used = sinfo->bytes_used;
5173         spin_unlock(&sinfo->lock);
5174
5175         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
5176                     csum_size * 2;
5177         num_bytes += div_u64(data_used + meta_used, 50);
5178
5179         if (num_bytes * 3 > meta_used)
5180                 num_bytes = div_u64(meta_used, 3);
5181
5182         return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
5183 }
5184
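/*
 * Recalculate the size of the global block reserve (capped at 512M) and
 * fill it from the unused part of the metadata space info, keeping
 * bytes_may_use in sync.
 */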
5185 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5186 {
5187         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5188         struct btrfs_space_info *sinfo = block_rsv->space_info;
5189         u64 num_bytes;
5190
5191         num_bytes = calc_global_metadata_size(fs_info);
5192
5193         spin_lock(&sinfo->lock);
5194         spin_lock(&block_rsv->lock);
5195
5196         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
5197
5198         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
5199                     sinfo->bytes_reserved + sinfo->bytes_readonly +
5200                     sinfo->bytes_may_use;
5201
5202         if (sinfo->total_bytes > num_bytes) {
5203                 num_bytes = sinfo->total_bytes - num_bytes;
5204                 block_rsv->reserved += num_bytes;
5205                 sinfo->bytes_may_use += num_bytes;
5206                 trace_btrfs_space_reservation(fs_info, "space_info",
5207                                       sinfo->flags, num_bytes, 1);
5208         }
5209
5210         if (block_rsv->reserved >= block_rsv->size) {
5211                 num_bytes = block_rsv->reserved - block_rsv->size;
5212                 sinfo->bytes_may_use -= num_bytes;
5213                 trace_btrfs_space_reservation(fs_info, "space_info",
5214                                       sinfo->flags, num_bytes, 0);
5215                 block_rsv->reserved = block_rsv->size;
5216                 block_rsv->full = 1;
5217         }
5218
5219         spin_unlock(&block_rsv->lock);
5220         spin_unlock(&sinfo->lock);
5221 }
5222
5223 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5224 {
5225         struct btrfs_space_info *space_info;
5226
5227         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
5228         fs_info->chunk_block_rsv.space_info = space_info;
5229
5230         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5231         fs_info->global_block_rsv.space_info = space_info;
5232         fs_info->delalloc_block_rsv.space_info = space_info;
5233         fs_info->trans_block_rsv.space_info = space_info;
5234         fs_info->empty_block_rsv.space_info = space_info;
5235         fs_info->delayed_block_rsv.space_info = space_info;
5236
5237         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
5238         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
5239         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
5240         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
5241         if (fs_info->quota_root)
5242                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
5243         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
5244
5245         update_global_block_rsv(fs_info);
5246 }
5247
5248 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5249 {
5250         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5251                                 (u64)-1);
5252         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
5253         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
5254         WARN_ON(fs_info->trans_block_rsv.size > 0);
5255         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5256         WARN_ON(fs_info->chunk_block_rsv.size > 0);
5257         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5258         WARN_ON(fs_info->delayed_block_rsv.size > 0);
5259         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5260 }
5261
5262 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
5263                                   struct btrfs_root *root)
5264 {
5265         if (!trans->block_rsv)
5266                 return;
5267
5268         if (!trans->bytes_reserved)
5269                 return;
5270
5271         trace_btrfs_space_reservation(root->fs_info, "transaction",
5272                                       trans->transid, trans->bytes_reserved, 0);
5273         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
5274         trans->bytes_reserved = 0;
5275 }
5276
5277 /*
5278  * To be called after all the new block groups attached to the transaction
5279  * handle have been created (btrfs_create_pending_block_groups()).
5280  */
5281 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5282 {
5283         struct btrfs_fs_info *fs_info = trans->root->fs_info;
5284
5285         if (!trans->chunk_bytes_reserved)
5286                 return;
5287
5288         WARN_ON_ONCE(!list_empty(&trans->new_bgs));
5289
5290         block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
5291                                 trans->chunk_bytes_reserved);
5292         trans->chunk_bytes_reserved = 0;
5293 }
5294
5295 /* Can only return 0 or -ENOSPC */
5296 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
5297                                   struct inode *inode)
5298 {
5299         struct btrfs_root *root = BTRFS_I(inode)->root;
5300         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
5301         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
5302
5303         /*
5304          * We need to hold space in order to delete our orphan item once we've
5305          * added it, so this takes the reservation so we can release it later
5306          * when we are truly done with the orphan item.
5307          */
5308         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5309         trace_btrfs_space_reservation(root->fs_info, "orphan",
5310                                       btrfs_ino(inode), num_bytes, 1);
5311         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5312 }
5313
5314 void btrfs_orphan_release_metadata(struct inode *inode)
5315 {
5316         struct btrfs_root *root = BTRFS_I(inode)->root;
5317         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5318         trace_btrfs_space_reservation(root->fs_info, "orphan",
5319                                       btrfs_ino(inode), num_bytes, 0);
5320         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
5321 }
5322
5323 /*
5324  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
5325  * root: the root of the parent directory
5326  * rsv: block reservation
5327  * items: the number of items that we need to reserve space for
5328  * qgroup_reserved: used to return the reserved size in qgroup
5329  *
5330  * This function is used to reserve the space for snapshot/subvolume
5331  * creation and deletion. Those operations differ from common
5332  * file/directory operations: they change two fs/file trees
5333  * and the root tree, and the number of items that the qgroup reserves
5334  * differs from the free space reservation. So we can not use
5335  * the space reservation mechanism in start_transaction().
5336  */
5337 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5338                                      struct btrfs_block_rsv *rsv,
5339                                      int items,
5340                                      u64 *qgroup_reserved,
5341                                      bool use_global_rsv)
5342 {
5343         u64 num_bytes;
5344         int ret;
5345         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5346
5347         if (root->fs_info->quota_enabled) {
5348                 /* One for parent inode, two for dir entries */
5349                 num_bytes = 3 * root->nodesize;
5350                 ret = btrfs_qgroup_reserve(root, num_bytes);
5351                 if (ret)
5352                         return ret;
5353         } else {
5354                 num_bytes = 0;
5355         }
5356
5357         *qgroup_reserved = num_bytes;
5358
5359         num_bytes = btrfs_calc_trans_metadata_size(root, items);
5360         rsv->space_info = __find_space_info(root->fs_info,
5361                                             BTRFS_BLOCK_GROUP_METADATA);
5362         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5363                                   BTRFS_RESERVE_FLUSH_ALL);
5364
5365         if (ret == -ENOSPC && use_global_rsv)
5366                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
5367
5368         if (ret) {
5369                 if (*qgroup_reserved)
5370                         btrfs_qgroup_free(root, *qgroup_reserved);
5371         }
5372
5373         return ret;
5374 }
5375
5376 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5377                                       struct btrfs_block_rsv *rsv,
5378                                       u64 qgroup_reserved)
5379 {
5380         btrfs_block_rsv_release(root, rsv, (u64)-1);
5381 }
5382
5383 /**
5384  * drop_outstanding_extent - drop an outstanding extent
5385  * @inode: the inode we're dropping the extent for
5386  * @num_bytes: the number of bytes we're releasing.
5387  *
5388  * This is called when we are freeing up an outstanding extent, either called
5389  * after an error or after an extent is written.  This will return the number of
5390  * reserved extents that need to be freed.  This must be called with
5391  * BTRFS_I(inode)->lock held.
5392  */
5393 static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5394 {
5395         unsigned drop_inode_space = 0;
5396         unsigned dropped_extents = 0;
5397         unsigned num_extents = 0;
5398
5399         num_extents = (unsigned)div64_u64(num_bytes +
5400                                           BTRFS_MAX_EXTENT_SIZE - 1,
5401                                           BTRFS_MAX_EXTENT_SIZE);
5402         ASSERT(num_extents);
5403         ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
5404         BTRFS_I(inode)->outstanding_extents -= num_extents;
5405
5406         if (BTRFS_I(inode)->outstanding_extents == 0 &&
5407             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5408                                &BTRFS_I(inode)->runtime_flags))
5409                 drop_inode_space = 1;
5410
5411         /*
5412          * If we have at least as many outstanding extents as we have
5413          * reserved then we need to leave the reserved extents count alone.
5414          */
5415         if (BTRFS_I(inode)->outstanding_extents >=
5416             BTRFS_I(inode)->reserved_extents)
5417                 return drop_inode_space;
5418
5419         dropped_extents = BTRFS_I(inode)->reserved_extents -
5420                 BTRFS_I(inode)->outstanding_extents;
5421         BTRFS_I(inode)->reserved_extents -= dropped_extents;
5422         return dropped_extents + drop_inode_space;
5423 }
5424
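/*
 * Worked example for the extent math above (a sketch; assumes
 * BTRFS_MAX_EXTENT_SIZE has its usual value of 128M):
 *
 *	num_bytes   = 300 * 1024 * 1024;
 *	num_extents = div64_u64(num_bytes + BTRFS_MAX_EXTENT_SIZE - 1,
 *				BTRFS_MAX_EXTENT_SIZE);	-> 3
 *
 * so dropping 300M of delalloc removes three outstanding extents, and
 * any reserved extents beyond what is still outstanding are returned
 * to the caller to be freed.
 */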
5425 /**
5426  * calc_csum_metadata_size - return the amount of metadata space that must be
5427  *      reserved/freed for the given bytes.
5428  * @inode: the inode we're manipulating
5429  * @num_bytes: the number of bytes in question
5430  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5431  *
5432  * This adjusts the number of csum_bytes in the inode and then returns the
5433  * correct amount of metadata that must either be reserved or freed.  We
5434  * calculate how many checksums we can fit into one leaf and then divide the
5435  * number of bytes that will need to be checksummed by this value to figure out
5436  * how many checksums will be required.  If we are adding bytes then the number
5437  * may go up and we will return the number of additional bytes that must be
5438  * reserved.  If it is going down we will return the number of bytes that must
5439  * be freed.
5440  *
5441  * This must be called with BTRFS_I(inode)->lock held.
5442  */
5443 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5444                                    int reserve)
5445 {
5446         struct btrfs_root *root = BTRFS_I(inode)->root;
5447         u64 old_csums, num_csums;
5448
5449         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5450             BTRFS_I(inode)->csum_bytes == 0)
5451                 return 0;
5452
5453         old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5454         if (reserve)
5455                 BTRFS_I(inode)->csum_bytes += num_bytes;
5456         else
5457                 BTRFS_I(inode)->csum_bytes -= num_bytes;
5458         num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5459
5460         /* No change, no need to reserve more */
5461         if (old_csums == num_csums)
5462                 return 0;
5463
5464         if (reserve)
5465                 return btrfs_calc_trans_metadata_size(root,
5466                                                       num_csums - old_csums);
5467
5468         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5469 }
5470
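/*
 * Sketch of the flow in calc_csum_metadata_size() for the reserve case
 * (illustrative; the csums-per-leaf ratio depends on leaf and csum
 * size):
 *
 *	old_csums  = btrfs_csum_bytes_to_leaves(root, csum_bytes);
 *	csum_bytes += num_bytes;
 *	num_csums  = btrfs_csum_bytes_to_leaves(root, csum_bytes);
 *	to_reserve = btrfs_calc_trans_metadata_size(root,
 *						    num_csums - old_csums);
 *
 * i.e. we only reserve for the additional leaves the new checksums may
 * force us to allocate, not a fixed amount per byte checksummed.
 */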
5471 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5472 {
5473         struct btrfs_root *root = BTRFS_I(inode)->root;
5474         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5475         u64 to_reserve = 0;
5476         u64 csum_bytes;
5477         unsigned nr_extents = 0;
5478         int extra_reserve = 0;
5479         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5480         int ret = 0;
5481         bool delalloc_lock = true;
5482         u64 to_free = 0;
5483         unsigned dropped;
5484
5485         /* If we are a free space inode we must not flush, since we will be in
5486          * the middle of a transaction commit.  We also don't need the delalloc
5487          * mutex since we won't race with anybody.  We need this mostly to make
5488          * lockdep shut its filthy mouth.
5489          */
5490         if (btrfs_is_free_space_inode(inode)) {
5491                 flush = BTRFS_RESERVE_NO_FLUSH;
5492                 delalloc_lock = false;
5493         }
5494
5495         if (flush != BTRFS_RESERVE_NO_FLUSH &&
5496             btrfs_transaction_in_commit(root->fs_info))
5497                 schedule_timeout(1);
5498
5499         if (delalloc_lock)
5500                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5501
5502         num_bytes = ALIGN(num_bytes, root->sectorsize);
5503
5504         spin_lock(&BTRFS_I(inode)->lock);
5505         nr_extents = (unsigned)div64_u64(num_bytes +
5506                                          BTRFS_MAX_EXTENT_SIZE - 1,
5507                                          BTRFS_MAX_EXTENT_SIZE);
5508         BTRFS_I(inode)->outstanding_extents += nr_extents;
5509         nr_extents = 0;
5510
5511         if (BTRFS_I(inode)->outstanding_extents >
5512             BTRFS_I(inode)->reserved_extents)
5513                 nr_extents = BTRFS_I(inode)->outstanding_extents -
5514                         BTRFS_I(inode)->reserved_extents;
5515
5516         /*
5517          * Add an item to reserve for updating the inode when we complete the
5518          * delalloc io.
5519          */
5520         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5521                       &BTRFS_I(inode)->runtime_flags)) {
5522                 nr_extents++;
5523                 extra_reserve = 1;
5524         }
5525
5526         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5527         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5528         csum_bytes = BTRFS_I(inode)->csum_bytes;
5529         spin_unlock(&BTRFS_I(inode)->lock);
5530
5531         if (root->fs_info->quota_enabled) {
5532                 ret = btrfs_qgroup_reserve(root, nr_extents * root->nodesize);
5533                 if (ret)
5534                         goto out_fail;
5535         }
5536
5537         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5538         if (unlikely(ret)) {
5539                 if (root->fs_info->quota_enabled)
5540                         btrfs_qgroup_free(root, nr_extents * root->nodesize);
5541                 goto out_fail;
5542         }
5543
5544         spin_lock(&BTRFS_I(inode)->lock);
5545         if (extra_reserve) {
5546                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5547                         &BTRFS_I(inode)->runtime_flags);
5548                 nr_extents--;
5549         }
5550         BTRFS_I(inode)->reserved_extents += nr_extents;
5551         spin_unlock(&BTRFS_I(inode)->lock);
5552
5553         if (delalloc_lock)
5554                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5555
5556         if (to_reserve)
5557                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5558                                               btrfs_ino(inode), to_reserve, 1);
5559         block_rsv_add_bytes(block_rsv, to_reserve, 1);
5560
5561         return 0;
5562
5563 out_fail:
5564         spin_lock(&BTRFS_I(inode)->lock);
5565         dropped = drop_outstanding_extent(inode, num_bytes);
5566         /*
5567          * If the inode's csum_bytes is the same as the original
5568          * csum_bytes then we know we haven't raced with any free()ers
5569          * so we can just reduce our inode's csum bytes and carry on.
5570          */
5571         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5572                 calc_csum_metadata_size(inode, num_bytes, 0);
5573         } else {
5574                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5575                 u64 bytes;
5576
5577                 /*
5578                  * This is tricky, but first we need to figure out how much we
5579                  * freed by any free-ers that occurred during this
5580                  * reservation, so we reset ->csum_bytes to the csum_bytes
5581                  * before we dropped our lock, and then call the free for the
5582                  * number of bytes that were freed while we were trying our
5583                  * reservation.
5584                  */
5585                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5586                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5587                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5588
5589
5590                 /*
5591                  * Now we need to see how much we would have freed had we not
5592                  * been making this reservation and our ->csum_bytes were not
5593                  * artificially inflated.
5594                  */
5595                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5596                 bytes = csum_bytes - orig_csum_bytes;
5597                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5598
5599                 /*
5600                  * Now reset ->csum_bytes to what it should be.  If bytes is
5601                  * more than to_free then we would have freed more space had we
5602                  * not had an artificially high ->csum_bytes, so we need to free
5603                  * the remainder.  If bytes is the same or less then we don't
5604                  * need to do anything, the other free-ers did the correct
5605                  * thing.
5606                  */
5607                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5608                 if (bytes > to_free)
5609                         to_free = bytes - to_free;
5610                 else
5611                         to_free = 0;
5612         }
5613         spin_unlock(&BTRFS_I(inode)->lock);
5614         if (dropped)
5615                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5616
5617         if (to_free) {
5618                 btrfs_block_rsv_release(root, block_rsv, to_free);
5619                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5620                                               btrfs_ino(inode), to_free, 0);
5621         }
5622         if (delalloc_lock)
5623                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5624         return ret;
5625 }
5626
5627 /**
5628  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5629  * @inode: the inode to release the reservation for
5630  * @num_bytes: the number of bytes we're releasing
5631  *
5632  * This will release the metadata reservation for an inode.  This can be called
5633  * once we complete IO for a given set of bytes to release their metadata
5634  * reservations.
5635  */
5636 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5637 {
5638         struct btrfs_root *root = BTRFS_I(inode)->root;
5639         u64 to_free = 0;
5640         unsigned dropped;
5641
5642         num_bytes = ALIGN(num_bytes, root->sectorsize);
5643         spin_lock(&BTRFS_I(inode)->lock);
5644         dropped = drop_outstanding_extent(inode, num_bytes);
5645
5646         if (num_bytes)
5647                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5648         spin_unlock(&BTRFS_I(inode)->lock);
5649         if (dropped > 0)
5650                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5651
5652         if (btrfs_test_is_dummy_root(root))
5653                 return;
5654
5655         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5656                                       btrfs_ino(inode), to_free, 0);
5657
5658         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5659                                 to_free);
5660 }
5661
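/*
 * Sketch of the expected pairing of the two helpers above
 * (illustrative only; "do_delalloc_io" is a hypothetical stand-in for
 * a real write-path caller):
 *
 *	ret = btrfs_delalloc_reserve_metadata(inode, len);
 *	if (ret)
 *		return ret;
 *	ret = do_delalloc_io(inode, len);
 *	if (ret)
 *		btrfs_delalloc_release_metadata(inode, len);
 *
 * On success the release happens later, once IO for those bytes
 * completes.
 */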
5662 /**
5663  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5664  * @inode: inode we're writing to
5665  * @num_bytes: the number of bytes we want to allocate
5666  *
5667  * This will do the following things
5668  *
5669  * o reserve space in the data space info for num_bytes
5670  * o reserve space in the metadata space info based on number of outstanding
5671  *   extents and how many csums will be needed
5672  * o add to the inode's ->delalloc_bytes
5673  * o add it to the fs_info's delalloc inodes list.
5674  *
5675  * This will return 0 for success and -ENOSPC if there is no space left.
5676  */
5677 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5678 {
5679         int ret;
5680
5681         ret = btrfs_check_data_free_space(inode, num_bytes, num_bytes);
5682         if (ret)
5683                 return ret;
5684
5685         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5686         if (ret) {
5687                 btrfs_free_reserved_data_space(inode, num_bytes);
5688                 return ret;
5689         }
5690
5691         return 0;
5692 }
5693
5694 /**
5695  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5696  * @inode: inode we're releasing space for
5697  * @num_bytes: the number of bytes we want to free up
5698  *
5699  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5700  * called in the case that we don't need the metadata AND data reservations
5701  * anymore, for example after an error or after inserting an inline extent.
5702  *
5703  * This function will release the metadata space that was not used and will
5704  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5705  * list if there are no delalloc bytes left.
5706  */
5707 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5708 {
5709         btrfs_delalloc_release_metadata(inode, num_bytes);
5710         btrfs_free_reserved_data_space(inode, num_bytes);
5711 }
5712
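/*
 * Typical caller pattern for the data+metadata pair above
 * (illustrative sketch; "prepare_pages" is a hypothetical helper):
 *
 *	ret = btrfs_delalloc_reserve_space(inode, count);
 *	if (ret)
 *		return ret;
 *	ret = prepare_pages(inode, pos, count);
 *	if (ret)
 *		btrfs_delalloc_release_space(inode, count);
 */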
5713 static int update_block_group(struct btrfs_trans_handle *trans,
5714                               struct btrfs_root *root, u64 bytenr,
5715                               u64 num_bytes, int alloc)
5716 {
5717         struct btrfs_block_group_cache *cache = NULL;
5718         struct btrfs_fs_info *info = root->fs_info;
5719         u64 total = num_bytes;
5720         u64 old_val;
5721         u64 byte_in_group;
5722         int factor;
5723
5724         /* block accounting for super block */
5725         spin_lock(&info->delalloc_root_lock);
5726         old_val = btrfs_super_bytes_used(info->super_copy);
5727         if (alloc)
5728                 old_val += num_bytes;
5729         else
5730                 old_val -= num_bytes;
5731         btrfs_set_super_bytes_used(info->super_copy, old_val);
5732         spin_unlock(&info->delalloc_root_lock);
5733
5734         while (total) {
5735                 cache = btrfs_lookup_block_group(info, bytenr);
5736                 if (!cache)
5737                         return -ENOENT;
5738                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5739                                     BTRFS_BLOCK_GROUP_RAID1 |
5740                                     BTRFS_BLOCK_GROUP_RAID10))
5741                         factor = 2;
5742                 else
5743                         factor = 1;
5744                 /*
5745                  * If this block group has free space cache written out, we
5746                  * need to make sure to load it if we are removing space.  This
5747                  * is because we need the unpinning stage to actually add the
5748                  * space back to the block group, otherwise we will leak space.
5749                  */
5750                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5751                         cache_block_group(cache, 1);
5752
5753                 byte_in_group = bytenr - cache->key.objectid;
5754                 WARN_ON(byte_in_group > cache->key.offset);
5755
5756                 spin_lock(&cache->space_info->lock);
5757                 spin_lock(&cache->lock);
5758
5759                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5760                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5761                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5762
5763                 old_val = btrfs_block_group_used(&cache->item);
5764                 num_bytes = min(total, cache->key.offset - byte_in_group);
5765                 if (alloc) {
5766                         old_val += num_bytes;
5767                         btrfs_set_block_group_used(&cache->item, old_val);
5768                         cache->reserved -= num_bytes;
5769                         cache->space_info->bytes_reserved -= num_bytes;
5770                         cache->space_info->bytes_used += num_bytes;
5771                         cache->space_info->disk_used += num_bytes * factor;
5772                         spin_unlock(&cache->lock);
5773                         spin_unlock(&cache->space_info->lock);
5774                 } else {
5775                         old_val -= num_bytes;
5776                         btrfs_set_block_group_used(&cache->item, old_val);
5777                         cache->pinned += num_bytes;
5778                         cache->space_info->bytes_pinned += num_bytes;
5779                         cache->space_info->bytes_used -= num_bytes;
5780                         cache->space_info->disk_used -= num_bytes * factor;
5781                         spin_unlock(&cache->lock);
5782                         spin_unlock(&cache->space_info->lock);
5783
5784                         set_extent_dirty(info->pinned_extents,
5785                                          bytenr, bytenr + num_bytes - 1,
5786                                          GFP_NOFS | __GFP_NOFAIL);
5787                         /*
5788                          * No longer have used bytes in this block group, queue
5789                          * it for deletion.
5790                          */
5791                         if (old_val == 0) {
5792                                 spin_lock(&info->unused_bgs_lock);
5793                                 if (list_empty(&cache->bg_list)) {
5794                                         btrfs_get_block_group(cache);
5795                                         list_add_tail(&cache->bg_list,
5796                                                       &info->unused_bgs);
5797                                 }
5798                                 spin_unlock(&info->unused_bgs_lock);
5799                         }
5800                 }
5801
5802                 spin_lock(&trans->transaction->dirty_bgs_lock);
5803                 if (list_empty(&cache->dirty_list)) {
5804                         list_add_tail(&cache->dirty_list,
5805                                       &trans->transaction->dirty_bgs);
5806                         trans->transaction->num_dirty_bgs++;
5807                         btrfs_get_block_group(cache);
5808                 }
5809                 spin_unlock(&trans->transaction->dirty_bgs_lock);
5810
5811                 btrfs_put_block_group(cache);
5812                 total -= num_bytes;
5813                 bytenr += num_bytes;
5814         }
5815         return 0;
5816 }
5817
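/*
 * Note on the "factor" used above (a sketch of the accounting with
 * made-up numbers): DUP/RAID1/RAID10 store every logical byte twice,
 * so allocating num_bytes = 1M from a RAID1 block group does
 *
 *	space_info->bytes_used += 1M;
 *	space_info->disk_used  += 1M * 2;
 *
 * while single/RAID0 groups use factor = 1 and the two counters move
 * in lockstep (RAID5/6 parity overhead is not reflected here).
 */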
5818 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5819 {
5820         struct btrfs_block_group_cache *cache;
5821         u64 bytenr;
5822
5823         spin_lock(&root->fs_info->block_group_cache_lock);
5824         bytenr = root->fs_info->first_logical_byte;
5825         spin_unlock(&root->fs_info->block_group_cache_lock);
5826
5827         if (bytenr < (u64)-1)
5828                 return bytenr;
5829
5830         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5831         if (!cache)
5832                 return 0;
5833
5834         bytenr = cache->key.objectid;
5835         btrfs_put_block_group(cache);
5836
5837         return bytenr;
5838 }
5839
5840 static int pin_down_extent(struct btrfs_root *root,
5841                            struct btrfs_block_group_cache *cache,
5842                            u64 bytenr, u64 num_bytes, int reserved)
5843 {
5844         spin_lock(&cache->space_info->lock);
5845         spin_lock(&cache->lock);
5846         cache->pinned += num_bytes;
5847         cache->space_info->bytes_pinned += num_bytes;
5848         if (reserved) {
5849                 cache->reserved -= num_bytes;
5850                 cache->space_info->bytes_reserved -= num_bytes;
5851         }
5852         spin_unlock(&cache->lock);
5853         spin_unlock(&cache->space_info->lock);
5854
5855         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5856                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5857         if (reserved)
5858                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5859         return 0;
5860 }
5861
5862 /*
5863  * this function must be called within transaction
5864  */
5865 int btrfs_pin_extent(struct btrfs_root *root,
5866                      u64 bytenr, u64 num_bytes, int reserved)
5867 {
5868         struct btrfs_block_group_cache *cache;
5869
5870         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5871         BUG_ON(!cache); /* Logic error */
5872
5873         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5874
5875         btrfs_put_block_group(cache);
5876         return 0;
5877 }
5878
5879 /*
5880  * this function must be called within transaction
5881  */
5882 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5883                                     u64 bytenr, u64 num_bytes)
5884 {
5885         struct btrfs_block_group_cache *cache;
5886         int ret;
5887
5888         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5889         if (!cache)
5890                 return -EINVAL;
5891
5892         /*
5893          * pull in the free space cache (if any) so that our pin
5894          * removes the free space from the cache.  We have load_only set
5895          * to one because the slow code to read in the free extents does check
5896          * the pinned extents.
5897          */
5898         cache_block_group(cache, 1);
5899
5900         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5901
5902         /* remove us from the free space cache (if we're there at all) */
5903         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5904         btrfs_put_block_group(cache);
5905         return ret;
5906 }
5907
5908 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5909 {
5910         int ret;
5911         struct btrfs_block_group_cache *block_group;
5912         struct btrfs_caching_control *caching_ctl;
5913
5914         block_group = btrfs_lookup_block_group(root->fs_info, start);
5915         if (!block_group)
5916                 return -EINVAL;
5917
5918         cache_block_group(block_group, 0);
5919         caching_ctl = get_caching_control(block_group);
5920
5921         if (!caching_ctl) {
5922                 /* Logic error */
5923                 BUG_ON(!block_group_cache_done(block_group));
5924                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5925         } else {
5926                 mutex_lock(&caching_ctl->mutex);
5927
5928                 if (start >= caching_ctl->progress) {
5929                         ret = add_excluded_extent(root, start, num_bytes);
5930                 } else if (start + num_bytes <= caching_ctl->progress) {
5931                         ret = btrfs_remove_free_space(block_group,
5932                                                       start, num_bytes);
5933                 } else {
5934                         num_bytes = caching_ctl->progress - start;
5935                         ret = btrfs_remove_free_space(block_group,
5936                                                       start, num_bytes);
5937                         if (ret)
5938                                 goto out_lock;
5939
5940                         num_bytes = (start + num_bytes) -
5941                                 caching_ctl->progress;
5942                         start = caching_ctl->progress;
5943                         ret = add_excluded_extent(root, start, num_bytes);
5944                 }
5945 out_lock:
5946                 mutex_unlock(&caching_ctl->mutex);
5947                 put_caching_control(caching_ctl);
5948         }
5949         btrfs_put_block_group(block_group);
5950         return ret;
5951 }
5952
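/*
 * The three cases in __exclude_logged_extent() above, pictured
 * (illustrative):
 *
 *	[group start ...... caching_ctl->progress ...... group end]
 *
 * 1) start >= progress: the range is not cached yet, so just mark it
 *    excluded and let the caching thread skip it.
 * 2) start + num_bytes <= progress: the range is fully cached, so
 *    remove it from the free space cache directly.
 * 3) the range straddles progress: remove the cached half from the
 *    free space cache and exclude the uncached half.
 */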
5953 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5954                                  struct extent_buffer *eb)
5955 {
5956         struct btrfs_file_extent_item *item;
5957         struct btrfs_key key;
5958         int found_type;
5959         int i;
5960
5961         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5962                 return 0;
5963
5964         for (i = 0; i < btrfs_header_nritems(eb); i++) {
5965                 btrfs_item_key_to_cpu(eb, &key, i);
5966                 if (key.type != BTRFS_EXTENT_DATA_KEY)
5967                         continue;
5968                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5969                 found_type = btrfs_file_extent_type(eb, item);
5970                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
5971                         continue;
5972                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5973                         continue;
5974                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5975                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5976                 __exclude_logged_extent(log, key.objectid, key.offset);
5977         }
5978
5979         return 0;
5980 }
5981
5982 /**
5983  * btrfs_update_reserved_bytes - update the block_group and space info counters
5984  * @cache:      The cache we are manipulating
5985  * @num_bytes:  The number of bytes in question
5986  * @reserve:    One of the reservation enums
5987  * @delalloc:   The blocks are allocated for the delalloc write
5988  *
5989  * This is called by the allocator when it reserves space, or by somebody who is
5990  * freeing space that was never actually used on disk.  For example if you
5991  * reserve some space for a new leaf in transaction A and before transaction A
5992  * commits you free that leaf, you call this with reserve set to
5993  * RESERVE_FREE in order to clear the reservation.
5994  *
5995  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
5996  * ENOSPC accounting.  For data we handle the reservation through clearing the
5997  * delalloc bits in the io_tree.  We have to do this since we could end up
5998  * allocating less disk space for the amount of data we have reserved in the
5999  * case of compression.
6000  *
6001  * If this is a reservation and the block group has become read only we cannot
6002  * make the reservation and return -EAGAIN, otherwise this function always
6003  * succeeds.
6004  */
6005 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
6006                                        u64 num_bytes, int reserve, int delalloc)
6007 {
6008         struct btrfs_space_info *space_info = cache->space_info;
6009         int ret = 0;
6010
6011         spin_lock(&space_info->lock);
6012         spin_lock(&cache->lock);
6013         if (reserve != RESERVE_FREE) {
6014                 if (cache->ro) {
6015                         ret = -EAGAIN;
6016                 } else {
6017                         cache->reserved += num_bytes;
6018                         space_info->bytes_reserved += num_bytes;
6019                         if (reserve == RESERVE_ALLOC) {
6020                                 trace_btrfs_space_reservation(cache->fs_info,
6021                                                 "space_info", space_info->flags,
6022                                                 num_bytes, 0);
6023                                 space_info->bytes_may_use -= num_bytes;
6024                         }
6025
6026                         if (delalloc)
6027                                 cache->delalloc_bytes += num_bytes;
6028                 }
6029         } else {
6030                 if (cache->ro)
6031                         space_info->bytes_readonly += num_bytes;
6032                 cache->reserved -= num_bytes;
6033                 space_info->bytes_reserved -= num_bytes;
6034
6035                 if (delalloc)
6036                         cache->delalloc_bytes -= num_bytes;
6037         }
6038         spin_unlock(&cache->lock);
6039         spin_unlock(&space_info->lock);
6040         return ret;
6041 }
6042
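/*
 * Example of the reserve/free pairing described above (a sketch with a
 * hypothetical metadata allocation that is freed again before the
 * transaction commits):
 *
 *	btrfs_update_reserved_bytes(cache, len, RESERVE_ALLOC, 0);
 *		-> cache->reserved += len, bytes_may_use -= len
 *	...
 *	btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, 0);
 *		-> cache->reserved -= len
 */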
6043 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
6044                                 struct btrfs_root *root)
6045 {
6046         struct btrfs_fs_info *fs_info = root->fs_info;
6047         struct btrfs_caching_control *next;
6048         struct btrfs_caching_control *caching_ctl;
6049         struct btrfs_block_group_cache *cache;
6050
6051         down_write(&fs_info->commit_root_sem);
6052
6053         list_for_each_entry_safe(caching_ctl, next,
6054                                  &fs_info->caching_block_groups, list) {
6055                 cache = caching_ctl->block_group;
6056                 if (block_group_cache_done(cache)) {
6057                         cache->last_byte_to_unpin = (u64)-1;
6058                         list_del_init(&caching_ctl->list);
6059                         put_caching_control(caching_ctl);
6060                 } else {
6061                         cache->last_byte_to_unpin = caching_ctl->progress;
6062                 }
6063         }
6064
6065         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6066                 fs_info->pinned_extents = &fs_info->freed_extents[1];
6067         else
6068                 fs_info->pinned_extents = &fs_info->freed_extents[0];
6069
6070         up_write(&fs_info->commit_root_sem);
6071
6072         update_global_block_rsv(fs_info);
6073 }
6074
6075 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
6076                               const bool return_free_space)
6077 {
6078         struct btrfs_fs_info *fs_info = root->fs_info;
6079         struct btrfs_block_group_cache *cache = NULL;
6080         struct btrfs_space_info *space_info;
6081         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6082         u64 len;
6083         bool readonly;
6084
6085         while (start <= end) {
6086                 readonly = false;
6087                 if (!cache ||
6088                     start >= cache->key.objectid + cache->key.offset) {
6089                         if (cache)
6090                                 btrfs_put_block_group(cache);
6091                         cache = btrfs_lookup_block_group(fs_info, start);
6092                         BUG_ON(!cache); /* Logic error */
6093                 }
6094
6095                 len = cache->key.objectid + cache->key.offset - start;
6096                 len = min(len, end + 1 - start);
6097
6098                 if (start < cache->last_byte_to_unpin) {
6099                         len = min(len, cache->last_byte_to_unpin - start);
6100                         if (return_free_space)
6101                                 btrfs_add_free_space(cache, start, len);
6102                 }
6103
6104                 start += len;
6105                 space_info = cache->space_info;
6106
6107                 spin_lock(&space_info->lock);
6108                 spin_lock(&cache->lock);
6109                 cache->pinned -= len;
6110                 space_info->bytes_pinned -= len;
6111                 percpu_counter_add(&space_info->total_bytes_pinned, -len);
6112                 if (cache->ro) {
6113                         space_info->bytes_readonly += len;
6114                         readonly = true;
6115                 }
6116                 spin_unlock(&cache->lock);
6117                 if (!readonly && global_rsv->space_info == space_info) {
6118                         spin_lock(&global_rsv->lock);
6119                         if (!global_rsv->full) {
6120                                 len = min(len, global_rsv->size -
6121                                           global_rsv->reserved);
6122                                 global_rsv->reserved += len;
6123                                 space_info->bytes_may_use += len;
6124                                 if (global_rsv->reserved >= global_rsv->size)
6125                                         global_rsv->full = 1;
6126                         }
6127                         spin_unlock(&global_rsv->lock);
6128                 }
6129                 spin_unlock(&space_info->lock);
6130         }
6131
6132         if (cache)
6133                 btrfs_put_block_group(cache);
6134         return 0;
6135 }
6136
6137 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
6138                                struct btrfs_root *root)
6139 {
6140         struct btrfs_fs_info *fs_info = root->fs_info;
6141         struct btrfs_block_group_cache *block_group, *tmp;
6142         struct list_head *deleted_bgs;
6143         struct extent_io_tree *unpin;
6144         u64 start;
6145         u64 end;
6146         int ret;
6147
6148         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6149                 unpin = &fs_info->freed_extents[1];
6150         else
6151                 unpin = &fs_info->freed_extents[0];
6152
6153         while (!trans->aborted) {
6154                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
6155                 ret = find_first_extent_bit(unpin, 0, &start, &end,
6156                                             EXTENT_DIRTY, NULL);
6157                 if (ret) {
6158                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6159                         break;
6160                 }
6161
6162                 if (btrfs_test_opt(root, DISCARD))
6163                         ret = btrfs_discard_extent(root, start,
6164                                                    end + 1 - start, NULL);
6165
6166                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
6167                 unpin_extent_range(root, start, end, true);
6168                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6169                 cond_resched();
6170         }
6171
6172         /*
6173          * Transaction is finished.  We don't need the lock anymore.  We
6174          * do need to clean up the block groups in case of a transaction
6175          * abort.
6176          */
6177         deleted_bgs = &trans->transaction->deleted_bgs;
6178         list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
6179                 u64 trimmed = 0;
6180
6181                 ret = -EROFS;
6182                 if (!trans->aborted)
6183                         ret = btrfs_discard_extent(root,
6184                                                    block_group->key.objectid,
6185                                                    block_group->key.offset,
6186                                                    &trimmed);
6187
6188                 list_del_init(&block_group->bg_list);
6189                 btrfs_put_block_group_trimming(block_group);
6190                 btrfs_put_block_group(block_group);
6191
6192                 if (ret) {
6193                         const char *errstr = btrfs_decode_error(ret);
6194                         btrfs_warn(fs_info,
6195                                    "discard failed while removing block group: errno=%d %s",
6196                                    ret, errstr);
6197                 }
6198         }
6199
6200         return 0;
6201 }
6202
6203 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
6204                              u64 owner, u64 root_objectid)
6205 {
6206         struct btrfs_space_info *space_info;
6207         u64 flags;
6208
6209         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6210                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
6211                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
6212                 else
6213                         flags = BTRFS_BLOCK_GROUP_METADATA;
6214         } else {
6215                 flags = BTRFS_BLOCK_GROUP_DATA;
6216         }
6217
6218         space_info = __find_space_info(fs_info, flags);
6219         BUG_ON(!space_info); /* Logic bug */
6220         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
6221 }
6222
6223
6224 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6225                                 struct btrfs_root *root,
6226                                 struct btrfs_delayed_ref_node *node, u64 parent,
6227                                 u64 root_objectid, u64 owner_objectid,
6228                                 u64 owner_offset, int refs_to_drop,
6229                                 struct btrfs_delayed_extent_op *extent_op)
6230 {
6231         struct btrfs_key key;
6232         struct btrfs_path *path;
6233         struct btrfs_fs_info *info = root->fs_info;
6234         struct btrfs_root *extent_root = info->extent_root;
6235         struct extent_buffer *leaf;
6236         struct btrfs_extent_item *ei;
6237         struct btrfs_extent_inline_ref *iref;
6238         int ret;
6239         int is_data;
6240         int extent_slot = 0;
6241         int found_extent = 0;
6242         int num_to_del = 1;
6243         int no_quota = node->no_quota;
6244         u32 item_size;
6245         u64 refs;
6246         u64 bytenr = node->bytenr;
6247         u64 num_bytes = node->num_bytes;
6248         int last_ref = 0;
6249         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6250                                                  SKINNY_METADATA);
6251
6252         if (!info->quota_enabled || !is_fstree(root_objectid))
6253                 no_quota = 1;
6254
6255         path = btrfs_alloc_path();
6256         if (!path)
6257                 return -ENOMEM;
6258
6259         path->reada = 1;
6260         path->leave_spinning = 1;
6261
6262         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
6263         BUG_ON(!is_data && refs_to_drop != 1);
6264
6265         if (is_data)
6266                 skinny_metadata = 0;
6267
6268         ret = lookup_extent_backref(trans, extent_root, path, &iref,
6269                                     bytenr, num_bytes, parent,
6270                                     root_objectid, owner_objectid,
6271                                     owner_offset);
6272         if (ret == 0) {
6273                 extent_slot = path->slots[0];
6274                 while (extent_slot >= 0) {
6275                         btrfs_item_key_to_cpu(path->nodes[0], &key,
6276                                               extent_slot);
6277                         if (key.objectid != bytenr)
6278                                 break;
6279                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
6280                             key.offset == num_bytes) {
6281                                 found_extent = 1;
6282                                 break;
6283                         }
6284                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
6285                             key.offset == owner_objectid) {
6286                                 found_extent = 1;
6287                                 break;
6288                         }
6289                         if (path->slots[0] - extent_slot > 5)
6290                                 break;
6291                         extent_slot--;
6292                 }
6293 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6294                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
6295                 if (found_extent && item_size < sizeof(*ei))
6296                         found_extent = 0;
6297 #endif
6298                 if (!found_extent) {
6299                         BUG_ON(iref);
6300                         ret = remove_extent_backref(trans, extent_root, path,
6301                                                     NULL, refs_to_drop,
6302                                                     is_data, &last_ref);
6303                         if (ret) {
6304                                 btrfs_abort_transaction(trans, extent_root, ret);
6305                                 goto out;
6306                         }
6307                         btrfs_release_path(path);
6308                         path->leave_spinning = 1;
6309
6310                         key.objectid = bytenr;
6311                         key.type = BTRFS_EXTENT_ITEM_KEY;
6312                         key.offset = num_bytes;
6313
6314                         if (!is_data && skinny_metadata) {
6315                                 key.type = BTRFS_METADATA_ITEM_KEY;
6316                                 key.offset = owner_objectid;
6317                         }
6318
6319                         ret = btrfs_search_slot(trans, extent_root,
6320                                                 &key, path, -1, 1);
6321                         if (ret > 0 && skinny_metadata && path->slots[0]) {
6322                                 /*
6323                                  * Couldn't find our skinny metadata item,
6324                                  * see if we have ye olde extent item.
6325                                  */
6326                                 path->slots[0]--;
6327                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
6328                                                       path->slots[0]);
6329                                 if (key.objectid == bytenr &&
6330                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
6331                                     key.offset == num_bytes)
6332                                         ret = 0;
6333                         }
6334
6335                         if (ret > 0 && skinny_metadata) {
6336                                 skinny_metadata = false;
6337                                 key.objectid = bytenr;
6338                                 key.type = BTRFS_EXTENT_ITEM_KEY;
6339                                 key.offset = num_bytes;
6340                                 btrfs_release_path(path);
6341                                 ret = btrfs_search_slot(trans, extent_root,
6342                                                         &key, path, -1, 1);
6343                         }
6344
6345                         if (ret) {
6346                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6347                                         ret, bytenr);
6348                                 if (ret > 0)
6349                                         btrfs_print_leaf(extent_root,
6350                                                          path->nodes[0]);
6351                         }
6352                         if (ret < 0) {
6353                                 btrfs_abort_transaction(trans, extent_root, ret);
6354                                 goto out;
6355                         }
6356                         extent_slot = path->slots[0];
6357                 }
6358         } else if (WARN_ON(ret == -ENOENT)) {
6359                 btrfs_print_leaf(extent_root, path->nodes[0]);
6360                 btrfs_err(info,
6361                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
6362                         bytenr, parent, root_objectid, owner_objectid,
6363                         owner_offset);
6364                 btrfs_abort_transaction(trans, extent_root, ret);
6365                 goto out;
6366         } else {
6367                 btrfs_abort_transaction(trans, extent_root, ret);
6368                 goto out;
6369         }
6370
6371         leaf = path->nodes[0];
6372         item_size = btrfs_item_size_nr(leaf, extent_slot);
6373 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6374         if (item_size < sizeof(*ei)) {
6375                 BUG_ON(found_extent || extent_slot != path->slots[0]);
6376                 ret = convert_extent_item_v0(trans, extent_root, path,
6377                                              owner_objectid, 0);
6378                 if (ret < 0) {
6379                         btrfs_abort_transaction(trans, extent_root, ret);
6380                         goto out;
6381                 }
6382
6383                 btrfs_release_path(path);
6384                 path->leave_spinning = 1;
6385
6386                 key.objectid = bytenr;
6387                 key.type = BTRFS_EXTENT_ITEM_KEY;
6388                 key.offset = num_bytes;
6389
6390                 ret = btrfs_search_slot(trans, extent_root, &key, path,
6391                                         -1, 1);
6392                 if (ret) {
6393                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6394                                 ret, bytenr);
6395                         btrfs_print_leaf(extent_root, path->nodes[0]);
6396                 }
6397                 if (ret < 0) {
6398                         btrfs_abort_transaction(trans, extent_root, ret);
6399                         goto out;
6400                 }
6401
6402                 extent_slot = path->slots[0];
6403                 leaf = path->nodes[0];
6404                 item_size = btrfs_item_size_nr(leaf, extent_slot);
6405         }
6406 #endif
6407         BUG_ON(item_size < sizeof(*ei));
6408         ei = btrfs_item_ptr(leaf, extent_slot,
6409                             struct btrfs_extent_item);
6410         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6411             key.type == BTRFS_EXTENT_ITEM_KEY) {
6412                 struct btrfs_tree_block_info *bi;
6413                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6414                 bi = (struct btrfs_tree_block_info *)(ei + 1);
6415                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6416         }
6417
6418         refs = btrfs_extent_refs(leaf, ei);
6419         if (refs < refs_to_drop) {
6420                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
6421                           "for bytenr %Lu", refs_to_drop, refs, bytenr);
6422                 ret = -EINVAL;
6423                 btrfs_abort_transaction(trans, extent_root, ret);
6424                 goto out;
6425         }
6426         refs -= refs_to_drop;
6427
6428         if (refs > 0) {
6429                 if (extent_op)
6430                         __run_delayed_extent_op(extent_op, leaf, ei);
6431                 /*
6432                  * In the case of inline back ref, reference count will
6433                  * be updated by remove_extent_backref
6434                  */
6435                 if (iref) {
6436                         BUG_ON(!found_extent);
6437                 } else {
6438                         btrfs_set_extent_refs(leaf, ei, refs);
6439                         btrfs_mark_buffer_dirty(leaf);
6440                 }
6441                 if (found_extent) {
6442                         ret = remove_extent_backref(trans, extent_root, path,
6443                                                     iref, refs_to_drop,
6444                                                     is_data, &last_ref);
6445                         if (ret) {
6446                                 btrfs_abort_transaction(trans, extent_root, ret);
6447                                 goto out;
6448                         }
6449                 }
6450                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6451                                  root_objectid);
6452         } else {
6453                 if (found_extent) {
6454                         BUG_ON(is_data && refs_to_drop !=
6455                                extent_data_ref_count(path, iref));
6456                         if (iref) {
6457                                 BUG_ON(path->slots[0] != extent_slot);
6458                         } else {
6459                                 BUG_ON(path->slots[0] != extent_slot + 1);
6460                                 path->slots[0] = extent_slot;
6461                                 num_to_del = 2;
6462                         }
6463                 }
6464
6465                 last_ref = 1;
6466                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6467                                       num_to_del);
6468                 if (ret) {
6469                         btrfs_abort_transaction(trans, extent_root, ret);
6470                         goto out;
6471                 }
6472                 btrfs_release_path(path);
6473
6474                 if (is_data) {
6475                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6476                         if (ret) {
6477                                 btrfs_abort_transaction(trans, extent_root, ret);
6478                                 goto out;
6479                         }
6480                 }
6481
6482                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
6483                 if (ret) {
6484                         btrfs_abort_transaction(trans, extent_root, ret);
6485                         goto out;
6486                 }
6487         }
6488         btrfs_release_path(path);
6489
6490 out:
6491         btrfs_free_path(path);
6492         return ret;
6493 }
6494
6495 /*
6496  * when we free a block, it is possible (and likely) that we free the last
6497  * delayed ref for that extent as well.  This searches the delayed ref tree for
6498  * a given extent, and if there are no other delayed refs to be processed, it
6499  * removes it from the tree.
6500  */
6501 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6502                                       struct btrfs_root *root, u64 bytenr)
6503 {
6504         struct btrfs_delayed_ref_head *head;
6505         struct btrfs_delayed_ref_root *delayed_refs;
6506         int ret = 0;
6507
6508         delayed_refs = &trans->transaction->delayed_refs;
6509         spin_lock(&delayed_refs->lock);
6510         head = btrfs_find_delayed_ref_head(trans, bytenr);
6511         if (!head)
6512                 goto out_delayed_unlock;
6513
6514         spin_lock(&head->lock);
6515         if (!list_empty(&head->ref_list))
6516                 goto out;
6517
6518         if (head->extent_op) {
6519                 if (!head->must_insert_reserved)
6520                         goto out;
6521                 btrfs_free_delayed_extent_op(head->extent_op);
6522                 head->extent_op = NULL;
6523         }
6524
6525         /*
6526          * waiting for the lock here would deadlock.  If someone else has it
6527          * locked they are already in the process of dropping it anyway
6528          */
6529         if (!mutex_trylock(&head->mutex))
6530                 goto out;
6531
6532         /*
6533          * at this point we have a head with no other entries.  Go
6534          * ahead and process it.
6535          */
6536         head->node.in_tree = 0;
6537         rb_erase(&head->href_node, &delayed_refs->href_root);
6538
6539         atomic_dec(&delayed_refs->num_entries);
6540
6541         /*
6542          * we don't take a ref on the node because we're removing it from the
6543          * tree, so we just steal the ref the tree was holding.
6544          */
6545         delayed_refs->num_heads--;
6546         if (head->processing == 0)
6547                 delayed_refs->num_heads_ready--;
6548         head->processing = 0;
6549         spin_unlock(&head->lock);
6550         spin_unlock(&delayed_refs->lock);
6551
6552         BUG_ON(head->extent_op);
6553         if (head->must_insert_reserved)
6554                 ret = 1;
6555
6556         mutex_unlock(&head->mutex);
6557         btrfs_put_delayed_ref(&head->node);
6558         return ret;
6559 out:
6560         spin_unlock(&head->lock);
6561
6562 out_delayed_unlock:
6563         spin_unlock(&delayed_refs->lock);
6564         return 0;
6565 }
6566
6567 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6568                            struct btrfs_root *root,
6569                            struct extent_buffer *buf,
6570                            u64 parent, int last_ref)
6571 {
6572         int pin = 1;
6573         int ret;
6574
6575         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6576                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6577                                         buf->start, buf->len,
6578                                         parent, root->root_key.objectid,
6579                                         btrfs_header_level(buf),
6580                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
6581                 BUG_ON(ret); /* -ENOMEM */
6582         }
6583
6584         if (!last_ref)
6585                 return;
6586
6587         if (btrfs_header_generation(buf) == trans->transid) {
6588                 struct btrfs_block_group_cache *cache;
6589
6590                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6591                         ret = check_ref_cleanup(trans, root, buf->start);
6592                         if (!ret)
6593                                 goto out;
6594                 }
6595
6596                 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6597
6598                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6599                         pin_down_extent(root, cache, buf->start, buf->len, 1);
6600                         btrfs_put_block_group(cache);
6601                         goto out;
6602                 }
6603
6604                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6605
6606                 btrfs_add_free_space(cache, buf->start, buf->len);
6607                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6608                 btrfs_put_block_group(cache);
6609                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6610                 pin = 0;
6611         }
6612 out:
6613         if (pin)
6614                 add_pinned_bytes(root->fs_info, buf->len,
6615                                  btrfs_header_level(buf),
6616                                  root->root_key.objectid);
6617
6618         /*
6619          * Deleting the buffer, clear the corrupt flag since it doesn't matter
6620          * anymore.
6621          */
6622         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6623 }
6624
6625 /* Can return -ENOMEM */
6626 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6627                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6628                       u64 owner, u64 offset, int no_quota)
6629 {
6630         int ret;
6631         struct btrfs_fs_info *fs_info = root->fs_info;
6632
6633         if (btrfs_test_is_dummy_root(root))
6634                 return 0;
6635
6636         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6637
6638         /*
6639          * tree log blocks never actually go into the extent allocation
6640          * tree, just update pinning info and exit early.
6641          */
6642         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6643                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6644                 /* unlocks the pinned mutex */
6645                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6646                 ret = 0;
6647         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6648                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6649                                         num_bytes,
6650                                         parent, root_objectid, (int)owner,
6651                                         BTRFS_DROP_DELAYED_REF, NULL, no_quota);
6652         } else {
6653                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6654                                                 num_bytes,
6655                                                 parent, root_objectid, owner,
6656                                                 offset, BTRFS_DROP_DELAYED_REF,
6657                                                 NULL, no_quota);
6658         }
6659         return ret;
6660 }
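
/*
 * Illustrative sketch, not part of the original source: how a
 * hypothetical caller frees a file data extent through the helper
 * above.  The parameter names are invented for the example; real
 * callers take bytenr/num_bytes and the owner/offset pair from an
 * existing file extent item.
 */
static inline int example_free_file_extent(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           u64 bytenr, u64 num_bytes,
                                           u64 ino, u64 file_offset)
{
        /*
         * ino >= BTRFS_FIRST_FREE_OBJECTID, so this takes the data-ref
         * branch above and queues a BTRFS_DROP_DELAYED_REF instead of
         * editing the extent tree synchronously.
         */
        return btrfs_free_extent(trans, root, bytenr, num_bytes,
                                 0 /* parent: not a shared backref */,
                                 root->root_key.objectid, ino, file_offset,
                                 0 /* no_quota */);
}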
6661
6662 /*
6663  * when we wait for progress in the block group caching, it's because
6664  * our allocation attempt failed at least once.  So, we must sleep
6665  * and let some progress happen before we try again.
6666  *
6667  * This function will sleep at least once waiting for new free space to
6668  * show up, and then it will check the block group free space numbers
6669  * for our min num_bytes.  Another option is to have it go ahead
6670  * and look in the rbtree for a free extent of a given size, but this
6671  * is a good start.
6672  *
6673  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6674  * any of the information in this block group.
6675  */
6676 static noinline void
6677 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6678                                 u64 num_bytes)
6679 {
6680         struct btrfs_caching_control *caching_ctl;
6681
6682         caching_ctl = get_caching_control(cache);
6683         if (!caching_ctl)
6684                 return;
6685
6686         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6687                    (cache->free_space_ctl->free_space >= num_bytes));
6688
6689         put_caching_control(caching_ctl);
6690 }
6691
6692 static noinline int
6693 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6694 {
6695         struct btrfs_caching_control *caching_ctl;
6696         int ret = 0;
6697
6698         caching_ctl = get_caching_control(cache);
6699         if (!caching_ctl)
6700                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6701
6702         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6703         if (cache->cached == BTRFS_CACHE_ERROR)
6704                 ret = -EIO;
6705         put_caching_control(caching_ctl);
6706         return ret;
6707 }
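
/*
 * Illustrative sketch, not part of the original source: the typical way
 * the allocator below consumes the two helpers above - block until the
 * caching kthread has made enough progress, then bail if caching failed.
 */
static inline int example_wait_for_free_space(struct btrfs_block_group_cache *bg,
                                              u64 num_bytes)
{
        /* sleeps until num_bytes is free or caching is done/errored */
        wait_block_group_cache_progress(bg, num_bytes);
        if (bg->cached == BTRFS_CACHE_ERROR)
                return -EIO;
        return 0;
}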
6708
6709 int __get_raid_index(u64 flags)
6710 {
6711         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6712                 return BTRFS_RAID_RAID10;
6713         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6714                 return BTRFS_RAID_RAID1;
6715         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6716                 return BTRFS_RAID_DUP;
6717         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6718                 return BTRFS_RAID_RAID0;
6719         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6720                 return BTRFS_RAID_RAID5;
6721         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6722                 return BTRFS_RAID_RAID6;
6723
6724         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6725 }
6726
6727 int get_block_group_index(struct btrfs_block_group_cache *cache)
6728 {
6729         return __get_raid_index(cache->flags);
6730 }
6731
6732 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6733         [BTRFS_RAID_RAID10]     = "raid10",
6734         [BTRFS_RAID_RAID1]      = "raid1",
6735         [BTRFS_RAID_DUP]        = "dup",
6736         [BTRFS_RAID_RAID0]      = "raid0",
6737         [BTRFS_RAID_SINGLE]     = "single",
6738         [BTRFS_RAID_RAID5]      = "raid5",
6739         [BTRFS_RAID_RAID6]      = "raid6",
6740 };
6741
6742 static const char *get_raid_name(enum btrfs_raid_types type)
6743 {
6744         if (type >= BTRFS_NR_RAID_TYPES)
6745                 return NULL;
6746
6747         return btrfs_raid_type_names[type];
6748 }
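
/*
 * Illustrative sketch, not part of the original source: the helpers
 * above compose naturally whenever a human-readable raid level is
 * wanted, e.g. in a debug message.
 */
static inline const char *example_bg_raid_name(struct btrfs_block_group_cache *cache)
{
        /* get_block_group_index() folds the flag bits to an enum index */
        return get_raid_name(get_block_group_index(cache));
}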
6749
6750 enum btrfs_loop_type {
6751         LOOP_CACHING_NOWAIT = 0,
6752         LOOP_CACHING_WAIT = 1,
6753         LOOP_ALLOC_CHUNK = 2,
6754         LOOP_NO_EMPTY_SIZE = 3,
6755 };
6756
6757 static inline void
6758 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6759                        int delalloc)
6760 {
6761         if (delalloc)
6762                 down_read(&cache->data_rwsem);
6763 }
6764
6765 static inline void
6766 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6767                        int delalloc)
6768 {
6769         btrfs_get_block_group(cache);
6770         if (delalloc)
6771                 down_read(&cache->data_rwsem);
6772 }
6773
6774 static struct btrfs_block_group_cache *
6775 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6776                    struct btrfs_free_cluster *cluster,
6777                    int delalloc)
6778 {
6779         struct btrfs_block_group_cache *used_bg;
6780         bool locked = false;
6781 again:
6782         spin_lock(&cluster->refill_lock);
6783         if (locked) {
6784                 if (used_bg == cluster->block_group)
6785                         return used_bg;
6786
6787                 up_read(&used_bg->data_rwsem);
6788                 btrfs_put_block_group(used_bg);
6789         }
6790
6791         used_bg = cluster->block_group;
6792         if (!used_bg)
6793                 return NULL;
6794
6795         if (used_bg == block_group)
6796                 return used_bg;
6797
6798         btrfs_get_block_group(used_bg);
6799
6800         if (!delalloc)
6801                 return used_bg;
6802
6803         if (down_read_trylock(&used_bg->data_rwsem))
6804                 return used_bg;
6805
6806         spin_unlock(&cluster->refill_lock);
6807         down_read(&used_bg->data_rwsem);
6808         locked = true;
6809         goto again;
6810 }
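
/*
 * Note on the retry above (illustrative, not a comment from the
 * original source): data_rwsem may sleep, so it cannot be acquired
 * while the refill_lock spinlock is held.  The function therefore uses
 * the classic lock-ordering pattern, sketched here with hypothetical
 * names standing in for refill_lock and data_rwsem:
 *
 *      spin_lock(&outer);
 *      if (!down_read_trylock(&inner)) {
 *              spin_unlock(&outer);
 *              down_read(&inner);      // may sleep
 *              spin_lock(&outer);
 *              // revalidate: the protected state may have changed
 *      }
 */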
6811
6812 static inline void
6813 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
6814                          int delalloc)
6815 {
6816         if (delalloc)
6817                 up_read(&cache->data_rwsem);
6818         btrfs_put_block_group(cache);
6819 }
6820
6821 /*
6822  * walks the btree of allocated extents and finds a hole of a given size.
6823  * The key ins is changed to record the hole:
6824  * ins->objectid == start position
6825  * ins->type == BTRFS_EXTENT_ITEM_KEY
6826  * ins->offset == the size of the hole.
6827  * Any available blocks before search_start are skipped.
6828  *
6829  * If there is no suitable free space, we record the size of the largest
6830  * free space extent we saw, so the caller can retry with a smaller request.
6831  */
6832 static noinline int find_free_extent(struct btrfs_root *orig_root,
6833                                      u64 num_bytes, u64 empty_size,
6834                                      u64 hint_byte, struct btrfs_key *ins,
6835                                      u64 flags, int delalloc)
6836 {
6837         int ret = 0;
6838         struct btrfs_root *root = orig_root->fs_info->extent_root;
6839         struct btrfs_free_cluster *last_ptr = NULL;
6840         struct btrfs_block_group_cache *block_group = NULL;
6841         u64 search_start = 0;
6842         u64 max_extent_size = 0;
6843         int empty_cluster = 2 * 1024 * 1024;
6844         struct btrfs_space_info *space_info;
6845         int loop = 0;
6846         int index = __get_raid_index(flags);
6847         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6848                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6849         bool failed_cluster_refill = false;
6850         bool failed_alloc = false;
6851         bool use_cluster = true;
6852         bool have_caching_bg = false;
6853
6854         WARN_ON(num_bytes < root->sectorsize);
6855         ins->type = BTRFS_EXTENT_ITEM_KEY;
6856         ins->objectid = 0;
6857         ins->offset = 0;
6858
6859         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6860
6861         space_info = __find_space_info(root->fs_info, flags);
6862         if (!space_info) {
6863                 btrfs_err(root->fs_info, "No space info for %llu", flags);
6864                 return -ENOSPC;
6865         }
6866
6867         /*
6868          * If the space info is for both data and metadata it means we have a
6869          * small filesystem and we can't use the clustering stuff.
6870          */
6871         if (btrfs_mixed_space_info(space_info))
6872                 use_cluster = false;
6873
6874         if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6875                 last_ptr = &root->fs_info->meta_alloc_cluster;
6876                 if (!btrfs_test_opt(root, SSD))
6877                         empty_cluster = 64 * 1024;
6878         }
6879
6880         if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6881             btrfs_test_opt(root, SSD)) {
6882                 last_ptr = &root->fs_info->data_alloc_cluster;
6883         }
6884
6885         if (last_ptr) {
6886                 spin_lock(&last_ptr->lock);
6887                 if (last_ptr->block_group)
6888                         hint_byte = last_ptr->window_start;
6889                 spin_unlock(&last_ptr->lock);
6890         }
6891
6892         search_start = max(search_start, first_logical_byte(root, 0));
6893         search_start = max(search_start, hint_byte);
6894
6895         if (!last_ptr)
6896                 empty_cluster = 0;
6897
6898         if (search_start == hint_byte) {
6899                 block_group = btrfs_lookup_block_group(root->fs_info,
6900                                                        search_start);
6901                 /*
6902                  * we don't want to use the block group if it doesn't match our
6903                  * allocation bits, or if it's not cached.
6904                  *
6905                  * However if we are re-searching with an ideal block group
6906                  * picked out then we don't care that the block group is cached.
6907                  */
6908                 if (block_group && block_group_bits(block_group, flags) &&
6909                     block_group->cached != BTRFS_CACHE_NO) {
6910                         down_read(&space_info->groups_sem);
6911                         if (list_empty(&block_group->list) ||
6912                             block_group->ro) {
6913                                 /*
6914                                  * someone is removing this block group,
6915                                  * we can't jump into the have_block_group
6916                                  * target because our list pointers are not
6917                                  * valid
6918                                  */
6919                                 btrfs_put_block_group(block_group);
6920                                 up_read(&space_info->groups_sem);
6921                         } else {
6922                                 index = get_block_group_index(block_group);
6923                                 btrfs_lock_block_group(block_group, delalloc);
6924                                 goto have_block_group;
6925                         }
6926                 } else if (block_group) {
6927                         btrfs_put_block_group(block_group);
6928                 }
6929         }
6930 search:
6931         have_caching_bg = false;
6932         down_read(&space_info->groups_sem);
6933         list_for_each_entry(block_group, &space_info->block_groups[index],
6934                             list) {
6935                 u64 offset;
6936                 int cached;
6937
6938                 btrfs_grab_block_group(block_group, delalloc);
6939                 search_start = block_group->key.objectid;
6940
6941                 /*
6942                  * this can happen if we end up cycling through all the
6943                  * raid types, but we want to make sure we only allocate
6944                  * for the proper type.
6945                  */
6946                 if (!block_group_bits(block_group, flags)) {
6947                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
6948                                     BTRFS_BLOCK_GROUP_RAID1 |
6949                                     BTRFS_BLOCK_GROUP_RAID5 |
6950                                     BTRFS_BLOCK_GROUP_RAID6 |
6951                                     BTRFS_BLOCK_GROUP_RAID10;
6952
6953                         /*
6954                          * if they asked for extra copies and this block group
6955                          * doesn't provide them, bail.  This does allow us to
6956                          * fill raid0 from raid1.
6957                          */
6958                         if ((flags & extra) && !(block_group->flags & extra))
6959                                 goto loop;
6960                 }
6961
6962 have_block_group:
6963                 cached = block_group_cache_done(block_group);
6964                 if (unlikely(!cached)) {
6965                         ret = cache_block_group(block_group, 0);
6966                         BUG_ON(ret < 0);
6967                         ret = 0;
6968                 }
6969
6970                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6971                         goto loop;
6972                 if (unlikely(block_group->ro))
6973                         goto loop;
6974
6975                 /*
6976                  * Ok, we want to try and use the cluster allocator, so
6977                  * let's look there
6978                  */
6979                 if (last_ptr) {
6980                         struct btrfs_block_group_cache *used_block_group;
6981                         unsigned long aligned_cluster;
6982                         /*
6983                          * the refill lock keeps out other
6984                          * people trying to start a new cluster
6985                          */
6986                         used_block_group = btrfs_lock_cluster(block_group,
6987                                                               last_ptr,
6988                                                               delalloc);
6989                         if (!used_block_group)
6990                                 goto refill_cluster;
6991
6992                         if (used_block_group != block_group &&
6993                             (used_block_group->ro ||
6994                              !block_group_bits(used_block_group, flags)))
6995                                 goto release_cluster;
6996
6997                         offset = btrfs_alloc_from_cluster(used_block_group,
6998                                                 last_ptr,
6999                                                 num_bytes,
7000                                                 used_block_group->key.objectid,
7001                                                 &max_extent_size);
7002                         if (offset) {
7003                                 /* we have a block, we're done */
7004                                 spin_unlock(&last_ptr->refill_lock);
7005                                 trace_btrfs_reserve_extent_cluster(root,
7006                                                 used_block_group,
7007                                                 search_start, num_bytes);
7008                                 if (used_block_group != block_group) {
7009                                         btrfs_release_block_group(block_group,
7010                                                                   delalloc);
7011                                         block_group = used_block_group;
7012                                 }
7013                                 goto checks;
7014                         }
7015
7016                         WARN_ON(last_ptr->block_group != used_block_group);
7017 release_cluster:
7018                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
7019                          * set up a new cluster, so let's just skip it
7020                          * and let the allocator find whatever block
7021                          * it can find.  If we reach this point, we
7022                          * will have tried the cluster allocator
7023                          * plenty of times and not have found
7024                          * anything, so we are likely way too
7025                          * fragmented for the clustering stuff to find
7026                          * anything.
7027                          *
7028                          * However, if the cluster is taken from the
7029                          * current block group, release the cluster
7030                          * first, so that we stand a better chance of
7031                          * succeeding in the unclustered
7032                          * allocation.  */
7033                         if (loop >= LOOP_NO_EMPTY_SIZE &&
7034                             used_block_group != block_group) {
7035                                 spin_unlock(&last_ptr->refill_lock);
7036                                 btrfs_release_block_group(used_block_group,
7037                                                           delalloc);
7038                                 goto unclustered_alloc;
7039                         }
7040
7041                         /*
7042                          * this cluster didn't work out, free it and
7043                          * start over
7044                          */
7045                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7046
7047                         if (used_block_group != block_group)
7048                                 btrfs_release_block_group(used_block_group,
7049                                                           delalloc);
7050 refill_cluster:
7051                         if (loop >= LOOP_NO_EMPTY_SIZE) {
7052                                 spin_unlock(&last_ptr->refill_lock);
7053                                 goto unclustered_alloc;
7054                         }
7055
7056                         aligned_cluster = max_t(unsigned long,
7057                                                 empty_cluster + empty_size,
7058                                                 block_group->full_stripe_len);
7059
7060                         /* allocate a cluster in this block group */
7061                         ret = btrfs_find_space_cluster(root, block_group,
7062                                                        last_ptr, search_start,
7063                                                        num_bytes,
7064                                                        aligned_cluster);
7065                         if (ret == 0) {
7066                                 /*
7067                                  * now pull our allocation out of this
7068                                  * cluster
7069                                  */
7070                                 offset = btrfs_alloc_from_cluster(block_group,
7071                                                         last_ptr,
7072                                                         num_bytes,
7073                                                         search_start,
7074                                                         &max_extent_size);
7075                                 if (offset) {
7076                                         /* we found one, proceed */
7077                                         spin_unlock(&last_ptr->refill_lock);
7078                                         trace_btrfs_reserve_extent_cluster(root,
7079                                                 block_group, search_start,
7080                                                 num_bytes);
7081                                         goto checks;
7082                                 }
7083                         } else if (!cached && loop > LOOP_CACHING_NOWAIT &&
7084                                    !failed_cluster_refill) {
7085                                 spin_unlock(&last_ptr->refill_lock);
7086
7087                                 failed_cluster_refill = true;
7088                                 wait_block_group_cache_progress(block_group,
7089                                        num_bytes + empty_cluster + empty_size);
7090                                 goto have_block_group;
7091                         }
7092
7093                         /*
7094                          * at this point we either didn't find a cluster
7095                          * or we weren't able to allocate a block from our
7096                          * cluster.  Free the cluster we've been trying
7097                          * to use, and go to the next block group
7098                          */
7099                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7100                         spin_unlock(&last_ptr->refill_lock);
7101                         goto loop;
7102                 }
7103
7104 unclustered_alloc:
7105                 spin_lock(&block_group->free_space_ctl->tree_lock);
7106                 if (cached &&
7107                     block_group->free_space_ctl->free_space <
7108                     num_bytes + empty_cluster + empty_size) {
7109                         if (block_group->free_space_ctl->free_space >
7110                             max_extent_size)
7111                                 max_extent_size =
7112                                         block_group->free_space_ctl->free_space;
7113                         spin_unlock(&block_group->free_space_ctl->tree_lock);
7114                         goto loop;
7115                 }
7116                 spin_unlock(&block_group->free_space_ctl->tree_lock);
7117
7118                 offset = btrfs_find_space_for_alloc(block_group, search_start,
7119                                                     num_bytes, empty_size,
7120                                                     &max_extent_size);
7121                 /*
7122                  * If we didn't find a chunk, and we haven't failed on this
7123                  * block group before, and this block group is in the middle of
7124                  * caching and we are ok with waiting, then go ahead and wait
7125                  * for progress to be made, and set failed_alloc to true.
7126                  *
7127                  * If failed_alloc is true then we've already waited on this
7128                  * block group once and should move on to the next block group.
7129                  */
7130                 if (!offset && !failed_alloc && !cached &&
7131                     loop > LOOP_CACHING_NOWAIT) {
7132                         wait_block_group_cache_progress(block_group,
7133                                                 num_bytes + empty_size);
7134                         failed_alloc = true;
7135                         goto have_block_group;
7136                 } else if (!offset) {
7137                         if (!cached)
7138                                 have_caching_bg = true;
7139                         goto loop;
7140                 }
7141 checks:
7142                 search_start = ALIGN(offset, root->stripesize);
7143
7144                 /* move on to the next group */
7145                 if (search_start + num_bytes >
7146                     block_group->key.objectid + block_group->key.offset) {
7147                         btrfs_add_free_space(block_group, offset, num_bytes);
7148                         goto loop;
7149                 }
7150
7151                 if (offset < search_start)
7152                         btrfs_add_free_space(block_group, offset,
7153                                              search_start - offset);
7154                 BUG_ON(offset > search_start);
7155
7156                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
7157                                                   alloc_type, delalloc);
7158                 if (ret == -EAGAIN) {
7159                         btrfs_add_free_space(block_group, offset, num_bytes);
7160                         goto loop;
7161                 }
7162
7163                 /* we are all good, let's return */
7164                 ins->objectid = search_start;
7165                 ins->offset = num_bytes;
7166
7167                 trace_btrfs_reserve_extent(orig_root, block_group,
7168                                            search_start, num_bytes);
7169                 btrfs_release_block_group(block_group, delalloc);
7170                 break;
7171 loop:
7172                 failed_cluster_refill = false;
7173                 failed_alloc = false;
7174                 BUG_ON(index != get_block_group_index(block_group));
7175                 btrfs_release_block_group(block_group, delalloc);
7176         }
7177         up_read(&space_info->groups_sem);
7178
7179         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
7180                 goto search;
7181
7182         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
7183                 goto search;
7184
7185         /*
7186          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7187          *                      caching kthreads as we move along
7188          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7189          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7190          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7191          *                      again
7192          */
7193         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
7194                 index = 0;
7195                 loop++;
7196                 if (loop == LOOP_ALLOC_CHUNK) {
7197                         struct btrfs_trans_handle *trans;
7198                         int exist = 0;
7199
7200                         trans = current->journal_info;
7201                         if (trans)
7202                                 exist = 1;
7203                         else
7204                                 trans = btrfs_join_transaction(root);
7205
7206                         if (IS_ERR(trans)) {
7207                                 ret = PTR_ERR(trans);
7208                                 goto out;
7209                         }
7210
7211                         ret = do_chunk_alloc(trans, root, flags,
7212                                              CHUNK_ALLOC_FORCE);
7213                         /*
7214                          * Do not bail out on ENOSPC since we
7215                          * can do more things.
7216                          */
7217                         if (ret < 0 && ret != -ENOSPC)
7218                                 btrfs_abort_transaction(trans,
7219                                                         root, ret);
7220                         else
7221                                 ret = 0;
7222                         if (!exist)
7223                                 btrfs_end_transaction(trans, root);
7224                         if (ret)
7225                                 goto out;
7226                 }
7227
7228                 if (loop == LOOP_NO_EMPTY_SIZE) {
7229                         empty_size = 0;
7230                         empty_cluster = 0;
7231                 }
7232
7233                 goto search;
7234         } else if (!ins->objectid) {
7235                 ret = -ENOSPC;
7236         } else {
7237                 ret = 0;
7238         }
7239 out:
7240         if (ret == -ENOSPC)
7241                 ins->offset = max_extent_size;
7242         return ret;
7243 }
7244
7245 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
7246                             int dump_block_groups)
7247 {
7248         struct btrfs_block_group_cache *cache;
7249         int index = 0;
7250
7251         spin_lock(&info->lock);
7252         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
7253                info->flags,
7254                info->total_bytes - info->bytes_used - info->bytes_pinned -
7255                info->bytes_reserved - info->bytes_readonly,
7256                (info->full) ? "" : "not ");
7257         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
7258                "reserved=%llu, may_use=%llu, readonly=%llu\n",
7259                info->total_bytes, info->bytes_used, info->bytes_pinned,
7260                info->bytes_reserved, info->bytes_may_use,
7261                info->bytes_readonly);
7262         spin_unlock(&info->lock);
7263
7264         if (!dump_block_groups)
7265                 return;
7266
7267         down_read(&info->groups_sem);
7268 again:
7269         list_for_each_entry(cache, &info->block_groups[index], list) {
7270                 spin_lock(&cache->lock);
7271                 printk(KERN_INFO "BTRFS: "
7272                            "block group %llu has %llu bytes, "
7273                            "%llu used %llu pinned %llu reserved %s\n",
7274                        cache->key.objectid, cache->key.offset,
7275                        btrfs_block_group_used(&cache->item), cache->pinned,
7276                        cache->reserved, cache->ro ? "[readonly]" : "");
7277                 btrfs_dump_free_space(cache, bytes);
7278                 spin_unlock(&cache->lock);
7279         }
7280         if (++index < BTRFS_NR_RAID_TYPES)
7281                 goto again;
7282         up_read(&info->groups_sem);
7283 }
7284
7285 int btrfs_reserve_extent(struct btrfs_root *root,
7286                          u64 num_bytes, u64 min_alloc_size,
7287                          u64 empty_size, u64 hint_byte,
7288                          struct btrfs_key *ins, int is_data, int delalloc)
7289 {
7290         bool final_tried = false;
7291         u64 flags;
7292         int ret;
7293
7294         flags = btrfs_get_alloc_profile(root, is_data);
7295 again:
7296         WARN_ON(num_bytes < root->sectorsize);
7297         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
7298                                flags, delalloc);
7299
7300         if (ret == -ENOSPC) {
7301                 if (!final_tried && ins->offset) {
7302                         num_bytes = min(num_bytes >> 1, ins->offset);
7303                         num_bytes = round_down(num_bytes, root->sectorsize);
7304                         num_bytes = max(num_bytes, min_alloc_size);
7305                         if (num_bytes == min_alloc_size)
7306                                 final_tried = true;
7307                         goto again;
7308                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7309                         struct btrfs_space_info *sinfo;
7310
7311                         sinfo = __find_space_info(root->fs_info, flags);
7312                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
7313                                 flags, num_bytes);
7314                         if (sinfo)
7315                                 dump_space_info(sinfo, num_bytes, 1);
7316                 }
7317         }
7318
7319         return ret;
7320 }
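
/*
 * Illustrative sketch, not part of the original source: a hypothetical
 * data allocation through btrfs_reserve_extent().  On -ENOSPC the
 * helper above already retries with num_bytes halved down to
 * min_alloc_size, so a caller only deals with the final verdict.
 */
static inline int example_reserve_data_extent(struct btrfs_root *root,
                                              u64 want, u64 min_size,
                                              struct btrfs_key *ins)
{
        /* is_data=1 picks the data alloc profile; no delalloc locking */
        return btrfs_reserve_extent(root, want, min_size, 0 /* empty_size */,
                                    0 /* hint_byte */, ins, 1 /* is_data */,
                                    0 /* delalloc */);
}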
7321
7322 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
7323                                         u64 start, u64 len,
7324                                         int pin, int delalloc)
7325 {
7326         struct btrfs_block_group_cache *cache;
7327         int ret = 0;
7328
7329         cache = btrfs_lookup_block_group(root->fs_info, start);
7330         if (!cache) {
7331                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
7332                         start);
7333                 return -ENOSPC;
7334         }
7335
7336         if (pin) {
7337                 pin_down_extent(root, cache, start, len, 1);
7338         } else {
7339                 if (btrfs_test_opt(root, DISCARD))
7340                         ret = btrfs_discard_extent(root, start, len, NULL);
7341                 btrfs_add_free_space(cache, start, len);
7342                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
7343         }
7344
7345         btrfs_put_block_group(cache);
7346
7347         trace_btrfs_reserved_extent_free(root, start, len);
7348
7349         return ret;
7350 }
7351
7352 int btrfs_free_reserved_extent(struct btrfs_root *root,
7353                                u64 start, u64 len, int delalloc)
7354 {
7355         return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
7356 }
7357
7358 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
7359                                        u64 start, u64 len)
7360 {
7361         return __btrfs_free_reserved_extent(root, start, len, 1, 0);
7362 }
7363
7364 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7365                                       struct btrfs_root *root,
7366                                       u64 parent, u64 root_objectid,
7367                                       u64 flags, u64 owner, u64 offset,
7368                                       struct btrfs_key *ins, int ref_mod)
7369 {
7370         int ret;
7371         struct btrfs_fs_info *fs_info = root->fs_info;
7372         struct btrfs_extent_item *extent_item;
7373         struct btrfs_extent_inline_ref *iref;
7374         struct btrfs_path *path;
7375         struct extent_buffer *leaf;
7376         int type;
7377         u32 size;
7378
7379         if (parent > 0)
7380                 type = BTRFS_SHARED_DATA_REF_KEY;
7381         else
7382                 type = BTRFS_EXTENT_DATA_REF_KEY;
7383
7384         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7385
7386         path = btrfs_alloc_path();
7387         if (!path)
7388                 return -ENOMEM;
7389
7390         path->leave_spinning = 1;
7391         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7392                                       ins, size);
7393         if (ret) {
7394                 btrfs_free_path(path);
7395                 return ret;
7396         }
7397
7398         leaf = path->nodes[0];
7399         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7400                                      struct btrfs_extent_item);
7401         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7402         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7403         btrfs_set_extent_flags(leaf, extent_item,
7404                                flags | BTRFS_EXTENT_FLAG_DATA);
7405
7406         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7407         btrfs_set_extent_inline_ref_type(leaf, iref, type);
7408         if (parent > 0) {
7409                 struct btrfs_shared_data_ref *ref;
7410                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
7411                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7412                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7413         } else {
7414                 struct btrfs_extent_data_ref *ref;
7415                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7416                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7417                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7418                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7419                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7420         }
7421
7422         btrfs_mark_buffer_dirty(path->nodes[0]);
7423         btrfs_free_path(path);
7424
7425         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
7426         if (ret) { /* -ENOENT, logic error */
7427                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7428                         ins->objectid, ins->offset);
7429                 BUG();
7430         }
7431         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7432         return ret;
7433 }
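
/*
 * For reference (a summary derived from the code above, not a comment
 * from the original source), the two item layouts written into the
 * extent tree leaf are:
 *
 *      parent > 0 (shared):
 *        [ btrfs_extent_item | iref type SHARED_DATA_REF_KEY,
 *          offset = parent | btrfs_shared_data_ref (count) ]
 *
 *      parent == 0 (keyed):
 *        [ btrfs_extent_item | iref type EXTENT_DATA_REF_KEY
 *          | btrfs_extent_data_ref (root, objectid, offset, count) ]
 */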
7434
7435 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7436                                      struct btrfs_root *root,
7437                                      u64 parent, u64 root_objectid,
7438                                      u64 flags, struct btrfs_disk_key *key,
7439                                      int level, struct btrfs_key *ins,
7440                                      int no_quota)
7441 {
7442         int ret;
7443         struct btrfs_fs_info *fs_info = root->fs_info;
7444         struct btrfs_extent_item *extent_item;
7445         struct btrfs_tree_block_info *block_info;
7446         struct btrfs_extent_inline_ref *iref;
7447         struct btrfs_path *path;
7448         struct extent_buffer *leaf;
7449         u32 size = sizeof(*extent_item) + sizeof(*iref);
7450         u64 num_bytes = ins->offset;
7451         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7452                                                  SKINNY_METADATA);
7453
7454         if (!skinny_metadata)
7455                 size += sizeof(*block_info);
7456
7457         path = btrfs_alloc_path();
7458         if (!path) {
7459                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7460                                                    root->nodesize);
7461                 return -ENOMEM;
7462         }
7463
7464         path->leave_spinning = 1;
7465         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7466                                       ins, size);
7467         if (ret) {
7468                 btrfs_free_path(path);
7469                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7470                                                    root->nodesize);
7471                 return ret;
7472         }
7473
7474         leaf = path->nodes[0];
7475         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7476                                      struct btrfs_extent_item);
7477         btrfs_set_extent_refs(leaf, extent_item, 1);
7478         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7479         btrfs_set_extent_flags(leaf, extent_item,
7480                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7481
7482         if (skinny_metadata) {
7483                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7484                 num_bytes = root->nodesize;
7485         } else {
7486                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7487                 btrfs_set_tree_block_key(leaf, block_info, key);
7488                 btrfs_set_tree_block_level(leaf, block_info, level);
7489                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7490         }
7491
7492         if (parent > 0) {
7493                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7494                 btrfs_set_extent_inline_ref_type(leaf, iref,
7495                                                  BTRFS_SHARED_BLOCK_REF_KEY);
7496                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7497         } else {
7498                 btrfs_set_extent_inline_ref_type(leaf, iref,
7499                                                  BTRFS_TREE_BLOCK_REF_KEY);
7500                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7501         }
7502
7503         btrfs_mark_buffer_dirty(leaf);
7504         btrfs_free_path(path);
7505
7506         ret = update_block_group(trans, root, ins->objectid, root->nodesize,
7507                                  1);
7508         if (ret) { /* -ENOENT, logic error */
7509                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7510                         ins->objectid, ins->offset);
7511                 BUG();
7512         }
7513
7514         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7515         return ret;
7516 }
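
/*
 * For reference (a summary derived from the code above): with
 * SKINNY_METADATA the inline ref immediately follows the extent item,
 * while the classic layout carries a btrfs_tree_block_info (key and
 * level) in between:
 *
 *      skinny:  [ btrfs_extent_item | inline ref ]
 *      classic: [ btrfs_extent_item | btrfs_tree_block_info | inline ref ]
 */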
7517
7518 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7519                                      struct btrfs_root *root,
7520                                      u64 root_objectid, u64 owner,
7521                                      u64 offset, struct btrfs_key *ins)
7522 {
7523         int ret;
7524
7525         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7526
7527         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7528                                          ins->offset, 0,
7529                                          root_objectid, owner, offset,
7530                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
7531         return ret;
7532 }
7533
7534 /*
7535  * this is used by the tree logging recovery code.  It records that
7536  * an extent has been allocated and makes sure to clear the free
7537  * space cache bits as well
7538  */
7539 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7540                                    struct btrfs_root *root,
7541                                    u64 root_objectid, u64 owner, u64 offset,
7542                                    struct btrfs_key *ins)
7543 {
7544         int ret;
7545         struct btrfs_block_group_cache *block_group;
7546
7547         /*
7548          * Mixed block groups will exclude before processing the log so we only
7549          * need to do the exclude dance if this fs isn't mixed.
7550          */
7551         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7552                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7553                 if (ret)
7554                         return ret;
7555         }
7556
7557         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7558         if (!block_group)
7559                 return -EINVAL;
7560
7561         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7562                                           RESERVE_ALLOC_NO_ACCOUNT, 0);
7563         BUG_ON(ret); /* logic error */
7564         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7565                                          0, owner, offset, ins, 1);
7566         btrfs_put_block_group(block_group);
7567         return ret;
7568 }
7569
7570 static struct extent_buffer *
7571 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7572                       u64 bytenr, int level)
7573 {
7574         struct extent_buffer *buf;
7575
7576         buf = btrfs_find_create_tree_block(root, bytenr);
7577         if (!buf)
7578                 return ERR_PTR(-ENOMEM);
7579         btrfs_set_header_generation(buf, trans->transid);
7580         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7581         btrfs_tree_lock(buf);
7582         clean_tree_block(trans, root->fs_info, buf);
7583         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7584
7585         btrfs_set_lock_blocking(buf);
7586         btrfs_set_buffer_uptodate(buf);
7587
7588         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7589                 buf->log_index = root->log_transid % 2;
7590                 /*
7591                  * we allow two log transactions at a time, and use different
7592                  * EXTENT bits to differentiate dirty pages.
7593                  */
7594                 if (buf->log_index == 0)
7595                         set_extent_dirty(&root->dirty_log_pages, buf->start,
7596                                         buf->start + buf->len - 1, GFP_NOFS);
7597                 else
7598                         set_extent_new(&root->dirty_log_pages, buf->start,
7599                                         buf->start + buf->len - 1, GFP_NOFS);
7600         } else {
7601                 buf->log_index = -1;
7602                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7603                          buf->start + buf->len - 1, GFP_NOFS);
7604         }
7605         trans->blocks_used++;
7606         /* this returns a buffer locked for blocking */
7607         return buf;
7608 }
7609
7610 static struct btrfs_block_rsv *
7611 use_block_rsv(struct btrfs_trans_handle *trans,
7612               struct btrfs_root *root, u32 blocksize)
7613 {
7614         struct btrfs_block_rsv *block_rsv;
7615         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7616         int ret;
7617         bool global_updated = false;
7618
7619         block_rsv = get_block_rsv(trans, root);
7620
7621         if (unlikely(block_rsv->size == 0))
7622                 goto try_reserve;
7623 again:
7624         ret = block_rsv_use_bytes(block_rsv, blocksize);
7625         if (!ret)
7626                 return block_rsv;
7627
7628         if (block_rsv->failfast)
7629                 return ERR_PTR(ret);
7630
7631         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7632                 global_updated = true;
7633                 update_global_block_rsv(root->fs_info);
7634                 goto again;
7635         }
7636
7637         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7638                 static DEFINE_RATELIMIT_STATE(_rs,
7639                                 DEFAULT_RATELIMIT_INTERVAL * 10,
7640                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
7641                 if (__ratelimit(&_rs))
7642                         WARN(1, KERN_DEBUG
7643                                 "BTRFS: block rsv returned %d\n", ret);
7644         }
7645 try_reserve:
7646         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7647                                      BTRFS_RESERVE_NO_FLUSH);
7648         if (!ret)
7649                 return block_rsv;
7650         /*
7651          * If we couldn't reserve metadata bytes, try to use some from
7652          * the global reserve, as long as this reservation shares the
7653          * global reserve's space_info.
7654          */
7655         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7656             block_rsv->space_info == global_rsv->space_info) {
7657                 ret = block_rsv_use_bytes(global_rsv, blocksize);
7658                 if (!ret)
7659                         return global_rsv;
7660         }
7661         return ERR_PTR(ret);
7662 }
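
/*
 * Fallback order implemented above, summarized (not a comment from the
 * original source): (1) consume bytes from the transaction's block rsv;
 * (2) if that is the global rsv, refresh its size once and retry;
 * (3) reserve fresh metadata bytes with BTRFS_RESERVE_NO_FLUSH;
 * (4) as a last resort, steal from the global rsv when both share a
 * space_info.  failfast rsvs give up right after step (1).
 */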
7663
7664 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7665                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
7666 {
7667         block_rsv_add_bytes(block_rsv, blocksize, 0);
7668         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7669 }
7670
7671 /*
7672  * finds a free extent and does all the dirty work required for allocation
7673  * returns the tree buffer or an ERR_PTR on error.
7674  */
7675 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7676                                         struct btrfs_root *root,
7677                                         u64 parent, u64 root_objectid,
7678                                         struct btrfs_disk_key *key, int level,
7679                                         u64 hint, u64 empty_size)
7680 {
7681         struct btrfs_key ins;
7682         struct btrfs_block_rsv *block_rsv;
7683         struct extent_buffer *buf;
7684         struct btrfs_delayed_extent_op *extent_op;
7685         u64 flags = 0;
7686         int ret;
7687         u32 blocksize = root->nodesize;
7688         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7689                                                  SKINNY_METADATA);
7690
7691         if (btrfs_test_is_dummy_root(root)) {
7692                 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
7693                                             level);
7694                 if (!IS_ERR(buf))
7695                         root->alloc_bytenr += blocksize;
7696                 return buf;
7697         }
7698
7699         block_rsv = use_block_rsv(trans, root, blocksize);
7700         if (IS_ERR(block_rsv))
7701                 return ERR_CAST(block_rsv);
7702
7703         ret = btrfs_reserve_extent(root, blocksize, blocksize,
7704                                    empty_size, hint, &ins, 0, 0);
7705         if (ret)
7706                 goto out_unuse;
7707
7708         buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
7709         if (IS_ERR(buf)) {
7710                 ret = PTR_ERR(buf);
7711                 goto out_free_reserved;
7712         }
7713
7714         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7715                 if (parent == 0)
7716                         parent = ins.objectid;
7717                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7718         } else
7719                 BUG_ON(parent > 0);
7720
7721         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7722                 extent_op = btrfs_alloc_delayed_extent_op();
7723                 if (!extent_op) {
7724                         ret = -ENOMEM;
7725                         goto out_free_buf;
7726                 }
7727                 if (key)
7728                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
7729                 else
7730                         memset(&extent_op->key, 0, sizeof(extent_op->key));
7731                 extent_op->flags_to_set = flags;
7732                 if (skinny_metadata)
7733                         extent_op->update_key = 0;
7734                 else
7735                         extent_op->update_key = 1;
7736                 extent_op->update_flags = 1;
7737                 extent_op->is_data = 0;
7738                 extent_op->level = level;
7739
7740                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7741                                                  ins.objectid, ins.offset,
7742                                                  parent, root_objectid, level,
7743                                                  BTRFS_ADD_DELAYED_EXTENT,
7744                                                  extent_op, 0);
7745                 if (ret)
7746                         goto out_free_delayed;
7747         }
7748         return buf;
7749
7750 out_free_delayed:
7751         btrfs_free_delayed_extent_op(extent_op);
7752 out_free_buf:
7753         free_extent_buffer(buf);
7754 out_free_reserved:
7755         btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
7756 out_unuse:
7757         unuse_block_rsv(root->fs_info, block_rsv, blocksize);
7758         return ERR_PTR(ret);
7759 }
7760
7761 struct walk_control {
7762         u64 refs[BTRFS_MAX_LEVEL];
7763         u64 flags[BTRFS_MAX_LEVEL];
7764         struct btrfs_key update_progress;
7765         int stage;
7766         int level;
7767         int shared_level;
7768         int update_ref;
7769         int keep_locks;
7770         int reada_slot;
7771         int reada_count;
7772         int for_reloc;
7773 };
7774
7775 #define DROP_REFERENCE  1
7776 #define UPDATE_BACKREF  2
7777
7778 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7779                                      struct btrfs_root *root,
7780                                      struct walk_control *wc,
7781                                      struct btrfs_path *path)
7782 {
7783         u64 bytenr;
7784         u64 generation;
7785         u64 refs;
7786         u64 flags;
7787         u32 nritems;
7788         u32 blocksize;
7789         struct btrfs_key key;
7790         struct extent_buffer *eb;
7791         int ret;
7792         int slot;
7793         int nread = 0;
7794
7795         if (path->slots[wc->level] < wc->reada_slot) {
7796                 wc->reada_count = wc->reada_count * 2 / 3;
7797                 wc->reada_count = max(wc->reada_count, 2);
7798         } else {
7799                 wc->reada_count = wc->reada_count * 3 / 2;
7800                 wc->reada_count = min_t(int, wc->reada_count,
7801                                         BTRFS_NODEPTRS_PER_BLOCK(root));
7802         }
7803
7804         eb = path->nodes[wc->level];
7805         nritems = btrfs_header_nritems(eb);
7806         blocksize = root->nodesize;
7807
7808         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7809                 if (nread >= wc->reada_count)
7810                         break;
7811
7812                 cond_resched();
7813                 bytenr = btrfs_node_blockptr(eb, slot);
7814                 generation = btrfs_node_ptr_generation(eb, slot);
7815
7816                 if (slot == path->slots[wc->level])
7817                         goto reada;
7818
7819                 if (wc->stage == UPDATE_BACKREF &&
7820                     generation <= root->root_key.offset)
7821                         continue;
7822
7823                 /* We don't lock the tree block, it's OK to be racy here */
7824                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7825                                                wc->level - 1, 1, &refs,
7826                                                &flags);
7827                 /* We don't care about errors in readahead. */
7828                 if (ret < 0)
7829                         continue;
7830                 BUG_ON(refs == 0);
7831
7832                 if (wc->stage == DROP_REFERENCE) {
7833                         if (refs == 1)
7834                                 goto reada;
7835
7836                         if (wc->level == 1 &&
7837                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7838                                 continue;
7839                         if (!wc->update_ref ||
7840                             generation <= root->root_key.offset)
7841                                 continue;
7842                         btrfs_node_key_to_cpu(eb, &key, slot);
7843                         ret = btrfs_comp_cpu_keys(&key,
7844                                                   &wc->update_progress);
7845                         if (ret < 0)
7846                                 continue;
7847                 } else {
7848                         if (wc->level == 1 &&
7849                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7850                                 continue;
7851                 }
7852 reada:
7853                 readahead_tree_block(root, bytenr);
7854                 nread++;
7855         }
7856         wc->reada_slot = slot;
7857 }
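
/*
 * Note on the sizing above (not a comment from the original source):
 * the readahead window is adaptive.  It shrinks to 2/3 (floor 2) when
 * the walk re-enters slots before the previous window, and grows by
 * 3/2 (capped at one node's worth of pointers) while the walk keeps
 * advancing past it.
 */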
7858
7859 /*
7860  * TODO: Modify the related functions to add the affected nodes/leaves
7861  * to dirty_extent_root for later qgroup accounting.
7862  *
7863  * Currently, this function does nothing.
7864  */
7865 static int account_leaf_items(struct btrfs_trans_handle *trans,
7866                               struct btrfs_root *root,
7867                               struct extent_buffer *eb)
7868 {
7869         int nr = btrfs_header_nritems(eb);
7870         int i, extent_type;
7871         struct btrfs_key key;
7872         struct btrfs_file_extent_item *fi;
7873         u64 bytenr, num_bytes;
7874
7875         for (i = 0; i < nr; i++) {
7876                 btrfs_item_key_to_cpu(eb, &key, i);
7877
7878                 if (key.type != BTRFS_EXTENT_DATA_KEY)
7879                         continue;
7880
7881                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
7882                 /* filter out non-qgroup-accountable extents */
7883                 extent_type = btrfs_file_extent_type(eb, fi);
7884
7885                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
7886                         continue;
7887
7888                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
7889                 if (!bytenr)
7890                         continue;
7891
7892                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
7893         }
7894         return 0;
7895 }
7896
7897 /*
7898  * Walk up the tree from the bottom, freeing leaves and any interior
7899  * nodes which have had all slots visited. If a node (leaf or
7900  * interior) is freed, the node above it will have its slot
7901  * incremented. The root node will never be freed.
7902  *
7903  * At the end of this function, we should have a path which has all
7904  * slots incremented to the next position for a search. If we need to
7905  * read a new node it will be NULL and the node above it will have the
7906  * correct slot selected for a later read.
7907  *
7908  * If we increment the root node's slot counter past the number of
7909  * elements, 1 is returned to signal completion of the search.
7910  */
7911 static int adjust_slots_upwards(struct btrfs_root *root,
7912                                 struct btrfs_path *path, int root_level)
7913 {
7914         int level = 0;
7915         int nr, slot;
7916         struct extent_buffer *eb;
7917
7918         if (root_level == 0)
7919                 return 1;
7920
7921         while (level <= root_level) {
7922                 eb = path->nodes[level];
7923                 nr = btrfs_header_nritems(eb);
7924                 path->slots[level]++;
7925                 slot = path->slots[level];
7926                 if (slot >= nr || level == 0) {
7927                         /*
7928                          * Don't free the root - we will detect this
7929                          * condition after our loop and return a
7930                          * positive value for caller to stop walking the tree.
7931                          */
7932                         if (level != root_level) {
7933                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7934                                 path->locks[level] = 0;
7935
7936                                 free_extent_buffer(eb);
7937                                 path->nodes[level] = NULL;
7938                                 path->slots[level] = 0;
7939                         }
7940                 } else {
7941                         /*
7942                          * We have a valid slot to walk back down
7943                          * from. Stop here so caller can process these
7944                          * new nodes.
7945                          */
7946                         break;
7947                 }
7948
7949                 level++;
7950         }
7951
7952         eb = path->nodes[root_level];
7953         if (path->slots[root_level] >= btrfs_header_nritems(eb))
7954                 return 1;
7955
7956         return 0;
7957 }
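
/*
 * Worked example (illustrative): with root_level == 2, the leaf at
 * level 0 is always unlocked and freed; its parent at level 1 is
 * freed too if the incremented slot runs past nritems, and the loop
 * stops at the first level whose new slot is still valid.  Only when
 * even the root node's slot overflows does the function return 1 to
 * end the walk.
 */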
7958
7959 /*
7960  * root_eb is the subtree root and is locked before this function is called.
7961  * TODO: Modify this function to mark all nodes (including completely
7962  * shared nodes) in dirty_extent_root so they get accounted in qgroup.
7963  */
7964 static int account_shared_subtree(struct btrfs_trans_handle *trans,
7965                                   struct btrfs_root *root,
7966                                   struct extent_buffer *root_eb,
7967                                   u64 root_gen,
7968                                   int root_level)
7969 {
7970         int ret = 0;
7971         int level;
7972         struct extent_buffer *eb = root_eb;
7973         struct btrfs_path *path = NULL;
7974
7975         BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
7976         BUG_ON(root_eb == NULL);
7977
7978         if (!root->fs_info->quota_enabled)
7979                 return 0;
7980
7981         if (!extent_buffer_uptodate(root_eb)) {
7982                 ret = btrfs_read_buffer(root_eb, root_gen);
7983                 if (ret)
7984                         goto out;
7985         }
7986
7987         if (root_level == 0) {
7988                 ret = account_leaf_items(trans, root, root_eb);
7989                 goto out;
7990         }
7991
7992         path = btrfs_alloc_path();
7993         if (!path)
7994                 return -ENOMEM;
7995
7996         /*
7997          * Walk down the tree.  Missing extent blocks are filled in as
7998          * we go. Metadata is accounted every time we read a new
7999          * extent block.
8000          *
8001          * When we reach a leaf, we account for file extent items in it,
8002          * walk back up the tree (adjusting slot pointers as we go)
8003          * and restart the search process.
8004          */
8005         extent_buffer_get(root_eb); /* For path */
8006         path->nodes[root_level] = root_eb;
8007         path->slots[root_level] = 0;
8008         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
8009 walk_down:
8010         level = root_level;
8011         while (level >= 0) {
8012                 if (path->nodes[level] == NULL) {
8013                         int parent_slot;
8014                         u64 child_gen;
8015                         u64 child_bytenr;
8016
8017                         /* We need to get child blockptr/gen from
8018                          * parent before we can read it. */
8019                         eb = path->nodes[level + 1];
8020                         parent_slot = path->slots[level + 1];
8021                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
8022                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
8023
8024                         eb = read_tree_block(root, child_bytenr, child_gen);
8025                         if (IS_ERR(eb)) {
8026                                 ret = PTR_ERR(eb);
8027                                 goto out;
8028                         } else if (!extent_buffer_uptodate(eb)) {
8029                                 free_extent_buffer(eb);
8030                                 ret = -EIO;
8031                                 goto out;
8032                         }
8033
8034                         path->nodes[level] = eb;
8035                         path->slots[level] = 0;
8036
8037                         btrfs_tree_read_lock(eb);
8038                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
8039                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
8040                 }
8041
8042                 if (level == 0) {
8043                         ret = account_leaf_items(trans, root, path->nodes[level]);
8044                         if (ret)
8045                                 goto out;
8046
8047                         /* Nonzero return here means we completed our search */
8048                         ret = adjust_slots_upwards(root, path, root_level);
8049                         if (ret)
8050                                 break;
8051
8052                         /* Restart search with new slots */
8053                         goto walk_down;
8054                 }
8055
8056                 level--;
8057         }
8058
8059         ret = 0;
8060 out:
8061         btrfs_free_path(path);
8062
8063         return ret;
8064 }
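
/*
 * Typical call site (see do_walk_down() below): the caller holds the
 * subtree root locked and passes its generation and level, e.g.
 *
 *	ret = account_shared_subtree(trans, root, next, generation,
 *				     level - 1);
 *
 * A failure here only leaves qgroup numbers out of sync, so callers
 * print a rate-limited error and keep walking instead of aborting.
 */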
8065
8066 /*
8067  * helper to process tree block while walking down the tree.
8068  *
8069  * when wc->stage == UPDATE_BACKREF, this function updates
8070  * back refs for pointers in the block.
8071  *
8072  * NOTE: return value 1 means we should stop walking down.
8073  */
8074 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
8075                                    struct btrfs_root *root,
8076                                    struct btrfs_path *path,
8077                                    struct walk_control *wc, int lookup_info)
8078 {
8079         int level = wc->level;
8080         struct extent_buffer *eb = path->nodes[level];
8081         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8082         int ret;
8083
8084         if (wc->stage == UPDATE_BACKREF &&
8085             btrfs_header_owner(eb) != root->root_key.objectid)
8086                 return 1;
8087
8088         /*
8089          * when the reference count of a tree block is 1, it won't increase
8090          * again. Once the full backref flag is set, we never clear it.
8091          */
8092         if (lookup_info &&
8093             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
8094              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
8095                 BUG_ON(!path->locks[level]);
8096                 ret = btrfs_lookup_extent_info(trans, root,
8097                                                eb->start, level, 1,
8098                                                &wc->refs[level],
8099                                                &wc->flags[level]);
8100                 BUG_ON(ret == -ENOMEM);
8101                 if (ret)
8102                         return ret;
8103                 BUG_ON(wc->refs[level] == 0);
8104         }
8105
8106         if (wc->stage == DROP_REFERENCE) {
8107                 if (wc->refs[level] > 1)
8108                         return 1;
8109
8110                 if (path->locks[level] && !wc->keep_locks) {
8111                         btrfs_tree_unlock_rw(eb, path->locks[level]);
8112                         path->locks[level] = 0;
8113                 }
8114                 return 0;
8115         }
8116
8117         /* wc->stage == UPDATE_BACKREF */
8118         if (!(wc->flags[level] & flag)) {
8119                 BUG_ON(!path->locks[level]);
8120                 ret = btrfs_inc_ref(trans, root, eb, 1);
8121                 BUG_ON(ret); /* -ENOMEM */
8122                 ret = btrfs_dec_ref(trans, root, eb, 0);
8123                 BUG_ON(ret); /* -ENOMEM */
8124                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
8125                                                   eb->len, flag,
8126                                                   btrfs_header_level(eb), 0);
8127                 BUG_ON(ret); /* -ENOMEM */
8128                 wc->flags[level] |= flag;
8129         }
8130
8131         /*
8132          * the block is shared by multiple trees, so it's not good to
8133          * keep the tree lock
8134          */
8135         if (path->locks[level] && level > 0) {
8136                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8137                 path->locks[level] = 0;
8138         }
8139         return 0;
8140 }
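
/*
 * Callers treat a return of 1 as "stop descending": walk_down_tree()
 * below breaks out of its loop on ret > 0 and lets walk_up_tree()
 * take over, for example when a shared block (refs > 1) is hit
 * during the DROP_REFERENCE stage.
 */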
8141
8142 /*
8143  * helper to process tree block pointer.
8144  *
8145  * when wc->stage == DROP_REFERENCE, this function checks the
8146  * reference count of the block pointed to. If the block is
8147  * shared and we need to update back refs for the subtree
8148  * rooted at the block, this function changes wc->stage to
8149  * UPDATE_BACKREF. If the block is shared and there is no
8150  * need to update back refs, this function drops the reference
8151  * to the block.
8152  *
8153  * NOTE: return value 1 means we should stop walking down.
8154  */
8155 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8156                                  struct btrfs_root *root,
8157                                  struct btrfs_path *path,
8158                                  struct walk_control *wc, int *lookup_info)
8159 {
8160         u64 bytenr;
8161         u64 generation;
8162         u64 parent;
8163         u32 blocksize;
8164         struct btrfs_key key;
8165         struct extent_buffer *next;
8166         int level = wc->level;
8167         int reada = 0;
8168         int ret = 0;
8169         bool need_account = false;
8170
8171         generation = btrfs_node_ptr_generation(path->nodes[level],
8172                                                path->slots[level]);
8173         /*
8174          * if the lower level block was created before the snapshot
8175          * was created, we know there is no need to update back refs
8176          * for the subtree
8177          */
8178         if (wc->stage == UPDATE_BACKREF &&
8179             generation <= root->root_key.offset) {
8180                 *lookup_info = 1;
8181                 return 1;
8182         }
8183
8184         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
8185         blocksize = root->nodesize;
8186
8187         next = btrfs_find_tree_block(root->fs_info, bytenr);
8188         if (!next) {
8189                 next = btrfs_find_create_tree_block(root, bytenr);
8190                 if (!next)
8191                         return -ENOMEM;
8192                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8193                                                level - 1);
8194                 reada = 1;
8195         }
8196         btrfs_tree_lock(next);
8197         btrfs_set_lock_blocking(next);
8198
8199         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
8200                                        &wc->refs[level - 1],
8201                                        &wc->flags[level - 1]);
8202         if (ret < 0) {
8203                 btrfs_tree_unlock(next);
8204                 return ret;
8205         }
8206
8207         if (unlikely(wc->refs[level - 1] == 0)) {
8208                 btrfs_err(root->fs_info, "Missing references.");
8209                 BUG();
8210         }
8211         *lookup_info = 0;
8212
8213         if (wc->stage == DROP_REFERENCE) {
8214                 if (wc->refs[level - 1] > 1) {
8215                         need_account = true;
8216                         if (level == 1 &&
8217                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8218                                 goto skip;
8219
8220                         if (!wc->update_ref ||
8221                             generation <= root->root_key.offset)
8222                                 goto skip;
8223
8224                         btrfs_node_key_to_cpu(path->nodes[level], &key,
8225                                               path->slots[level]);
8226                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
8227                         if (ret < 0)
8228                                 goto skip;
8229
8230                         wc->stage = UPDATE_BACKREF;
8231                         wc->shared_level = level - 1;
8232                 }
8233         } else {
8234                 if (level == 1 &&
8235                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8236                         goto skip;
8237         }
8238
8239         if (!btrfs_buffer_uptodate(next, generation, 0)) {
8240                 btrfs_tree_unlock(next);
8241                 free_extent_buffer(next);
8242                 next = NULL;
8243                 *lookup_info = 1;
8244         }
8245
8246         if (!next) {
8247                 if (reada && level == 1)
8248                         reada_walk_down(trans, root, wc, path);
8249                 next = read_tree_block(root, bytenr, generation);
8250                 if (IS_ERR(next)) {
8251                         return PTR_ERR(next);
8252                 } else if (!extent_buffer_uptodate(next)) {
8253                         free_extent_buffer(next);
8254                         return -EIO;
8255                 }
8256                 btrfs_tree_lock(next);
8257                 btrfs_set_lock_blocking(next);
8258         }
8259
8260         level--;
8261         BUG_ON(level != btrfs_header_level(next));
8262         path->nodes[level] = next;
8263         path->slots[level] = 0;
8264         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8265         wc->level = level;
8266         if (wc->level == 1)
8267                 wc->reada_slot = 0;
8268         return 0;
8269 skip:
8270         wc->refs[level - 1] = 0;
8271         wc->flags[level - 1] = 0;
8272         if (wc->stage == DROP_REFERENCE) {
8273                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8274                         parent = path->nodes[level]->start;
8275                 } else {
8276                         BUG_ON(root->root_key.objectid !=
8277                                btrfs_header_owner(path->nodes[level]));
8278                         parent = 0;
8279                 }
8280
8281                 if (need_account) {
8282                         ret = account_shared_subtree(trans, root, next,
8283                                                      generation, level - 1);
8284                         if (ret) {
8285                                 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
8286                                         "%d accounting shared subtree. Quota "
8287                                         "is out of sync, rescan required.\n",
8288                                         root->fs_info->sb->s_id, ret);
8289                         }
8290                 }
8291                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
8292                                 root->root_key.objectid, level - 1, 0, 0);
8293                 BUG_ON(ret); /* -ENOMEM */
8294         }
8295         btrfs_tree_unlock(next);
8296         free_extent_buffer(next);
8297         *lookup_info = 1;
8298         return 1;
8299 }
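
/*
 * The skip: path above is what actually releases a shared subtree:
 * after optionally accounting the subtree for qgroups, the child's
 * reference is dropped through btrfs_free_extent() with the proper
 * parent for the backref, and 1 is returned so the caller moves on
 * to the next slot instead of descending.
 */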
8300
8301 /*
8302  * helper to process tree block while walking up the tree.
8303  *
8304  * when wc->stage == DROP_REFERENCE, this function drops the
8305  * reference count on the block.
8306  *
8307  * when wc->stage == UPDATE_BACKREF, this function changes
8308  * wc->stage back to DROP_REFERENCE if we changed wc->stage
8309  * to UPDATE_BACKREF previously while processing the block.
8310  *
8311  * NOTE: return value 1 means we should stop walking up.
8312  */
8313 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
8314                                  struct btrfs_root *root,
8315                                  struct btrfs_path *path,
8316                                  struct walk_control *wc)
8317 {
8318         int ret;
8319         int level = wc->level;
8320         struct extent_buffer *eb = path->nodes[level];
8321         u64 parent = 0;
8322
8323         if (wc->stage == UPDATE_BACKREF) {
8324                 BUG_ON(wc->shared_level < level);
8325                 if (level < wc->shared_level)
8326                         goto out;
8327
8328                 ret = find_next_key(path, level + 1, &wc->update_progress);
8329                 if (ret > 0)
8330                         wc->update_ref = 0;
8331
8332                 wc->stage = DROP_REFERENCE;
8333                 wc->shared_level = -1;
8334                 path->slots[level] = 0;
8335
8336                 /*
8337                  * check reference count again if the block isn't locked.
8338                  * we should start walking down the tree again if reference
8339                  * count is one.
8340                  */
8341                 if (!path->locks[level]) {
8342                         BUG_ON(level == 0);
8343                         btrfs_tree_lock(eb);
8344                         btrfs_set_lock_blocking(eb);
8345                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8346
8347                         ret = btrfs_lookup_extent_info(trans, root,
8348                                                        eb->start, level, 1,
8349                                                        &wc->refs[level],
8350                                                        &wc->flags[level]);
8351                         if (ret < 0) {
8352                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8353                                 path->locks[level] = 0;
8354                                 return ret;
8355                         }
8356                         BUG_ON(wc->refs[level] == 0);
8357                         if (wc->refs[level] == 1) {
8358                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8359                                 path->locks[level] = 0;
8360                                 return 1;
8361                         }
8362                 }
8363         }
8364
8365         /* wc->stage == DROP_REFERENCE */
8366         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8367
8368         if (wc->refs[level] == 1) {
8369                 if (level == 0) {
8370                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8371                                 ret = btrfs_dec_ref(trans, root, eb, 1);
8372                         else
8373                                 ret = btrfs_dec_ref(trans, root, eb, 0);
8374                         BUG_ON(ret); /* -ENOMEM */
8375                         ret = account_leaf_items(trans, root, eb);
8376                         if (ret) {
8377                                 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
8378                                         "%d accounting leaf items. Quota "
8379                                         "is out of sync, rescan required.\n",
8380                                         root->fs_info->sb->s_id, ret);
8381                         }
8382                 }
8383                 /* make block locked assertion in clean_tree_block happy */
8384                 if (!path->locks[level] &&
8385                     btrfs_header_generation(eb) == trans->transid) {
8386                         btrfs_tree_lock(eb);
8387                         btrfs_set_lock_blocking(eb);
8388                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8389                 }
8390                 clean_tree_block(trans, root->fs_info, eb);
8391         }
8392
8393         if (eb == root->node) {
8394                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8395                         parent = eb->start;
8396                 else
8397                         BUG_ON(root->root_key.objectid !=
8398                                btrfs_header_owner(eb));
8399         } else {
8400                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8401                         parent = path->nodes[level + 1]->start;
8402                 else
8403                         BUG_ON(root->root_key.objectid !=
8404                                btrfs_header_owner(path->nodes[level + 1]));
8405         }
8406
8407         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8408 out:
8409         wc->refs[level] = 0;
8410         wc->flags[level] = 0;
8411         return 0;
8412 }
8413
8414 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8415                                    struct btrfs_root *root,
8416                                    struct btrfs_path *path,
8417                                    struct walk_control *wc)
8418 {
8419         int level = wc->level;
8420         int lookup_info = 1;
8421         int ret;
8422
8423         while (level >= 0) {
8424                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
8425                 if (ret > 0)
8426                         break;
8427
8428                 if (level == 0)
8429                         break;
8430
8431                 if (path->slots[level] >=
8432                     btrfs_header_nritems(path->nodes[level]))
8433                         break;
8434
8435                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
8436                 if (ret > 0) {
8437                         path->slots[level]++;
8438                         continue;
8439                 } else if (ret < 0)
8440                         return ret;
8441                 level = wc->level;
8442         }
8443         return 0;
8444 }
8445
8446 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8447                                  struct btrfs_root *root,
8448                                  struct btrfs_path *path,
8449                                  struct walk_control *wc, int max_level)
8450 {
8451         int level = wc->level;
8452         int ret;
8453
8454         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8455         while (level < max_level && path->nodes[level]) {
8456                 wc->level = level;
8457                 if (path->slots[level] + 1 <
8458                     btrfs_header_nritems(path->nodes[level])) {
8459                         path->slots[level]++;
8460                         return 0;
8461                 } else {
8462                         ret = walk_up_proc(trans, root, path, wc);
8463                         if (ret > 0)
8464                                 return 0;
8465
8466                         if (path->locks[level]) {
8467                                 btrfs_tree_unlock_rw(path->nodes[level],
8468                                                      path->locks[level]);
8469                                 path->locks[level] = 0;
8470                         }
8471                         free_extent_buffer(path->nodes[level]);
8472                         path->nodes[level] = NULL;
8473                         level++;
8474                 }
8475         }
8476         return 1;
8477 }
8478
8479 /*
8480  * drop a subvolume tree.
8481  *
8482  * this function traverses the tree, freeing any blocks that are only
8483  * referenced by the tree.
8484  *
8485  * when a shared tree block is found, this function decreases its
8486  * reference count by one. if update_ref is true, this function
8487  * also makes sure backrefs for the shared block and all lower level
8488  * blocks are properly updated.
8489  *
8490  * If called with for_reloc == 0, may exit early with -EAGAIN
8491  */
8492 int btrfs_drop_snapshot(struct btrfs_root *root,
8493                          struct btrfs_block_rsv *block_rsv, int update_ref,
8494                          int for_reloc)
8495 {
8496         struct btrfs_path *path;
8497         struct btrfs_trans_handle *trans;
8498         struct btrfs_root *tree_root = root->fs_info->tree_root;
8499         struct btrfs_root_item *root_item = &root->root_item;
8500         struct walk_control *wc;
8501         struct btrfs_key key;
8502         int err = 0;
8503         int ret;
8504         int level;
8505         bool root_dropped = false;
8506
8507         btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8508
8509         path = btrfs_alloc_path();
8510         if (!path) {
8511                 err = -ENOMEM;
8512                 goto out;
8513         }
8514
8515         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8516         if (!wc) {
8517                 btrfs_free_path(path);
8518                 err = -ENOMEM;
8519                 goto out;
8520         }
8521
8522         trans = btrfs_start_transaction(tree_root, 0);
8523         if (IS_ERR(trans)) {
8524                 err = PTR_ERR(trans);
8525                 goto out_free;
8526         }
8527
8528         if (block_rsv)
8529                 trans->block_rsv = block_rsv;
8530
8531         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8532                 level = btrfs_header_level(root->node);
8533                 path->nodes[level] = btrfs_lock_root_node(root);
8534                 btrfs_set_lock_blocking(path->nodes[level]);
8535                 path->slots[level] = 0;
8536                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8537                 memset(&wc->update_progress, 0,
8538                        sizeof(wc->update_progress));
8539         } else {
8540                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8541                 memcpy(&wc->update_progress, &key,
8542                        sizeof(wc->update_progress));
8543
8544                 level = root_item->drop_level;
8545                 BUG_ON(level == 0);
8546                 path->lowest_level = level;
8547                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8548                 path->lowest_level = 0;
8549                 if (ret < 0) {
8550                         err = ret;
8551                         goto out_end_trans;
8552                 }
8553                 WARN_ON(ret > 0);
8554
8555                 /*
8556                  * unlock our path, this is safe because only this
8557                  * function is allowed to delete this snapshot
8558                  */
8559                 btrfs_unlock_up_safe(path, 0);
8560
8561                 level = btrfs_header_level(root->node);
8562                 while (1) {
8563                         btrfs_tree_lock(path->nodes[level]);
8564                         btrfs_set_lock_blocking(path->nodes[level]);
8565                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8566
8567                         ret = btrfs_lookup_extent_info(trans, root,
8568                                                 path->nodes[level]->start,
8569                                                 level, 1, &wc->refs[level],
8570                                                 &wc->flags[level]);
8571                         if (ret < 0) {
8572                                 err = ret;
8573                                 goto out_end_trans;
8574                         }
8575                         BUG_ON(wc->refs[level] == 0);
8576
8577                         if (level == root_item->drop_level)
8578                                 break;
8579
8580                         btrfs_tree_unlock(path->nodes[level]);
8581                         path->locks[level] = 0;
8582                         WARN_ON(wc->refs[level] != 1);
8583                         level--;
8584                 }
8585         }
8586
8587         wc->level = level;
8588         wc->shared_level = -1;
8589         wc->stage = DROP_REFERENCE;
8590         wc->update_ref = update_ref;
8591         wc->keep_locks = 0;
8592         wc->for_reloc = for_reloc;
8593         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8594
8595         while (1) {
8597                 ret = walk_down_tree(trans, root, path, wc);
8598                 if (ret < 0) {
8599                         err = ret;
8600                         break;
8601                 }
8602
8603                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8604                 if (ret < 0) {
8605                         err = ret;
8606                         break;
8607                 }
8608
8609                 if (ret > 0) {
8610                         BUG_ON(wc->stage != DROP_REFERENCE);
8611                         break;
8612                 }
8613
8614                 if (wc->stage == DROP_REFERENCE) {
8615                         level = wc->level;
8616                         btrfs_node_key(path->nodes[level],
8617                                        &root_item->drop_progress,
8618                                        path->slots[level]);
8619                         root_item->drop_level = level;
8620                 }
8621
8622                 BUG_ON(wc->level == 0);
8623                 if (btrfs_should_end_transaction(trans, tree_root) ||
8624                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
8625                         ret = btrfs_update_root(trans, tree_root,
8626                                                 &root->root_key,
8627                                                 root_item);
8628                         if (ret) {
8629                                 btrfs_abort_transaction(trans, tree_root, ret);
8630                                 err = ret;
8631                                 goto out_end_trans;
8632                         }
8633
8634                         btrfs_end_transaction_throttle(trans, tree_root);
8635                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
8636                                 pr_debug("BTRFS: drop snapshot early exit\n");
8637                                 err = -EAGAIN;
8638                                 goto out_free;
8639                         }
8640
8641                         trans = btrfs_start_transaction(tree_root, 0);
8642                         if (IS_ERR(trans)) {
8643                                 err = PTR_ERR(trans);
8644                                 goto out_free;
8645                         }
8646                         if (block_rsv)
8647                                 trans->block_rsv = block_rsv;
8648                 }
8649         }
8650         btrfs_release_path(path);
8651         if (err)
8652                 goto out_end_trans;
8653
8654         ret = btrfs_del_root(trans, tree_root, &root->root_key);
8655         if (ret) {
8656                 btrfs_abort_transaction(trans, tree_root, ret);
8657                 goto out_end_trans;
8658         }
8659
8660         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
8661                 ret = btrfs_find_root(tree_root, &root->root_key, path,
8662                                       NULL, NULL);
8663                 if (ret < 0) {
8664                         btrfs_abort_transaction(trans, tree_root, ret);
8665                         err = ret;
8666                         goto out_end_trans;
8667                 } else if (ret > 0) {
8668                         /* if we fail to delete the orphan item this time
8669                          * around, it'll get picked up the next time.
8670                          *
8671                          * The most common failure here is just -ENOENT.
8672                          */
8673                         btrfs_del_orphan_item(trans, tree_root,
8674                                               root->root_key.objectid);
8675                 }
8676         }
8677
8678         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
8679                 btrfs_add_dropped_root(trans, root);
8680         } else {
8681                 free_extent_buffer(root->node);
8682                 free_extent_buffer(root->commit_root);
8683                 btrfs_put_fs_root(root);
8684         }
8685         root_dropped = true;
8686 out_end_trans:
8687         btrfs_end_transaction_throttle(trans, tree_root);
8688 out_free:
8689         kfree(wc);
8690         btrfs_free_path(path);
8691 out:
8692         /*
8693          * So if we need to stop dropping the snapshot for whatever reason, we
8694          * need to make sure to add it back to the dead root list so that we
8695          * keep trying to do the work later.  This also cleans up roots if we
8696          * don't have it in the radix (like when we recover after a power fail
8697          * or unmount) so we don't leak memory.
8698          */
8699         if (!for_reloc && root_dropped == false)
8700                 btrfs_add_dead_root(root);
8701         if (err && err != -EAGAIN)
8702                 btrfs_std_error(root->fs_info, err);
8703         return err;
8704 }
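
/*
 * Note on resumability: drop_progress/drop_level in the root item are
 * refreshed before each transaction is ended inside the main loop
 * above, so an interrupted drop (for example the -EAGAIN early exit
 * taken for the cleaner thread) restarts from the recorded key via
 * btrfs_search_slot() rather than from the top of the tree.
 */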
8705
8706 /*
8707  * drop subtree rooted at tree block 'node'.
8708  *
8709  * NOTE: this function will unlock and release tree block 'node'.
8710  * It is only used by relocation code.
8711  */
8712 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
8713                         struct btrfs_root *root,
8714                         struct extent_buffer *node,
8715                         struct extent_buffer *parent)
8716 {
8717         struct btrfs_path *path;
8718         struct walk_control *wc;
8719         int level;
8720         int parent_level;
8721         int ret = 0;
8722         int wret;
8723
8724         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
8725
8726         path = btrfs_alloc_path();
8727         if (!path)
8728                 return -ENOMEM;
8729
8730         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8731         if (!wc) {
8732                 btrfs_free_path(path);
8733                 return -ENOMEM;
8734         }
8735
8736         btrfs_assert_tree_locked(parent);
8737         parent_level = btrfs_header_level(parent);
8738         extent_buffer_get(parent);
8739         path->nodes[parent_level] = parent;
8740         path->slots[parent_level] = btrfs_header_nritems(parent);
8741
8742         btrfs_assert_tree_locked(node);
8743         level = btrfs_header_level(node);
8744         path->nodes[level] = node;
8745         path->slots[level] = 0;
8746         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8747
8748         wc->refs[parent_level] = 1;
8749         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8750         wc->level = level;
8751         wc->shared_level = -1;
8752         wc->stage = DROP_REFERENCE;
8753         wc->update_ref = 0;
8754         wc->keep_locks = 1;
8755         wc->for_reloc = 1;
8756         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8757
8758         while (1) {
8759                 wret = walk_down_tree(trans, root, path, wc);
8760                 if (wret < 0) {
8761                         ret = wret;
8762                         break;
8763                 }
8764
8765                 wret = walk_up_tree(trans, root, path, wc, parent_level);
8766                 if (wret < 0)
8767                         ret = wret;
8768                 if (wret != 0)
8769                         break;
8770         }
8771
8772         kfree(wc);
8773         btrfs_free_path(path);
8774         return ret;
8775 }
8776
8777 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
8778 {
8779         u64 num_devices;
8780         u64 stripped;
8781
8782         /*
8783          * if restripe for this chunk_type is on, pick the target profile
8784          * and return; otherwise do the usual balance
8785          */
8786         stripped = get_restripe_target(root->fs_info, flags);
8787         if (stripped)
8788                 return extended_to_chunk(stripped);
8789
8790         num_devices = root->fs_info->fs_devices->rw_devices;
8791
8792         stripped = BTRFS_BLOCK_GROUP_RAID0 |
8793                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
8794                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
8795
8796         if (num_devices == 1) {
8797                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8798                 stripped = flags & ~stripped;
8799
8800                 /* turn raid0 into single device chunks */
8801                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
8802                         return stripped;
8803
8804                 /* turn mirroring into duplication */
8805                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
8806                              BTRFS_BLOCK_GROUP_RAID10))
8807                         return stripped | BTRFS_BLOCK_GROUP_DUP;
8808         } else {
8809                 /* they already had raid on here, just return */
8810                 if (flags & stripped)
8811                         return flags;
8812
8813                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8814                 stripped = flags & ~stripped;
8815
8816                 /* switch duplicated blocks with raid1 */
8817                 if (flags & BTRFS_BLOCK_GROUP_DUP)
8818                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
8819
8820                 /* this is drive concat, leave it alone */
8821         }
8822
8823         return flags;
8824 }
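
/*
 * Examples of the mapping above, assuming no restripe target is set:
 * with a single rw device, RAID0 degrades to the single profile and
 * RAID1/RAID10 degrade to DUP; with several devices, DUP is upgraded
 * to RAID1, while profiles that already stripe or mirror are returned
 * unchanged.
 */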
8825
8826 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
8827 {
8828         struct btrfs_space_info *sinfo = cache->space_info;
8829         u64 num_bytes;
8830         u64 min_allocable_bytes;
8831         int ret = -ENOSPC;
8832
8833         /*
8834          * We need some metadata space and system metadata space for
8835          * allocating chunks in some corner cases, unless we are forced
8836          * to set the block group read-only.
8837          */
8838         if ((sinfo->flags &
8839              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
8840             !force)
8841                 min_allocable_bytes = 1 * 1024 * 1024;
8842         else
8843                 min_allocable_bytes = 0;
8844
8845         spin_lock(&sinfo->lock);
8846         spin_lock(&cache->lock);
8847
8848         if (cache->ro) {
8849                 cache->ro++;
8850                 ret = 0;
8851                 goto out;
8852         }
8853
8854         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8855                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8856
8857         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
8858             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
8859             min_allocable_bytes <= sinfo->total_bytes) {
8860                 sinfo->bytes_readonly += num_bytes;
8861                 cache->ro++;
8862                 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
8863                 ret = 0;
8864         }
8865 out:
8866         spin_unlock(&cache->lock);
8867         spin_unlock(&sinfo->lock);
8868         return ret;
8869 }
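
/*
 * Numeric sketch (illustrative figures): for a 1GiB metadata block
 * group with 200MiB used and nothing reserved or pinned, num_bytes is
 * roughly 800MiB of slack.  The group can go read-only only if moving
 * that slack into bytes_readonly still fits under the space_info's
 * total_bytes with min_allocable_bytes (1MiB here, unless forced) to
 * spare.
 */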
8870
8871 int btrfs_inc_block_group_ro(struct btrfs_root *root,
8872                              struct btrfs_block_group_cache *cache)
8874 {
8875         struct btrfs_trans_handle *trans;
8876         u64 alloc_flags;
8877         int ret;
8878
8879 again:
8880         trans = btrfs_join_transaction(root);
8881         if (IS_ERR(trans))
8882                 return PTR_ERR(trans);
8883
8884         /*
8885          * we're not allowed to set block groups readonly after the dirty
8886          * block groups cache has started writing.  If it already started,
8887          * back off and let this transaction commit
8888          */
8889         mutex_lock(&root->fs_info->ro_block_group_mutex);
8890         if (trans->transaction->dirty_bg_run) {
8891                 u64 transid = trans->transid;
8892
8893                 mutex_unlock(&root->fs_info->ro_block_group_mutex);
8894                 btrfs_end_transaction(trans, root);
8895
8896                 ret = btrfs_wait_for_commit(root, transid);
8897                 if (ret)
8898                         return ret;
8899                 goto again;
8900         }
8901
8902         /*
8903          * if we are changing raid levels, try to allocate a corresponding
8904          * block group with the new raid level.
8905          */
8906         alloc_flags = update_block_group_flags(root, cache->flags);
8907         if (alloc_flags != cache->flags) {
8908                 ret = do_chunk_alloc(trans, root, alloc_flags,
8909                                      CHUNK_ALLOC_FORCE);
8910                 /*
8911                  * ENOSPC is allowed here, we may have enough space
8912                  * already allocated at the new raid level to
8913                  * carry on
8914                  */
8915                 if (ret == -ENOSPC)
8916                         ret = 0;
8917                 if (ret < 0)
8918                         goto out;
8919         }
8920
8921         ret = inc_block_group_ro(cache, 0);
8922         if (!ret)
8923                 goto out;
8924         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
8925         ret = do_chunk_alloc(trans, root, alloc_flags,
8926                              CHUNK_ALLOC_FORCE);
8927         if (ret < 0)
8928                 goto out;
8929         ret = inc_block_group_ro(cache, 0);
8930 out:
8931         if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
8932                 alloc_flags = update_block_group_flags(root, cache->flags);
8933                 lock_chunks(root->fs_info->chunk_root);
8934                 check_system_chunk(trans, root, alloc_flags);
8935                 unlock_chunks(root->fs_info->chunk_root);
8936         }
8937         mutex_unlock(&root->fs_info->ro_block_group_mutex);
8938
8939         btrfs_end_transaction(trans, root);
8940         return ret;
8941 }
8942
8943 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8944                             struct btrfs_root *root, u64 type)
8945 {
8946         u64 alloc_flags = get_alloc_profile(root, type);
8947         return do_chunk_alloc(trans, root, alloc_flags,
8948                               CHUNK_ALLOC_FORCE);
8949 }
8950
8951 /*
8952  * helper to account the unused space of all the readonly block groups in
8953  * the space_info. takes mirrors into account.
8954  */
8955 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8956 {
8957         struct btrfs_block_group_cache *block_group;
8958         u64 free_bytes = 0;
8959         int factor;
8960
8961          /* It's df, we don't care if it's racy */
8962         if (list_empty(&sinfo->ro_bgs))
8963                 return 0;
8964
8965         spin_lock(&sinfo->lock);
8966         list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
8967                 spin_lock(&block_group->lock);
8968
8969                 if (!block_group->ro) {
8970                         spin_unlock(&block_group->lock);
8971                         continue;
8972                 }
8973
8974                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
8975                                           BTRFS_BLOCK_GROUP_RAID10 |
8976                                           BTRFS_BLOCK_GROUP_DUP))
8977                         factor = 2;
8978                 else
8979                         factor = 1;
8980
8981                 free_bytes += (block_group->key.offset -
8982                                btrfs_block_group_used(&block_group->item)) *
8983                                factor;
8984
8985                 spin_unlock(&block_group->lock);
8986         }
8987         spin_unlock(&sinfo->lock);
8988
8989         return free_bytes;
8990 }
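
/*
 * Example: a read-only RAID1 block group with key.offset == 1GiB and
 * 300MiB used contributes (1GiB - 300MiB) * 2 bytes here, since every
 * logical byte in a mirrored profile occupies two copies on disk.
 */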
8991
8992 void btrfs_dec_block_group_ro(struct btrfs_root *root,
8993                               struct btrfs_block_group_cache *cache)
8994 {
8995         struct btrfs_space_info *sinfo = cache->space_info;
8996         u64 num_bytes;
8997
8998         BUG_ON(!cache->ro);
8999
9000         spin_lock(&sinfo->lock);
9001         spin_lock(&cache->lock);
9002         if (!--cache->ro) {
9003                 num_bytes = cache->key.offset - cache->reserved -
9004                             cache->pinned - cache->bytes_super -
9005                             btrfs_block_group_used(&cache->item);
9006                 sinfo->bytes_readonly -= num_bytes;
9007                 list_del_init(&cache->ro_list);
9008         }
9009         spin_unlock(&cache->lock);
9010         spin_unlock(&sinfo->lock);
9011 }
9012
9013 /*
9014  * checks to see if it's even possible to relocate this block group.
9015  *
9016  * @return - -1 if it's not a good idea to relocate this block group, 0 if
9017  * it's ok to go ahead and try.
9018  */
9019 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
9020 {
9021         struct btrfs_block_group_cache *block_group;
9022         struct btrfs_space_info *space_info;
9023         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
9024         struct btrfs_device *device;
9025         struct btrfs_trans_handle *trans;
9026         u64 min_free;
9027         u64 dev_min = 1;
9028         u64 dev_nr = 0;
9029         u64 target;
9030         int index;
9031         int full = 0;
9032         int ret = 0;
9033
9034         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
9035
9036         /* odd, couldn't find the block group, leave it alone */
9037         if (!block_group)
9038                 return -1;
9039
9040         min_free = btrfs_block_group_used(&block_group->item);
9041
9042         /* no bytes used, we're good */
9043         if (!min_free)
9044                 goto out;
9045
9046         space_info = block_group->space_info;
9047         spin_lock(&space_info->lock);
9048
9049         full = space_info->full;
9050
9051         /*
9052          * if this is the last block group we have in this space, we can't
9053          * relocate it unless we're able to allocate a new chunk below.
9054          *
9055          * Otherwise, we need to make sure we have room in the space to handle
9056          * all of the extents from this block group.  If we can, we're good
9057          */
9058         if ((space_info->total_bytes != block_group->key.offset) &&
9059             (space_info->bytes_used + space_info->bytes_reserved +
9060              space_info->bytes_pinned + space_info->bytes_readonly +
9061              min_free < space_info->total_bytes)) {
9062                 spin_unlock(&space_info->lock);
9063                 goto out;
9064         }
9065         spin_unlock(&space_info->lock);
9066
9067         /*
9068          * ok we don't have enough space, but maybe we have free space on our
9069          * devices to allocate new chunks for relocation, so loop through our
9070          * alloc devices and guess if we have enough space.  if this block
9071          * group is going to be restriped, run checks against the target
9072          * profile instead of the current one.
9073          */
9074         ret = -1;
9075
9076         /*
9077          * index:
9078          *      0: raid10
9079          *      1: raid1
9080          *      2: dup
9081          *      3: raid0
9082          *      4: single
9083          */
9084         target = get_restripe_target(root->fs_info, block_group->flags);
9085         if (target) {
9086                 index = __get_raid_index(extended_to_chunk(target));
9087         } else {
9088                 /*
9089                  * this is just a balance, so if we were marked as full
9090                  * we know there is no space for a new chunk
9091                  */
9092                 if (full)
9093                         goto out;
9094
9095                 index = get_block_group_index(block_group);
9096         }
9097
9098         if (index == BTRFS_RAID_RAID10) {
9099                 dev_min = 4;
9100                 /* Divide by 2 */
9101                 min_free >>= 1;
9102         } else if (index == BTRFS_RAID_RAID1) {
9103                 dev_min = 2;
9104         } else if (index == BTRFS_RAID_DUP) {
9105                 /* Multiply by 2 */
9106                 min_free <<= 1;
9107         } else if (index == BTRFS_RAID_RAID0) {
9108                 dev_min = fs_devices->rw_devices;
9109                 min_free = div64_u64(min_free, dev_min);
9110         }
9111
9112         /* We need to do this so that we can look at pending chunks */
9113         trans = btrfs_join_transaction(root);
9114         if (IS_ERR(trans)) {
9115                 ret = PTR_ERR(trans);
9116                 goto out;
9117         }
9118
9119         mutex_lock(&root->fs_info->chunk_mutex);
9120         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
9121                 u64 dev_offset;
9122
9123                 /*
9124                  * check to make sure we can actually find a chunk with enough
9125                  * space to fit our block group in.
9126                  */
9127                 if (device->total_bytes > device->bytes_used + min_free &&
9128                     !device->is_tgtdev_for_dev_replace) {
9129                         ret = find_free_dev_extent(trans, device, min_free,
9130                                                    &dev_offset, NULL);
9131                         if (!ret)
9132                                 dev_nr++;
9133
9134                         if (dev_nr >= dev_min)
9135                                 break;
9136
9137                         ret = -1;
9138                 }
9139         }
9140         mutex_unlock(&root->fs_info->chunk_mutex);
9141         btrfs_end_transaction(trans, root);
9142 out:
9143         btrfs_put_block_group(block_group);
9144         return ret;
9145 }
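
/*
 * Example of the per-profile adjustment above: relocating a RAID10
 * block group needs free extents on at least 4 devices, each holding
 * half of min_free; a DUP group needs twice min_free on one device;
 * and a RAID0 group spreads min_free evenly across all rw devices.
 */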
9146
9147 static int find_first_block_group(struct btrfs_root *root,
9148                 struct btrfs_path *path, struct btrfs_key *key)
9149 {
9150         int ret = 0;
9151         struct btrfs_key found_key;
9152         struct extent_buffer *leaf;
9153         int slot;
9154
9155         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
9156         if (ret < 0)
9157                 goto out;
9158
9159         while (1) {
9160                 slot = path->slots[0];
9161                 leaf = path->nodes[0];
9162                 if (slot >= btrfs_header_nritems(leaf)) {
9163                         ret = btrfs_next_leaf(root, path);
9164                         if (ret == 0)
9165                                 continue;
9166                         if (ret < 0)
9167                                 goto out;
9168                         break;
9169                 }
9170                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
9171
9172                 if (found_key.objectid >= key->objectid &&
9173                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
9174                         ret = 0;
9175                         goto out;
9176                 }
9177                 path->slots[0]++;
9178         }
9179 out:
9180         return ret;
9181 }
9182
9183 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9184 {
9185         struct btrfs_block_group_cache *block_group;
9186         u64 last = 0;
9187
9188         while (1) {
9189                 struct inode *inode;
9190
9191                 block_group = btrfs_lookup_first_block_group(info, last);
9192                 while (block_group) {
9193                         spin_lock(&block_group->lock);
9194                         if (block_group->iref)
9195                                 break;
9196                         spin_unlock(&block_group->lock);
9197                         block_group = next_block_group(info->tree_root,
9198                                                        block_group);
9199                 }
9200                 if (!block_group) {
9201                         if (last == 0)
9202                                 break;
9203                         last = 0;
9204                         continue;
9205                 }
9206
9207                 inode = block_group->inode;
9208                 block_group->iref = 0;
9209                 block_group->inode = NULL;
9210                 spin_unlock(&block_group->lock);
9211                 iput(inode);
9212                 last = block_group->key.objectid + block_group->key.offset;
9213                 btrfs_put_block_group(block_group);
9214         }
9215 }
9216
9217 int btrfs_free_block_groups(struct btrfs_fs_info *info)
9218 {
9219         struct btrfs_block_group_cache *block_group;
9220         struct btrfs_space_info *space_info;
9221         struct btrfs_caching_control *caching_ctl;
9222         struct rb_node *n;
9223
9224         down_write(&info->commit_root_sem);
9225         while (!list_empty(&info->caching_block_groups)) {
9226                 caching_ctl = list_entry(info->caching_block_groups.next,
9227                                          struct btrfs_caching_control, list);
9228                 list_del(&caching_ctl->list);
9229                 put_caching_control(caching_ctl);
9230         }
9231         up_write(&info->commit_root_sem);
9232
9233         spin_lock(&info->unused_bgs_lock);
9234         while (!list_empty(&info->unused_bgs)) {
9235                 block_group = list_first_entry(&info->unused_bgs,
9236                                                struct btrfs_block_group_cache,
9237                                                bg_list);
9238                 list_del_init(&block_group->bg_list);
9239                 btrfs_put_block_group(block_group);
9240         }
9241         spin_unlock(&info->unused_bgs_lock);
9242
9243         spin_lock(&info->block_group_cache_lock);
9244         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
9245                 block_group = rb_entry(n, struct btrfs_block_group_cache,
9246                                        cache_node);
9247                 rb_erase(&block_group->cache_node,
9248                          &info->block_group_cache_tree);
9249                 RB_CLEAR_NODE(&block_group->cache_node);
9250                 spin_unlock(&info->block_group_cache_lock);
9251
9252                 down_write(&block_group->space_info->groups_sem);
9253                 list_del(&block_group->list);
9254                 up_write(&block_group->space_info->groups_sem);
9255
9256                 if (block_group->cached == BTRFS_CACHE_STARTED)
9257                         wait_block_group_cache_done(block_group);
9258
9259                 /*
9260                  * We haven't cached this block group, which means we could
9261                  * possibly have excluded extents on this block group.
9262                  */
9263                 if (block_group->cached == BTRFS_CACHE_NO ||
9264                     block_group->cached == BTRFS_CACHE_ERROR)
9265                         free_excluded_extents(info->extent_root, block_group);
9266
9267                 btrfs_remove_free_space_cache(block_group);
9268                 btrfs_put_block_group(block_group);
9269
9270                 spin_lock(&info->block_group_cache_lock);
9271         }
9272         spin_unlock(&info->block_group_cache_lock);
9273
9274         /* now that all the block groups are freed, go through and
9275          * free all the space_info structs.  This is only called during
9276          * the final stages of unmount, and so we know nobody is
9277          * using them.  We call synchronize_rcu() once before we start,
9278          * just to be on the safe side.
9279          */
9280         synchronize_rcu();
9281
9282         release_global_block_rsv(info);
9283
9284         while (!list_empty(&info->space_info)) {
9285                 int i;
9286
9287                 space_info = list_entry(info->space_info.next,
9288                                         struct btrfs_space_info,
9289                                         list);
9290                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
9291                         if (WARN_ON(space_info->bytes_pinned > 0 ||
9292                             space_info->bytes_reserved > 0 ||
9293                             space_info->bytes_may_use > 0)) {
9294                                 dump_space_info(space_info, 0, 0);
9295                         }
9296                 }
9297                 list_del(&space_info->list);
9298                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
9299                         struct kobject *kobj;
9300                         kobj = space_info->block_group_kobjs[i];
9301                         space_info->block_group_kobjs[i] = NULL;
9302                         if (kobj) {
9303                                 kobject_del(kobj);
9304                                 kobject_put(kobj);
9305                         }
9306                 }
9307                 kobject_del(&space_info->kobj);
9308                 kobject_put(&space_info->kobj);
9309         }
9310         return 0;
9311 }
9312
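/*
 * Aside (illustrative sketch, not part of the original file): the
 * relock-per-node loop above exists because wait_block_group_cache_done()
 * and the free-space teardown may sleep, and sleeping under a spinlock is
 * not allowed. The generic shape, with do_sleeping_work() as a hypothetical
 * stand-in for the per-group cleanup:
 */
static void drain_tree_with_sleeping_work_sketch(struct btrfs_fs_info *info)
{
	struct rb_node *n;
	struct btrfs_block_group_cache *bg;

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		bg = rb_entry(n, struct btrfs_block_group_cache, cache_node);
		rb_erase(&bg->cache_node, &info->block_group_cache_tree);
		RB_CLEAR_NODE(&bg->cache_node);
		spin_unlock(&info->block_group_cache_lock);

		do_sleeping_work(bg);	/* hypothetical; may sleep safely here */

		spin_lock(&info->block_group_cache_lock);
	}
	spin_unlock(&info->block_group_cache_lock);
}
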
9313 static void __link_block_group(struct btrfs_space_info *space_info,
9314                                struct btrfs_block_group_cache *cache)
9315 {
9316         int index = get_block_group_index(cache);
9317         bool first = false;
9318
9319         down_write(&space_info->groups_sem);
9320         if (list_empty(&space_info->block_groups[index]))
9321                 first = true;
9322         list_add_tail(&cache->list, &space_info->block_groups[index]);
9323         up_write(&space_info->groups_sem);
9324
9325         if (first) {
9326                 struct raid_kobject *rkobj;
9327                 int ret;
9328
9329                 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
9330                 if (!rkobj)
9331                         goto out_err;
9332                 rkobj->raid_type = index;
9333                 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
9334                 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
9335                                   "%s", get_raid_name(index));
9336                 if (ret) {
9337                         kobject_put(&rkobj->kobj);
9338                         goto out_err;
9339                 }
9340                 space_info->block_group_kobjs[index] = &rkobj->kobj;
9341         }
9342
9343         return;
9344 out_err:
9345         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
9346 }
9347
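/*
 * Aside (illustrative sketch, not part of the original file): the error path
 * above uses kobject_put() rather than kfree() because, once kobject_init()
 * has run, the memory must be released through the ktype's ->release()
 * callback. A minimal example of that convention, with example_obj and
 * example_ktype as hypothetical names:
 */
struct example_obj {
	struct kobject kobj;
};

static void example_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct example_obj, kobj));
}

static struct kobj_type example_ktype = {
	.release = example_release,
};

static int example_add(struct kobject *parent)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_NOFS);
	int ret;

	if (!obj)
		return -ENOMEM;
	kobject_init(&obj->kobj, &example_ktype);
	ret = kobject_add(&obj->kobj, parent, "%s", "example");
	if (ret)
		kobject_put(&obj->kobj);	/* frees obj via example_release() */
	return ret;
}
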
9348 static struct btrfs_block_group_cache *
9349 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
9350 {
9351         struct btrfs_block_group_cache *cache;
9352
9353         cache = kzalloc(sizeof(*cache), GFP_NOFS);
9354         if (!cache)
9355                 return NULL;
9356
9357         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
9358                                         GFP_NOFS);
9359         if (!cache->free_space_ctl) {
9360                 kfree(cache);
9361                 return NULL;
9362         }
9363
9364         cache->key.objectid = start;
9365         cache->key.offset = size;
9366         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9367
9368         cache->sectorsize = root->sectorsize;
9369         cache->fs_info = root->fs_info;
9370         cache->full_stripe_len = btrfs_full_stripe_len(root,
9371                                                &root->fs_info->mapping_tree,
9372                                                start);
9373         atomic_set(&cache->count, 1);
9374         spin_lock_init(&cache->lock);
9375         init_rwsem(&cache->data_rwsem);
9376         INIT_LIST_HEAD(&cache->list);
9377         INIT_LIST_HEAD(&cache->cluster_list);
9378         INIT_LIST_HEAD(&cache->bg_list);
9379         INIT_LIST_HEAD(&cache->ro_list);
9380         INIT_LIST_HEAD(&cache->dirty_list);
9381         INIT_LIST_HEAD(&cache->io_list);
9382         btrfs_init_free_space_ctl(cache);
9383         atomic_set(&cache->trimming, 0);
9384         mutex_init(&cache->free_space_lock);
9385
9386         return cache;
9387 }
9388
9389 int btrfs_read_block_groups(struct btrfs_root *root)
9390 {
9391         struct btrfs_path *path;
9392         int ret;
9393         struct btrfs_block_group_cache *cache;
9394         struct btrfs_fs_info *info = root->fs_info;
9395         struct btrfs_space_info *space_info;
9396         struct btrfs_key key;
9397         struct btrfs_key found_key;
9398         struct extent_buffer *leaf;
9399         int need_clear = 0;
9400         u64 cache_gen;
9401
9402         root = info->extent_root;
9403         key.objectid = 0;
9404         key.offset = 0;
9405         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9406         path = btrfs_alloc_path();
9407         if (!path)
9408                 return -ENOMEM;
9409         path->reada = 1;
9410
9411         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
9412         if (btrfs_test_opt(root, SPACE_CACHE) &&
9413             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
9414                 need_clear = 1;
9415         if (btrfs_test_opt(root, CLEAR_CACHE))
9416                 need_clear = 1;
9417
9418         while (1) {
9419                 ret = find_first_block_group(root, path, &key);
9420                 if (ret > 0)
9421                         break;
9422                 if (ret != 0)
9423                         goto error;
9424
9425                 leaf = path->nodes[0];
9426                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9427
9428                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
9429                                                        found_key.offset);
9430                 if (!cache) {
9431                         ret = -ENOMEM;
9432                         goto error;
9433                 }
9434
9435                 if (need_clear) {
9436                         /*
9437                          * When we mount with an old space cache, we need to
9438                          * set BTRFS_DC_CLEAR and set the dirty flag.
9439                          *
9440                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9441                          *    truncate the old free space cache inode and
9442                          *    set up a new one.
9443                          * b) Setting the 'dirty flag' makes sure that we flush
9444                          *    the new space cache info onto disk.
9445                          */
9446                         if (btrfs_test_opt(root, SPACE_CACHE))
9447                                 cache->disk_cache_state = BTRFS_DC_CLEAR;
9448                 }
9449
9450                 read_extent_buffer(leaf, &cache->item,
9451                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
9452                                    sizeof(cache->item));
9453                 cache->flags = btrfs_block_group_flags(&cache->item);
9454
9455                 key.objectid = found_key.objectid + found_key.offset;
9456                 btrfs_release_path(path);
9457
9458                 /*
9459                  * We need to exclude the super stripes now so that the space
9460                  * info has super bytes accounted for, otherwise we'll think
9461                  * we have more space than we actually do.
9462                  */
9463                 ret = exclude_super_stripes(root, cache);
9464                 if (ret) {
9465                         /*
9466                          * We may have excluded something, so call this just in
9467                          * case.
9468                          */
9469                         free_excluded_extents(root, cache);
9470                         btrfs_put_block_group(cache);
9471                         goto error;
9472                 }
9473
9474                 /*
9475                  * Check for two cases: either we are full, and therefore
9476                  * don't need to bother with the caching work since we won't
9477                  * find any space, or we are empty, and we can just add all
9478                  * the space in and be done with it.  This saves us a lot of
9479                  * time, particularly in the full case.
9480                  */
9481                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9482                         cache->last_byte_to_unpin = (u64)-1;
9483                         cache->cached = BTRFS_CACHE_FINISHED;
9484                         free_excluded_extents(root, cache);
9485                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9486                         cache->last_byte_to_unpin = (u64)-1;
9487                         cache->cached = BTRFS_CACHE_FINISHED;
9488                         add_new_free_space(cache, root->fs_info,
9489                                            found_key.objectid,
9490                                            found_key.objectid +
9491                                            found_key.offset);
9492                         free_excluded_extents(root, cache);
9493                 }
9494
9495                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9496                 if (ret) {
9497                         btrfs_remove_free_space_cache(cache);
9498                         btrfs_put_block_group(cache);
9499                         goto error;
9500                 }
9501
9502                 ret = update_space_info(info, cache->flags, found_key.offset,
9503                                         btrfs_block_group_used(&cache->item),
9504                                         &space_info);
9505                 if (ret) {
9506                         btrfs_remove_free_space_cache(cache);
9507                         spin_lock(&info->block_group_cache_lock);
9508                         rb_erase(&cache->cache_node,
9509                                  &info->block_group_cache_tree);
9510                         RB_CLEAR_NODE(&cache->cache_node);
9511                         spin_unlock(&info->block_group_cache_lock);
9512                         btrfs_put_block_group(cache);
9513                         goto error;
9514                 }
9515
9516                 cache->space_info = space_info;
9517                 spin_lock(&cache->space_info->lock);
9518                 cache->space_info->bytes_readonly += cache->bytes_super;
9519                 spin_unlock(&cache->space_info->lock);
9520
9521                 __link_block_group(space_info, cache);
9522
9523                 set_avail_alloc_bits(root->fs_info, cache->flags);
9524                 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
9525                         inc_block_group_ro(cache, 1);
9526                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9527                         spin_lock(&info->unused_bgs_lock);
9528                         /* Should always be true but just in case. */
9529                         if (list_empty(&cache->bg_list)) {
9530                                 btrfs_get_block_group(cache);
9531                                 list_add_tail(&cache->bg_list,
9532                                               &info->unused_bgs);
9533                         }
9534                         spin_unlock(&info->unused_bgs_lock);
9535                 }
9536         }
9537
9538         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
9539                 if (!(get_alloc_profile(root, space_info->flags) &
9540                       (BTRFS_BLOCK_GROUP_RAID10 |
9541                        BTRFS_BLOCK_GROUP_RAID1 |
9542                        BTRFS_BLOCK_GROUP_RAID5 |
9543                        BTRFS_BLOCK_GROUP_RAID6 |
9544                        BTRFS_BLOCK_GROUP_DUP)))
9545                         continue;
9546                 /*
9547                  * Avoid allocating from un-mirrored block groups if there
9548                  * are mirrored block groups.
9549                  */
9550                 list_for_each_entry(cache,
9551                                 &space_info->block_groups[BTRFS_RAID_RAID0],
9552                                 list)
9553                         inc_block_group_ro(cache, 1);
9554                 list_for_each_entry(cache,
9555                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
9556                                 list)
9557                         inc_block_group_ro(cache, 1);
9558         }
9559
9560         init_global_block_rsv(info);
9561         ret = 0;
9562 error:
9563         btrfs_free_path(path);
9564         return ret;
9565 }
9566
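/*
 * Aside (illustrative, not part of the original file): block group items are
 * keyed as (objectid = start, BTRFS_BLOCK_GROUP_ITEM_KEY, offset = length),
 * which is why the loop above steps to the next group by jumping the search
 * key past the byte range covered by the current item:
 */
static inline void advance_past_block_group_sketch(struct btrfs_key *key,
						const struct btrfs_key *found)
{
	/* the next possible block group starts where the current one ends */
	key->objectid = found->objectid + found->offset;
}
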
9567 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9568                                        struct btrfs_root *root)
9569 {
9570         struct btrfs_block_group_cache *block_group, *tmp;
9571         struct btrfs_root *extent_root = root->fs_info->extent_root;
9572         struct btrfs_block_group_item item;
9573         struct btrfs_key key;
9574         int ret = 0;
9575         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
9576
9577         trans->can_flush_pending_bgs = false;
9578         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9579                 if (ret)
9580                         goto next;
9581
9582                 spin_lock(&block_group->lock);
9583                 memcpy(&item, &block_group->item, sizeof(item));
9584                 memcpy(&key, &block_group->key, sizeof(key));
9585                 spin_unlock(&block_group->lock);
9586
9587                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
9588                                         sizeof(item));
9589                 if (ret)
9590                         btrfs_abort_transaction(trans, extent_root, ret);
9591                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
9592                                                key.objectid, key.offset);
9593                 if (ret)
9594                         btrfs_abort_transaction(trans, extent_root, ret);
9595 next:
9596                 list_del_init(&block_group->bg_list);
9597         }
9598         trans->can_flush_pending_bgs = can_flush_pending_bgs;
9599 }
9600
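/*
 * Aside (illustrative sketch, not part of the original file):
 * list_for_each_entry_safe() is required above because each iteration
 * unlinks the current entry; the _safe variant caches the next pointer
 * before the body runs. Minimal shape, with struct item as a hypothetical
 * type:
 */
struct item {
	struct list_head link;
};

static void drain_list_sketch(struct list_head *head)
{
	struct item *it, *tmp;

	list_for_each_entry_safe(it, tmp, head, link)
		list_del_init(&it->link);	/* safe: tmp already holds the next entry */
}
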
9601 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
9602                            struct btrfs_root *root, u64 bytes_used,
9603                            u64 type, u64 chunk_objectid, u64 chunk_offset,
9604                            u64 size)
9605 {
9606         int ret;
9607         struct btrfs_root *extent_root;
9608         struct btrfs_block_group_cache *cache;
9609
9610         extent_root = root->fs_info->extent_root;
9611
9612         btrfs_set_log_full_commit(root->fs_info, trans);
9613
9614         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
9615         if (!cache)
9616                 return -ENOMEM;
9617
9618         btrfs_set_block_group_used(&cache->item, bytes_used);
9619         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
9620         btrfs_set_block_group_flags(&cache->item, type);
9621
9622         cache->flags = type;
9623         cache->last_byte_to_unpin = (u64)-1;
9624         cache->cached = BTRFS_CACHE_FINISHED;
9625         ret = exclude_super_stripes(root, cache);
9626         if (ret) {
9627                 /*
9628                  * We may have excluded something, so call this just in
9629                  * case.
9630                  */
9631                 free_excluded_extents(root, cache);
9632                 btrfs_put_block_group(cache);
9633                 return ret;
9634         }
9635
9636         add_new_free_space(cache, root->fs_info, chunk_offset,
9637                            chunk_offset + size);
9638
9639         free_excluded_extents(root, cache);
9640
9641         /*
9642          * Call to ensure the corresponding space_info object is created and
9643          * assigned to our block group, but don't update its counters just yet.
9644          * We want our bg to be added to the rbtree with its ->space_info set.
9645          */
9646         ret = update_space_info(root->fs_info, cache->flags, 0, 0,
9647                                 &cache->space_info);
9648         if (ret) {
9649                 btrfs_remove_free_space_cache(cache);
9650                 btrfs_put_block_group(cache);
9651                 return ret;
9652         }
9653
9654         ret = btrfs_add_block_group_cache(root->fs_info, cache);
9655         if (ret) {
9656                 btrfs_remove_free_space_cache(cache);
9657                 btrfs_put_block_group(cache);
9658                 return ret;
9659         }
9660
9661         /*
9662          * Now that our block group has its ->space_info set and is inserted in
9663          * the rbtree, update the space info's counters.
9664          */
9665         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
9666                                 &cache->space_info);
9667         if (ret) {
9668                 btrfs_remove_free_space_cache(cache);
9669                 spin_lock(&root->fs_info->block_group_cache_lock);
9670                 rb_erase(&cache->cache_node,
9671                          &root->fs_info->block_group_cache_tree);
9672                 RB_CLEAR_NODE(&cache->cache_node);
9673                 spin_unlock(&root->fs_info->block_group_cache_lock);
9674                 btrfs_put_block_group(cache);
9675                 return ret;
9676         }
9677         update_global_block_rsv(root->fs_info);
9678
9679         spin_lock(&cache->space_info->lock);
9680         cache->space_info->bytes_readonly += cache->bytes_super;
9681         spin_unlock(&cache->space_info->lock);
9682
9683         __link_block_group(cache->space_info, cache);
9684
9685         list_add_tail(&cache->bg_list, &trans->new_bgs);
9686
9687         set_avail_alloc_bits(extent_root->fs_info, type);
9688
9689         return 0;
9690 }
9691
9692 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
9693 {
9694         u64 extra_flags = chunk_to_extended(flags) &
9695                                 BTRFS_EXTENDED_PROFILE_MASK;
9696
9697         write_seqlock(&fs_info->profiles_lock);
9698         if (flags & BTRFS_BLOCK_GROUP_DATA)
9699                 fs_info->avail_data_alloc_bits &= ~extra_flags;
9700         if (flags & BTRFS_BLOCK_GROUP_METADATA)
9701                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
9702         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
9703                 fs_info->avail_system_alloc_bits &= ~extra_flags;
9704         write_sequnlock(&fs_info->profiles_lock);
9705 }
9706
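/*
 * Aside (illustrative sketch, not part of the original file): writers update
 * the avail_*_alloc_bits words under write_seqlock() as above; readers pair
 * with it using the lockless retry loop, roughly:
 */
static u64 read_avail_data_bits_sketch(struct btrfs_fs_info *fs_info)
{
	unsigned int seq;
	u64 bits;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);
		bits = fs_info->avail_data_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	return bits;
}
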
9707 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9708                              struct btrfs_root *root, u64 group_start,
9709                              struct extent_map *em)
9710 {
9711         struct btrfs_path *path;
9712         struct btrfs_block_group_cache *block_group;
9713         struct btrfs_free_cluster *cluster;
9714         struct btrfs_root *tree_root = root->fs_info->tree_root;
9715         struct btrfs_key key;
9716         struct inode *inode;
9717         struct kobject *kobj = NULL;
9718         int ret;
9719         int index;
9720         int factor;
9721         struct btrfs_caching_control *caching_ctl = NULL;
9722         bool remove_em;
9723
9724         root = root->fs_info->extent_root;
9725
9726         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
9727         BUG_ON(!block_group);
9728         BUG_ON(!block_group->ro);
9729
9730         /*
9731          * Free the reserved super bytes from this block group before
9732          * removing it.
9733          */
9734         free_excluded_extents(root, block_group);
9735
9736         memcpy(&key, &block_group->key, sizeof(key));
9737         index = get_block_group_index(block_group);
9738         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
9739                                   BTRFS_BLOCK_GROUP_RAID1 |
9740                                   BTRFS_BLOCK_GROUP_RAID10))
9741                 factor = 2;
9742         else
9743                 factor = 1;
9744
9745         /* make sure this block group isn't part of an allocation cluster */
9746         cluster = &root->fs_info->data_alloc_cluster;
9747         spin_lock(&cluster->refill_lock);
9748         btrfs_return_cluster_to_free_space(block_group, cluster);
9749         spin_unlock(&cluster->refill_lock);
9750
9751         /*
9752          * make sure this block group isn't part of a metadata
9753          * allocation cluster
9754          */
9755         cluster = &root->fs_info->meta_alloc_cluster;
9756         spin_lock(&cluster->refill_lock);
9757         btrfs_return_cluster_to_free_space(block_group, cluster);
9758         spin_unlock(&cluster->refill_lock);
9759
9760         path = btrfs_alloc_path();
9761         if (!path) {
9762                 ret = -ENOMEM;
9763                 goto out;
9764         }
9765
9766         /*
9767          * get the inode first so any iput calls done for the io_list
9768          * aren't the final iput (no unlinks allowed now)
9769          */
9770         inode = lookup_free_space_inode(tree_root, block_group, path);
9771
9772         mutex_lock(&trans->transaction->cache_write_mutex);
9773         /*
9774          * Make sure our free space cache IO is done before we remove the
9775          * free space inode.
9776          */
9777         spin_lock(&trans->transaction->dirty_bgs_lock);
9778         if (!list_empty(&block_group->io_list)) {
9779                 list_del_init(&block_group->io_list);
9780
9781                 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
9782
9783                 spin_unlock(&trans->transaction->dirty_bgs_lock);
9784                 btrfs_wait_cache_io(root, trans, block_group,
9785                                     &block_group->io_ctl, path,
9786                                     block_group->key.objectid);
9787                 btrfs_put_block_group(block_group);
9788                 spin_lock(&trans->transaction->dirty_bgs_lock);
9789         }
9790
9791         if (!list_empty(&block_group->dirty_list)) {
9792                 list_del_init(&block_group->dirty_list);
9793                 btrfs_put_block_group(block_group);
9794         }
9795         spin_unlock(&trans->transaction->dirty_bgs_lock);
9796         mutex_unlock(&trans->transaction->cache_write_mutex);
9797
9798         if (!IS_ERR(inode)) {
9799                 ret = btrfs_orphan_add(trans, inode);
9800                 if (ret) {
9801                         btrfs_add_delayed_iput(inode);
9802                         goto out;
9803                 }
9804                 clear_nlink(inode);
9805                 /* One for the block group's ref */
9806                 spin_lock(&block_group->lock);
9807                 if (block_group->iref) {
9808                         block_group->iref = 0;
9809                         block_group->inode = NULL;
9810                         spin_unlock(&block_group->lock);
9811                         iput(inode);
9812                 } else {
9813                         spin_unlock(&block_group->lock);
9814                 }
9815                 /* One for our lookup ref */
9816                 btrfs_add_delayed_iput(inode);
9817         }
9818
9819         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
9820         key.offset = block_group->key.objectid;
9821         key.type = 0;
9822
9823         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
9824         if (ret < 0)
9825                 goto out;
9826         if (ret > 0)
9827                 btrfs_release_path(path);
9828         if (ret == 0) {
9829                 ret = btrfs_del_item(trans, tree_root, path);
9830                 if (ret)
9831                         goto out;
9832                 btrfs_release_path(path);
9833         }
9834
9835         spin_lock(&root->fs_info->block_group_cache_lock);
9836         rb_erase(&block_group->cache_node,
9837                  &root->fs_info->block_group_cache_tree);
9838         RB_CLEAR_NODE(&block_group->cache_node);
9839
9840         if (root->fs_info->first_logical_byte == block_group->key.objectid)
9841                 root->fs_info->first_logical_byte = (u64)-1;
9842         spin_unlock(&root->fs_info->block_group_cache_lock);
9843
9844         down_write(&block_group->space_info->groups_sem);
9845         /*
9846          * we must use list_del_init so people can check to see if they
9847          * are still on the list after taking the semaphore
9848          */
9849         list_del_init(&block_group->list);
9850         if (list_empty(&block_group->space_info->block_groups[index])) {
9851                 kobj = block_group->space_info->block_group_kobjs[index];
9852                 block_group->space_info->block_group_kobjs[index] = NULL;
9853                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
9854         }
9855         up_write(&block_group->space_info->groups_sem);
9856         if (kobj) {
9857                 kobject_del(kobj);
9858                 kobject_put(kobj);
9859         }
9860
9861         if (block_group->has_caching_ctl)
9862                 caching_ctl = get_caching_control(block_group);
9863         if (block_group->cached == BTRFS_CACHE_STARTED)
9864                 wait_block_group_cache_done(block_group);
9865         if (block_group->has_caching_ctl) {
9866                 down_write(&root->fs_info->commit_root_sem);
9867                 if (!caching_ctl) {
9868                         struct btrfs_caching_control *ctl;
9869
9870                         list_for_each_entry(ctl,
9871                                     &root->fs_info->caching_block_groups, list)
9872                                 if (ctl->block_group == block_group) {
9873                                         caching_ctl = ctl;
9874                                         atomic_inc(&caching_ctl->count);
9875                                         break;
9876                                 }
9877                 }
9878                 if (caching_ctl)
9879                         list_del_init(&caching_ctl->list);
9880                 up_write(&root->fs_info->commit_root_sem);
9881                 if (caching_ctl) {
9882                         /* Once for the caching bgs list and once for us. */
9883                         put_caching_control(caching_ctl);
9884                         put_caching_control(caching_ctl);
9885                 }
9886         }
9887
9888         spin_lock(&trans->transaction->dirty_bgs_lock);
9889         WARN_ON(!list_empty(&block_group->dirty_list));
9890         WARN_ON(!list_empty(&block_group->io_list));
9895         spin_unlock(&trans->transaction->dirty_bgs_lock);
9896         btrfs_remove_free_space_cache(block_group);
9897
9898         spin_lock(&block_group->space_info->lock);
9899         list_del_init(&block_group->ro_list);
9900
9901         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
9902                 WARN_ON(block_group->space_info->total_bytes
9903                         < block_group->key.offset);
9904                 WARN_ON(block_group->space_info->bytes_readonly
9905                         < block_group->key.offset);
9906                 WARN_ON(block_group->space_info->disk_total
9907                         < block_group->key.offset * factor);
9908         }
9909         block_group->space_info->total_bytes -= block_group->key.offset;
9910         block_group->space_info->bytes_readonly -= block_group->key.offset;
9911         block_group->space_info->disk_total -= block_group->key.offset * factor;
9912
9913         spin_unlock(&block_group->space_info->lock);
9914
9915         memcpy(&key, &block_group->key, sizeof(key));
9916
9917         lock_chunks(root);
9918         if (!list_empty(&em->list)) {
9919                 /* We're in the transaction->pending_chunks list. */
9920                 free_extent_map(em);
9921         }
9922         spin_lock(&block_group->lock);
9923         block_group->removed = 1;
9924         /*
9925          * At this point trimming can't start on this block group, because we
9926          * removed the block group from the fs_info->block_group_cache_tree
9927          * rbtree, so no one can find it anymore, and even if someone already
9928          * got this block group before we removed it from the rbtree, they
9929          * have already incremented block_group->trimming - if they didn't,
9930          * they won't find any free space entries because we already removed
9931          * them all when we called btrfs_remove_free_space_cache().
9932          *
9933          * And we must not remove the extent map from the fs_info->mapping_tree
9934          * to prevent the same logical address range and physical device space
9935          * ranges from being reused for a new block group. This is because our
9936          * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
9937          * completely transactionless, so while it is trimming a range the
9938          * currently running transaction might finish and a new one start,
9939          * allowing for new block groups to be created that can reuse the same
9940          * physical device locations unless we take this special care.
9941          *
9942          * There may also be an implicit trim operation if the file system
9943          * is mounted with -odiscard. The same protections must remain
9944          * in place until the extents have been discarded completely when
9945          * the transaction commit has completed.
9946          */
9947         remove_em = (atomic_read(&block_group->trimming) == 0);
9948         /*
9949          * Make sure a trimmer task always sees the em in the pinned_chunks list
9950          * if it sees block_group->removed == 1 (needs to lock block_group->lock
9951          * before checking block_group->removed).
9952          */
9953         if (!remove_em) {
9954                 /*
9955                  * Our em might be in trans->transaction->pending_chunks which
9956                  * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
9957                  * and so is the fs_info->pinned_chunks list.
9958                  *
9959                  * So at this point we must be holding the chunk_mutex to avoid
9960                  * any races with chunk allocation (more specifically at
9961                  * volumes.c:contains_pending_extent()), to ensure it always
9962                  * sees the em, either in the pending_chunks list or in the
9963                  * pinned_chunks list.
9964                  */
9965                 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
9966         }
9967         spin_unlock(&block_group->lock);
9968
9969         if (remove_em) {
9970                 struct extent_map_tree *em_tree;
9971
9972                 em_tree = &root->fs_info->mapping_tree.map_tree;
9973                 write_lock(&em_tree->lock);
9974                 /*
9975                  * The em might be in the pending_chunks list, so make sure the
9976                  * chunk mutex is locked, since remove_extent_mapping() will
9977                  * delete us from that list.
9978                  */
9979                 remove_extent_mapping(em_tree, em);
9980                 write_unlock(&em_tree->lock);
9981                 /* once for the tree */
9982                 free_extent_map(em);
9983         }
9984
9985         unlock_chunks(root);
9986
9987         btrfs_put_block_group(block_group);
9988         btrfs_put_block_group(block_group);
9989
9990         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
9991         if (ret > 0)
9992                 ret = -EIO;
9993         if (ret < 0)
9994                 goto out;
9995
9996         ret = btrfs_del_item(trans, root, path);
9997 out:
9998         btrfs_free_path(path);
9999         return ret;
10000 }
10001
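/*
 * Aside (illustrative sketch, not part of the original file): the trimmer
 * side of the protocol described in the comments above checks ->removed and
 * raises ->trimming under the same block_group->lock, so it either sees the
 * group as removed and bails out, or pins it against extent map removal:
 */
static bool begin_trimming_sketch(struct btrfs_block_group_cache *bg)
{
	bool removed;

	spin_lock(&bg->lock);
	removed = bg->removed;
	if (!removed)
		atomic_inc(&bg->trimming);
	spin_unlock(&bg->lock);

	return !removed;	/* false: the group is gone, skip trimming it */
}
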
10002 /*
10003  * Process the unused_bgs list and remove any that don't have any allocated
10004  * space inside of them.
10005  */
10006 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
10007 {
10008         struct btrfs_block_group_cache *block_group;
10009         struct btrfs_space_info *space_info;
10010         struct btrfs_root *root = fs_info->extent_root;
10011         struct btrfs_trans_handle *trans;
10012         int ret = 0;
10013
10014         if (!fs_info->open)
10015                 return;
10016
10017         spin_lock(&fs_info->unused_bgs_lock);
10018         while (!list_empty(&fs_info->unused_bgs)) {
10019                 u64 start, end;
10020                 int trimming;
10021
10022                 block_group = list_first_entry(&fs_info->unused_bgs,
10023                                                struct btrfs_block_group_cache,
10024                                                bg_list);
10025                 space_info = block_group->space_info;
10026                 list_del_init(&block_group->bg_list);
10027                 if (ret || btrfs_mixed_space_info(space_info)) {
10028                         btrfs_put_block_group(block_group);
10029                         continue;
10030                 }
10031                 spin_unlock(&fs_info->unused_bgs_lock);
10032
10033                 mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
10034
10035                 /* Don't want to race with allocators so take the groups_sem */
10036                 down_write(&space_info->groups_sem);
10037                 spin_lock(&block_group->lock);
10038                 if (block_group->reserved ||
10039                     btrfs_block_group_used(&block_group->item) ||
10040                     block_group->ro) {
10041                         /*
10042                          * We want to bail if we made new allocations or have
10043                          * outstanding allocations in this block group.  We do
10044                          * the ro check in case balance is currently acting on
10045                          * this block group.
10046                          */
10047                         spin_unlock(&block_group->lock);
10048                         up_write(&space_info->groups_sem);
10049                         goto next;
10050                 }
10051                 spin_unlock(&block_group->lock);
10052
10053                 /* We don't want to force the issue, only flip if it's ok. */
10054                 ret = inc_block_group_ro(block_group, 0);
10055                 up_write(&space_info->groups_sem);
10056                 if (ret < 0) {
10057                         ret = 0;
10058                         goto next;
10059                 }
10060
10061                 /*
10062                  * Want to do this before we do anything else so we can recover
10063                  * properly if we fail to join the transaction.
10064                  */
10065                 /* 1 for btrfs_orphan_reserve_metadata() */
10066                 trans = btrfs_start_transaction(root, 1);
10067                 if (IS_ERR(trans)) {
10068                         btrfs_dec_block_group_ro(root, block_group);
10069                         ret = PTR_ERR(trans);
10070                         goto next;
10071                 }
10072
10073                 /*
10074                  * We could have pending pinned extents for this block group,
10075                  * just delete them, we don't care about them anymore.
10076                  */
10077                 start = block_group->key.objectid;
10078                 end = start + block_group->key.offset - 1;
10079                 /*
10080                  * Hold the unused_bg_unpin_mutex lock to avoid racing with
10081                  * btrfs_finish_extent_commit(). If we are at transaction N,
10082                  * another task might be running finish_extent_commit() for the
10083                  * previous transaction N - 1, and have seen a range belonging
10084                  * to the block group in freed_extents[] before we were able to
10085                  * clear the whole block group range from freed_extents[]. This
10086                  * means that task can lookup for the block group after we
10087                  * means that task could look up the block group after we
10088                  * a BUG_ON() at btrfs_unpin_extent_range().
10089                  */
10090                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
10091                 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
10092                                   EXTENT_DIRTY, GFP_NOFS);
10093                 if (ret) {
10094                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10095                         btrfs_dec_block_group_ro(root, block_group);
10096                         goto end_trans;
10097                 }
10098                 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
10099                                   EXTENT_DIRTY, GFP_NOFS);
10100                 if (ret) {
10101                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10102                         btrfs_dec_block_group_ro(root, block_group);
10103                         goto end_trans;
10104                 }
10105                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10106
10107                 /* Reset pinned so btrfs_put_block_group doesn't complain */
10108                 spin_lock(&space_info->lock);
10109                 spin_lock(&block_group->lock);
10110
10111                 space_info->bytes_pinned -= block_group->pinned;
10112                 space_info->bytes_readonly += block_group->pinned;
10113                 percpu_counter_add(&space_info->total_bytes_pinned,
10114                                    -block_group->pinned);
10115                 block_group->pinned = 0;
10116
10117                 spin_unlock(&block_group->lock);
10118                 spin_unlock(&space_info->lock);
10119
10120                 /* DISCARD can flip during remount */
10121                 trimming = btrfs_test_opt(root, DISCARD);
10122
10123                 /* Implicit trim during transaction commit. */
10124                 if (trimming)
10125                         btrfs_get_block_group_trimming(block_group);
10126
10127                 /*
10128                  * btrfs_remove_chunk() will abort the transaction if things go
10129                  * horribly wrong.
10130                  */
10131                 ret = btrfs_remove_chunk(trans, root,
10132                                          block_group->key.objectid);
10133
10134                 if (ret) {
10135                         if (trimming)
10136                                 btrfs_put_block_group_trimming(block_group);
10137                         goto end_trans;
10138                 }
10139
10140                 /*
10141                  * If we're not mounted with -odiscard, we can just forget
10142                  * about this block group. Otherwise we'll need to wait
10143                  * until transaction commit to do the actual discard.
10144                  */
10145                 if (trimming) {
10146                         WARN_ON(!list_empty(&block_group->bg_list));
10147                         spin_lock(&trans->transaction->deleted_bgs_lock);
10148                         list_move(&block_group->bg_list,
10149                                   &trans->transaction->deleted_bgs);
10150                         spin_unlock(&trans->transaction->deleted_bgs_lock);
10151                         btrfs_get_block_group(block_group);
10152                 }
10153 end_trans:
10154                 btrfs_end_transaction(trans, root);
10155 next:
10156                 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
10157                 btrfs_put_block_group(block_group);
10158                 spin_lock(&fs_info->unused_bgs_lock);
10159         }
10160         spin_unlock(&fs_info->unused_bgs_lock);
10161 }
10162
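/*
 * Aside (illustrative sketch, not part of the original file): the producer
 * side of fs_info->unused_bgs, mirroring what btrfs_read_block_groups() does
 * above for empty groups - queue a group at most once, holding an extra
 * reference for the list:
 */
static void mark_bg_unused_sketch(struct btrfs_fs_info *fs_info,
				  struct btrfs_block_group_cache *bg)
{
	spin_lock(&fs_info->unused_bgs_lock);
	if (list_empty(&bg->bg_list)) {
		btrfs_get_block_group(bg);
		list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
}
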
10163 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
10164 {
10165         struct btrfs_space_info *space_info;
10166         struct btrfs_super_block *disk_super;
10167         u64 features;
10168         u64 flags;
10169         int mixed = 0;
10170         int ret;
10171
10172         disk_super = fs_info->super_copy;
10173         if (!btrfs_super_root(disk_super))
10174                 return 1;
10175
10176         features = btrfs_super_incompat_flags(disk_super);
10177         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
10178                 mixed = 1;
10179
10180         flags = BTRFS_BLOCK_GROUP_SYSTEM;
10181         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10182         if (ret)
10183                 goto out;
10184
10185         if (mixed) {
10186                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
10187                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10188         } else {
10189                 flags = BTRFS_BLOCK_GROUP_METADATA;
10190                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10191                 if (ret)
10192                         goto out;
10193
10194                 flags = BTRFS_BLOCK_GROUP_DATA;
10195                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10196         }
10197 out:
10198         return ret;
10199 }
10200
10201 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
10202 {
10203         return unpin_extent_range(root, start, end, false);
10204 }
10205
10206 /*
10207  * It used to be that old block groups would be left around forever.
10208  * Iterating over them would be enough to trim unused space.  Since we
10209  * now automatically remove them, we also need to iterate over unallocated
10210  * space.
10211  *
10212  * We don't want a transaction for this since the discard may take a
10213  * substantial amount of time.  We don't require that a transaction be
10214  * running, but we do need to take a running transaction into account
10215  * to ensure that we're not discarding chunks that were released in
10216  * the current transaction.
10217  *
10218  * Holding the chunks lock will prevent other threads from allocating
10219  * or releasing chunks, but it won't prevent a running transaction
10220  * from committing and releasing the memory that the pending chunks
10221  * list head uses.  For that, we need to take a reference to the
10222  * transaction.
10223  */
10224 static int btrfs_trim_free_extents(struct btrfs_device *device,
10225                                    u64 minlen, u64 *trimmed)
10226 {
10227         u64 start = 0, len = 0;
10228         int ret;
10229
10230         *trimmed = 0;
10231
10232         /* Not writeable = nothing to do. */
10233         if (!device->writeable)
10234                 return 0;
10235
10236         /* No free space = nothing to do. */
10237         if (device->total_bytes <= device->bytes_used)
10238                 return 0;
10239
10240         ret = 0;
10241
10242         while (1) {
10243                 struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
10244                 struct btrfs_transaction *trans;
10245                 u64 bytes;
10246
10247                 ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
10248                 if (ret)
10249                         return ret;
10250
10251                 down_read(&fs_info->commit_root_sem);
10252
10253                 spin_lock(&fs_info->trans_lock);
10254                 trans = fs_info->running_transaction;
10255                 if (trans)
10256                         atomic_inc(&trans->use_count);
10257                 spin_unlock(&fs_info->trans_lock);
10258
10259                 ret = find_free_dev_extent_start(trans, device, minlen, start,
10260                                                  &start, &len);
10261                 if (trans)
10262                         btrfs_put_transaction(trans);
10263
10264                 if (ret) {
10265                         up_read(&fs_info->commit_root_sem);
10266                         mutex_unlock(&fs_info->chunk_mutex);
10267                         if (ret == -ENOSPC)
10268                                 ret = 0;
10269                         break;
10270                 }
10271
10272                 ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
10273                 up_read(&fs_info->commit_root_sem);
10274                 mutex_unlock(&fs_info->chunk_mutex);
10275
10276                 if (ret)
10277                         break;
10278
10279                 start += len;
10280                 *trimmed += bytes;
10281
10282                 if (fatal_signal_pending(current)) {
10283                         ret = -ERESTARTSYS;
10284                         break;
10285                 }
10286
10287                 cond_resched();
10288         }
10289
10290         return ret;
10291 }
10292
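/*
 * Aside (illustrative sketch, not part of the original file): the "take a
 * reference to the running transaction" step above, distilled. The lookup
 * and use_count increment must happen under trans_lock so the transaction
 * cannot be freed between the two:
 */
static struct btrfs_transaction *
grab_running_transaction_sketch(struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *trans;

	spin_lock(&fs_info->trans_lock);
	trans = fs_info->running_transaction;
	if (trans)
		atomic_inc(&trans->use_count);
	spin_unlock(&fs_info->trans_lock);

	return trans;	/* caller drops it with btrfs_put_transaction() */
}
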
10293 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
10294 {
10295         struct btrfs_fs_info *fs_info = root->fs_info;
10296         struct btrfs_block_group_cache *cache = NULL;
10297         struct btrfs_device *device;
10298         struct list_head *devices;
10299         u64 group_trimmed;
10300         u64 start;
10301         u64 end;
10302         u64 trimmed = 0;
10303         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
10304         int ret = 0;
10305
10306         /*
10307          * Try to trim all FS space; our block groups may start from non-zero offsets.
10308          */
10309         if (range->len == total_bytes)
10310                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
10311         else
10312                 cache = btrfs_lookup_block_group(fs_info, range->start);
10313
10314         while (cache) {
10315                 if (cache->key.objectid >= (range->start + range->len)) {
10316                         btrfs_put_block_group(cache);
10317                         break;
10318                 }
10319
10320                 start = max(range->start, cache->key.objectid);
10321                 end = min(range->start + range->len,
10322                                 cache->key.objectid + cache->key.offset);
10323
10324                 if (end - start >= range->minlen) {
10325                         if (!block_group_cache_done(cache)) {
10326                                 ret = cache_block_group(cache, 0);
10327                                 if (ret) {
10328                                         btrfs_put_block_group(cache);
10329                                         break;
10330                                 }
10331                                 ret = wait_block_group_cache_done(cache);
10332                                 if (ret) {
10333                                         btrfs_put_block_group(cache);
10334                                         break;
10335                                 }
10336                         }
10337                         ret = btrfs_trim_block_group(cache,
10338                                                      &group_trimmed,
10339                                                      start,
10340                                                      end,
10341                                                      range->minlen);
10342
10343                         trimmed += group_trimmed;
10344                         if (ret) {
10345                                 btrfs_put_block_group(cache);
10346                                 break;
10347                         }
10348                 }
10349
10350                 cache = next_block_group(fs_info->tree_root, cache);
10351         }
10352
10353         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
10354         devices = &root->fs_info->fs_devices->alloc_list;
10355         list_for_each_entry(device, devices, dev_alloc_list) {
10356                 ret = btrfs_trim_free_extents(device, range->minlen,
10357                                               &group_trimmed);
10358                 if (ret)
10359                         break;
10360
10361                 trimmed += group_trimmed;
10362         }
10363         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
10364
10365         range->len = trimmed;
10366         return ret;
10367 }
10368
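/*
 * Usage aside (illustrative, not part of the original file): btrfs_trim_fs()
 * is the backend of the FITRIM ioctl, so the whole path above is reachable
 * from userspace roughly like this:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>		// FITRIM, struct fstrim_range
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = (__u64)-1,	// whole filesystem
 *		.minlen = 0,
 *	};
 *	int fd = open("/mnt", O_RDONLY);
 *	ioctl(fd, FITRIM, &range);	// on success, range.len = bytes trimmed
 *
 * On return, range.len mirrors the "range->len = trimmed" assignment above.
 */
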
10369 /*
10370  * btrfs_{start,end}_write_no_snapshoting() are similar to
10371  * mnt_{want,drop}_write(): they are used to prevent some tasks from writing
10372  * data into the page cache through nocow before the subvolume is snapshotted
10373  * and only flushing it to disk after the snapshot is created, and to prevent
10374  * operations, while snapshotting is ongoing, that could make the snapshot
10375  * inconsistent (writes followed by expanding truncates, for example).
10376  */
10377 void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
10378 {
10379         percpu_counter_dec(&root->subv_writers->counter);
10380         /*
10381          * Make sure counter is updated before we wake up
10382          * waiters.
10383          */
10384         smp_mb();
10385         if (waitqueue_active(&root->subv_writers->wait))
10386                 wake_up(&root->subv_writers->wait);
10387 }
10388
10389 int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
10390 {
10391         if (atomic_read(&root->will_be_snapshoted))
10392                 return 0;
10393
10394         percpu_counter_inc(&root->subv_writers->counter);
10395         /*
10396          * Make sure counter is updated before we check for snapshot creation.
10397          */
10398         smp_mb();
10399         if (atomic_read(&root->will_be_snapshoted)) {
10400                 btrfs_end_write_no_snapshoting(root);
10401                 return 0;
10402         }
10403         return 1;
10404 }
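
/*
 * Aside (illustrative, not part of the original file): the smp_mb() calls
 * above pair with barriers on the snapshot-creation side, which conceptually
 * does (sketch):
 *
 *	atomic_inc(&root->will_be_snapshoted);
 *	smp_mb__after_atomic();
 *	wait until root->subv_writers->counter drains to zero;
 *
 * With both sides ordered, a writer either observes will_be_snapshoted and
 * backs off, or the snapshotter observes the writer's counter increment and
 * waits for it; neither side can miss the other.
 */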