2 * Copyright (C) 2011 STRATO. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/pagemap.h>
21 #include <linux/writeback.h>
22 #include <linux/blkdev.h>
23 #include <linux/rbtree.h>
24 #include <linux/slab.h>
25 #include <linux/workqueue.h>
26 #include <linux/btrfs.h>
29 #include "transaction.h"
34 #include "extent_io.h"
38 * - subvol delete -> delete when ref goes to 0? delete limits also?
42 * - copy also limits on subvol creation
44 * - caches for ulists
45 * - performance benchmarks
46 * - check all ioctl parameters
50 * one struct for each qgroup, organized in fs_info->qgroup_tree.
58 u64 rfer; /* referenced */
59 u64 rfer_cmpr; /* referenced compressed */
60 u64 excl; /* exclusive */
61 u64 excl_cmpr; /* exclusive compressed */
66 u64 lim_flags; /* which limits are set */
73 * reservation tracking
80 struct list_head groups; /* groups this group is a member of */
81 struct list_head members; /* groups that are members of this group */
82 struct list_head dirty; /* dirty groups */
83 struct rb_node node; /* tree of qgroups */
86 * temp variables for accounting operations
93 * glue structure to represent the relations between qgroups.
95 struct btrfs_qgroup_list {
96 struct list_head next_group;
97 struct list_head next_member;
98 struct btrfs_qgroup *group;
99 struct btrfs_qgroup *member;
102 #define ptr_to_u64(x) ((u64)(uintptr_t)(x))
103 #define u64_to_ptr(x) ((struct btrfs_qgroup *)(uintptr_t)(x))
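/*
 * For illustration, a sketch of how these casts are used: qgroup pointers
 * are stashed in the 64-bit aux value of a ulist node, e.g.
 *
 *	ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);
 *	...
 *	qg = u64_to_ptr(unode->aux);
 */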
106 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
108 static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
110 /* must be called with qgroup_ioctl_lock held */
111 static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
114 struct rb_node *n = fs_info->qgroup_tree.rb_node;
115 struct btrfs_qgroup *qgroup;
118 qgroup = rb_entry(n, struct btrfs_qgroup, node);
119 if (qgroup->qgroupid < qgroupid)
121 else if (qgroup->qgroupid > qgroupid)
129 /* must be called with qgroup_lock held */
130 static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
133 struct rb_node **p = &fs_info->qgroup_tree.rb_node;
134 struct rb_node *parent = NULL;
135 struct btrfs_qgroup *qgroup;
139 qgroup = rb_entry(parent, struct btrfs_qgroup, node);
141 if (qgroup->qgroupid < qgroupid)
143 else if (qgroup->qgroupid > qgroupid)
149 qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
151 return ERR_PTR(-ENOMEM);
153 qgroup->qgroupid = qgroupid;
154 INIT_LIST_HEAD(&qgroup->groups);
155 INIT_LIST_HEAD(&qgroup->members);
156 INIT_LIST_HEAD(&qgroup->dirty);
158 rb_link_node(&qgroup->node, parent, p);
159 rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
164 static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
166 struct btrfs_qgroup_list *list;
168 list_del(&qgroup->dirty);
169 while (!list_empty(&qgroup->groups)) {
170 list = list_first_entry(&qgroup->groups,
171 struct btrfs_qgroup_list, next_group);
172 list_del(&list->next_group);
173 list_del(&list->next_member);
177 while (!list_empty(&qgroup->members)) {
178 list = list_first_entry(&qgroup->members,
179 struct btrfs_qgroup_list, next_member);
180 list_del(&list->next_group);
181 list_del(&list->next_member);
187 /* must be called with qgroup_lock held */
188 static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
190 struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
195 rb_erase(&qgroup->node, &fs_info->qgroup_tree);
196 __del_qgroup_rb(qgroup);
200 /* must be called with qgroup_lock held */
201 static int add_relation_rb(struct btrfs_fs_info *fs_info,
202 u64 memberid, u64 parentid)
204 struct btrfs_qgroup *member;
205 struct btrfs_qgroup *parent;
206 struct btrfs_qgroup_list *list;
208 member = find_qgroup_rb(fs_info, memberid);
209 parent = find_qgroup_rb(fs_info, parentid);
210 if (!member || !parent)
213 list = kzalloc(sizeof(*list), GFP_ATOMIC);
217 list->group = parent;
218 list->member = member;
219 list_add_tail(&list->next_group, &member->groups);
220 list_add_tail(&list->next_member, &parent->members);
225 /* must be called with qgroup_lock held */
226 static int del_relation_rb(struct btrfs_fs_info *fs_info,
227 u64 memberid, u64 parentid)
229 struct btrfs_qgroup *member;
230 struct btrfs_qgroup *parent;
231 struct btrfs_qgroup_list *list;
233 member = find_qgroup_rb(fs_info, memberid);
234 parent = find_qgroup_rb(fs_info, parentid);
235 if (!member || !parent)
238 list_for_each_entry(list, &member->groups, next_group) {
239 if (list->group == parent) {
240 list_del(&list->next_group);
241 list_del(&list->next_member);
249 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
250 int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
253 struct btrfs_qgroup *qgroup;
255 qgroup = find_qgroup_rb(fs_info, qgroupid);
258 if (qgroup->rfer != rfer || qgroup->excl != excl)
265 * The full config is read in one go; this is only called from open_ctree().
266 * It doesn't use any locking, as at this point we're still single-threaded.
268 int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
270 struct btrfs_key key;
271 struct btrfs_key found_key;
272 struct btrfs_root *quota_root = fs_info->quota_root;
273 struct btrfs_path *path = NULL;
274 struct extent_buffer *l;
278 u64 rescan_progress = 0;
280 if (!fs_info->quota_enabled)
283 fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
284 if (!fs_info->qgroup_ulist) {
289 path = btrfs_alloc_path();
295 /* default this to quota off, in case no status key is found */
296 fs_info->qgroup_flags = 0;
299 * pass 1: read status, all qgroup infos and limits
304 ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
309 struct btrfs_qgroup *qgroup;
311 slot = path->slots[0];
313 btrfs_item_key_to_cpu(l, &found_key, slot);
315 if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
316 struct btrfs_qgroup_status_item *ptr;
318 ptr = btrfs_item_ptr(l, slot,
319 struct btrfs_qgroup_status_item);
321 if (btrfs_qgroup_status_version(l, ptr) !=
322 BTRFS_QGROUP_STATUS_VERSION) {
324 "old qgroup version, quota disabled");
327 if (btrfs_qgroup_status_generation(l, ptr) !=
328 fs_info->generation) {
329 flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
331 "qgroup generation mismatch, "
332 "marked as inconsistent");
334 fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
336 rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
340 if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
341 found_key.type != BTRFS_QGROUP_LIMIT_KEY)
344 qgroup = find_qgroup_rb(fs_info, found_key.offset);
345 if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
346 (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
347 btrfs_err(fs_info, "inconsistent qgroup config");
348 flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
351 qgroup = add_qgroup_rb(fs_info, found_key.offset);
352 if (IS_ERR(qgroup)) {
353 ret = PTR_ERR(qgroup);
357 switch (found_key.type) {
358 case BTRFS_QGROUP_INFO_KEY: {
359 struct btrfs_qgroup_info_item *ptr;
361 ptr = btrfs_item_ptr(l, slot,
362 struct btrfs_qgroup_info_item);
363 qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
364 qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
365 qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
366 qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
367 /* generation currently unused */
370 case BTRFS_QGROUP_LIMIT_KEY: {
371 struct btrfs_qgroup_limit_item *ptr;
373 ptr = btrfs_item_ptr(l, slot,
374 struct btrfs_qgroup_limit_item);
375 qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
376 qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
377 qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
378 qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
379 qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
384 ret = btrfs_next_item(quota_root, path);
390 btrfs_release_path(path);
393 * pass 2: read all qgroup relations
396 key.type = BTRFS_QGROUP_RELATION_KEY;
398 ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
402 slot = path->slots[0];
404 btrfs_item_key_to_cpu(l, &found_key, slot);
406 if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
409 if (found_key.objectid > found_key.offset) {
410 /* parent <- member, not needed to build config */
411 /* FIXME should we omit the key completely? */
415 ret = add_relation_rb(fs_info, found_key.objectid,
417 if (ret == -ENOENT) {
419 "orphan qgroup relation 0x%llx->0x%llx",
420 found_key.objectid, found_key.offset);
421 ret = 0; /* ignore the error */
426 ret = btrfs_next_item(quota_root, path);
433 fs_info->qgroup_flags |= flags;
434 if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
435 fs_info->quota_enabled = 0;
436 fs_info->pending_quota_state = 0;
437 } else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
439 ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
441 btrfs_free_path(path);
444 ulist_free(fs_info->qgroup_ulist);
445 fs_info->qgroup_ulist = NULL;
446 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
449 return ret < 0 ? ret : 0;
453 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
454 * the first two of which run in single-threaded paths. And for the third one,
455 * we have set quota_root to NULL with qgroup_lock held beforehand, so it is
456 * safe to clean up the in-memory structures without holding qgroup_lock.
458 void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
461 struct btrfs_qgroup *qgroup;
463 while ((n = rb_first(&fs_info->qgroup_tree))) {
464 qgroup = rb_entry(n, struct btrfs_qgroup, node);
465 rb_erase(n, &fs_info->qgroup_tree);
466 __del_qgroup_rb(qgroup);
469 * We call btrfs_free_qgroup_config() when unmounting the
470 * filesystem and when disabling quota, so we set qgroup_ulist
471 * to NULL here to avoid a double free.
473 ulist_free(fs_info->qgroup_ulist);
474 fs_info->qgroup_ulist = NULL;
477 static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
478 struct btrfs_root *quota_root,
482 struct btrfs_path *path;
483 struct btrfs_key key;
485 path = btrfs_alloc_path();
490 key.type = BTRFS_QGROUP_RELATION_KEY;
493 ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
495 btrfs_mark_buffer_dirty(path->nodes[0]);
497 btrfs_free_path(path);
501 static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
502 struct btrfs_root *quota_root,
506 struct btrfs_path *path;
507 struct btrfs_key key;
509 path = btrfs_alloc_path();
514 key.type = BTRFS_QGROUP_RELATION_KEY;
517 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
526 ret = btrfs_del_item(trans, quota_root, path);
528 btrfs_free_path(path);
532 static int add_qgroup_item(struct btrfs_trans_handle *trans,
533 struct btrfs_root *quota_root, u64 qgroupid)
536 struct btrfs_path *path;
537 struct btrfs_qgroup_info_item *qgroup_info;
538 struct btrfs_qgroup_limit_item *qgroup_limit;
539 struct extent_buffer *leaf;
540 struct btrfs_key key;
542 if (btrfs_test_is_dummy_root(quota_root))
545 path = btrfs_alloc_path();
550 key.type = BTRFS_QGROUP_INFO_KEY;
551 key.offset = qgroupid;
554 * Avoid a transaction abort by catching -EEXIST here. In that
555 * case, we proceed by re-initializing the existing structure
559 ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
560 sizeof(*qgroup_info));
561 if (ret && ret != -EEXIST)
564 leaf = path->nodes[0];
565 qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
566 struct btrfs_qgroup_info_item);
567 btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
568 btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
569 btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
570 btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
571 btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
573 btrfs_mark_buffer_dirty(leaf);
575 btrfs_release_path(path);
577 key.type = BTRFS_QGROUP_LIMIT_KEY;
578 ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
579 sizeof(*qgroup_limit));
580 if (ret && ret != -EEXIST)
583 leaf = path->nodes[0];
584 qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
585 struct btrfs_qgroup_limit_item);
586 btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
587 btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
588 btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
589 btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
590 btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
592 btrfs_mark_buffer_dirty(leaf);
596 btrfs_free_path(path);
600 static int del_qgroup_item(struct btrfs_trans_handle *trans,
601 struct btrfs_root *quota_root, u64 qgroupid)
604 struct btrfs_path *path;
605 struct btrfs_key key;
607 path = btrfs_alloc_path();
612 key.type = BTRFS_QGROUP_INFO_KEY;
613 key.offset = qgroupid;
614 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
623 ret = btrfs_del_item(trans, quota_root, path);
627 btrfs_release_path(path);
629 key.type = BTRFS_QGROUP_LIMIT_KEY;
630 ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
639 ret = btrfs_del_item(trans, quota_root, path);
642 btrfs_free_path(path);
646 static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
647 struct btrfs_root *root,
648 struct btrfs_qgroup *qgroup)
650 struct btrfs_path *path;
651 struct btrfs_key key;
652 struct extent_buffer *l;
653 struct btrfs_qgroup_limit_item *qgroup_limit;
658 key.type = BTRFS_QGROUP_LIMIT_KEY;
659 key.offset = qgroup->qgroupid;
661 path = btrfs_alloc_path();
665 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
673 slot = path->slots[0];
674 qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
675 btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
676 btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
677 btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
678 btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
679 btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
681 btrfs_mark_buffer_dirty(l);
684 btrfs_free_path(path);
688 static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
689 struct btrfs_root *root,
690 struct btrfs_qgroup *qgroup)
692 struct btrfs_path *path;
693 struct btrfs_key key;
694 struct extent_buffer *l;
695 struct btrfs_qgroup_info_item *qgroup_info;
699 if (btrfs_test_is_dummy_root(root))
703 key.type = BTRFS_QGROUP_INFO_KEY;
704 key.offset = qgroup->qgroupid;
706 path = btrfs_alloc_path();
710 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
718 slot = path->slots[0];
719 qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
720 btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
721 btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
722 btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
723 btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
724 btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
726 btrfs_mark_buffer_dirty(l);
729 btrfs_free_path(path);
733 static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
734 struct btrfs_fs_info *fs_info,
735 struct btrfs_root *root)
737 struct btrfs_path *path;
738 struct btrfs_key key;
739 struct extent_buffer *l;
740 struct btrfs_qgroup_status_item *ptr;
745 key.type = BTRFS_QGROUP_STATUS_KEY;
748 path = btrfs_alloc_path();
752 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
760 slot = path->slots[0];
761 ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
762 btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
763 btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
764 btrfs_set_qgroup_status_rescan(l, ptr,
765 fs_info->qgroup_rescan_progress.objectid);
767 btrfs_mark_buffer_dirty(l);
770 btrfs_free_path(path);
775 * called with qgroup_lock held
777 static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
778 struct btrfs_root *root)
780 struct btrfs_path *path;
781 struct btrfs_key key;
782 struct extent_buffer *leaf = NULL;
786 path = btrfs_alloc_path();
790 path->leave_spinning = 1;
797 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
800 leaf = path->nodes[0];
801 nr = btrfs_header_nritems(leaf);
805 * delete the items one leaf at a time,
806 * since the whole tree is going to be deleted.
810 ret = btrfs_del_items(trans, root, path, 0, nr);
814 btrfs_release_path(path);
818 root->fs_info->pending_quota_state = 0;
819 btrfs_free_path(path);
823 int btrfs_quota_enable(struct btrfs_trans_handle *trans,
824 struct btrfs_fs_info *fs_info)
826 struct btrfs_root *quota_root;
827 struct btrfs_root *tree_root = fs_info->tree_root;
828 struct btrfs_path *path = NULL;
829 struct btrfs_qgroup_status_item *ptr;
830 struct extent_buffer *leaf;
831 struct btrfs_key key;
832 struct btrfs_key found_key;
833 struct btrfs_qgroup *qgroup = NULL;
837 mutex_lock(&fs_info->qgroup_ioctl_lock);
838 if (fs_info->quota_root) {
839 fs_info->pending_quota_state = 1;
843 fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
844 if (!fs_info->qgroup_ulist) {
850 * initially create the quota tree
852 quota_root = btrfs_create_tree(trans, fs_info,
853 BTRFS_QUOTA_TREE_OBJECTID);
854 if (IS_ERR(quota_root)) {
855 ret = PTR_ERR(quota_root);
859 path = btrfs_alloc_path();
866 key.type = BTRFS_QGROUP_STATUS_KEY;
869 ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
874 leaf = path->nodes[0];
875 ptr = btrfs_item_ptr(leaf, path->slots[0],
876 struct btrfs_qgroup_status_item);
877 btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
878 btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
879 fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
880 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
881 btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
882 btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
884 btrfs_mark_buffer_dirty(leaf);
887 key.type = BTRFS_ROOT_REF_KEY;
890 btrfs_release_path(path);
891 ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
899 slot = path->slots[0];
900 leaf = path->nodes[0];
901 btrfs_item_key_to_cpu(leaf, &found_key, slot);
903 if (found_key.type == BTRFS_ROOT_REF_KEY) {
904 ret = add_qgroup_item(trans, quota_root,
909 qgroup = add_qgroup_rb(fs_info, found_key.offset);
910 if (IS_ERR(qgroup)) {
911 ret = PTR_ERR(qgroup);
915 ret = btrfs_next_item(tree_root, path);
923 btrfs_release_path(path);
924 ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
928 qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
929 if (IS_ERR(qgroup)) {
930 ret = PTR_ERR(qgroup);
933 spin_lock(&fs_info->qgroup_lock);
934 fs_info->quota_root = quota_root;
935 fs_info->pending_quota_state = 1;
936 spin_unlock(&fs_info->qgroup_lock);
938 btrfs_free_path(path);
941 free_extent_buffer(quota_root->node);
942 free_extent_buffer(quota_root->commit_root);
947 ulist_free(fs_info->qgroup_ulist);
948 fs_info->qgroup_ulist = NULL;
950 mutex_unlock(&fs_info->qgroup_ioctl_lock);
954 int btrfs_quota_disable(struct btrfs_trans_handle *trans,
955 struct btrfs_fs_info *fs_info)
957 struct btrfs_root *tree_root = fs_info->tree_root;
958 struct btrfs_root *quota_root;
961 mutex_lock(&fs_info->qgroup_ioctl_lock);
962 if (!fs_info->quota_root)
964 spin_lock(&fs_info->qgroup_lock);
965 fs_info->quota_enabled = 0;
966 fs_info->pending_quota_state = 0;
967 quota_root = fs_info->quota_root;
968 fs_info->quota_root = NULL;
969 spin_unlock(&fs_info->qgroup_lock);
971 btrfs_free_qgroup_config(fs_info);
973 ret = btrfs_clean_quota_tree(trans, quota_root);
977 ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
981 list_del(&quota_root->dirty_list);
983 btrfs_tree_lock(quota_root->node);
984 clean_tree_block(trans, tree_root->fs_info, quota_root->node);
985 btrfs_tree_unlock(quota_root->node);
986 btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
988 free_extent_buffer(quota_root->node);
989 free_extent_buffer(quota_root->commit_root);
992 mutex_unlock(&fs_info->qgroup_ioctl_lock);
996 static void qgroup_dirty(struct btrfs_fs_info *fs_info,
997 struct btrfs_qgroup *qgroup)
999 if (list_empty(&qgroup->dirty))
1000 list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
1003 int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
1004 struct btrfs_fs_info *fs_info, u64 src, u64 dst)
1006 struct btrfs_root *quota_root;
1007 struct btrfs_qgroup *parent;
1008 struct btrfs_qgroup *member;
1009 struct btrfs_qgroup_list *list;
1012 mutex_lock(&fs_info->qgroup_ioctl_lock);
1013 quota_root = fs_info->quota_root;
1018 member = find_qgroup_rb(fs_info, src);
1019 parent = find_qgroup_rb(fs_info, dst);
1020 if (!member || !parent) {
1025 /* first check whether such a qgroup relation already exists */
1026 list_for_each_entry(list, &member->groups, next_group) {
1027 if (list->group == parent) {
1033 ret = add_qgroup_relation_item(trans, quota_root, src, dst);
1037 ret = add_qgroup_relation_item(trans, quota_root, dst, src);
1039 del_qgroup_relation_item(trans, quota_root, src, dst);
1043 spin_lock(&fs_info->qgroup_lock);
1044 ret = add_relation_rb(quota_root->fs_info, src, dst);
1045 spin_unlock(&fs_info->qgroup_lock);
1047 mutex_unlock(&fs_info->qgroup_ioctl_lock);
1051 int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
1052 struct btrfs_fs_info *fs_info, u64 src, u64 dst)
1054 struct btrfs_root *quota_root;
1055 struct btrfs_qgroup *parent;
1056 struct btrfs_qgroup *member;
1057 struct btrfs_qgroup_list *list;
1061 mutex_lock(&fs_info->qgroup_ioctl_lock);
1062 quota_root = fs_info->quota_root;
1068 member = find_qgroup_rb(fs_info, src);
1069 parent = find_qgroup_rb(fs_info, dst);
1070 if (!member || !parent) {
1075 /* first check whether such a qgroup relation exists */
1076 list_for_each_entry(list, &member->groups, next_group) {
1077 if (list->group == parent)
1083 ret = del_qgroup_relation_item(trans, quota_root, src, dst);
1084 err = del_qgroup_relation_item(trans, quota_root, dst, src);
1088 spin_lock(&fs_info->qgroup_lock);
1089 del_relation_rb(fs_info, src, dst);
1090 spin_unlock(&fs_info->qgroup_lock);
1092 mutex_unlock(&fs_info->qgroup_ioctl_lock);
1096 int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
1097 struct btrfs_fs_info *fs_info, u64 qgroupid)
1099 struct btrfs_root *quota_root;
1100 struct btrfs_qgroup *qgroup;
1103 mutex_lock(&fs_info->qgroup_ioctl_lock);
1104 quota_root = fs_info->quota_root;
1109 qgroup = find_qgroup_rb(fs_info, qgroupid);
1115 ret = add_qgroup_item(trans, quota_root, qgroupid);
1119 spin_lock(&fs_info->qgroup_lock);
1120 qgroup = add_qgroup_rb(fs_info, qgroupid);
1121 spin_unlock(&fs_info->qgroup_lock);
1124 ret = PTR_ERR(qgroup);
1126 mutex_unlock(&fs_info->qgroup_ioctl_lock);
1130 int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
1131 struct btrfs_fs_info *fs_info, u64 qgroupid)
1133 struct btrfs_root *quota_root;
1134 struct btrfs_qgroup *qgroup;
1137 mutex_lock(&fs_info->qgroup_ioctl_lock);
1138 quota_root = fs_info->quota_root;
1144 qgroup = find_qgroup_rb(fs_info, qgroupid);
1149 /* check that there are no relations to this qgroup */
1150 if (!list_empty(&qgroup->groups) ||
1151 !list_empty(&qgroup->members)) {
1156 ret = del_qgroup_item(trans, quota_root, qgroupid);
1158 spin_lock(&fs_info->qgroup_lock);
1159 del_qgroup_rb(quota_root->fs_info, qgroupid);
1160 spin_unlock(&fs_info->qgroup_lock);
1162 mutex_unlock(&fs_info->qgroup_ioctl_lock);
1166 int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
1167 struct btrfs_fs_info *fs_info, u64 qgroupid,
1168 struct btrfs_qgroup_limit *limit)
1170 struct btrfs_root *quota_root;
1171 struct btrfs_qgroup *qgroup;
1174 mutex_lock(&fs_info->qgroup_ioctl_lock);
1175 quota_root = fs_info->quota_root;
1181 qgroup = find_qgroup_rb(fs_info, qgroupid);
1187 spin_lock(&fs_info->qgroup_lock);
1188 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER)
1189 qgroup->max_rfer = limit->max_rfer;
1190 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL)
1191 qgroup->max_excl = limit->max_excl;
1192 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER)
1193 qgroup->rsv_rfer = limit->rsv_rfer;
1194 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL)
1195 qgroup->rsv_excl = limit->rsv_excl;
1196 qgroup->lim_flags |= limit->flags;
1198 spin_unlock(&fs_info->qgroup_lock);
1200 ret = update_qgroup_limit_item(trans, quota_root, qgroup);
1202 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1203 btrfs_info(fs_info, "unable to update quota limit for %llu",
1208 mutex_unlock(&fs_info->qgroup_ioctl_lock);
1212 static int comp_oper_exist(struct btrfs_qgroup_operation *oper1,
1213 struct btrfs_qgroup_operation *oper2)
1216 * Ignore seq and type here, we're looking for any operation
1217 * at all related to this extent on that root.
1219 if (oper1->bytenr < oper2->bytenr)
1221 if (oper1->bytenr > oper2->bytenr)
1223 if (oper1->ref_root < oper2->ref_root)
1225 if (oper1->ref_root > oper2->ref_root)
1230 static int qgroup_oper_exists(struct btrfs_fs_info *fs_info,
1231 struct btrfs_qgroup_operation *oper)
1234 struct btrfs_qgroup_operation *cur;
1237 spin_lock(&fs_info->qgroup_op_lock);
1238 n = fs_info->qgroup_op_tree.rb_node;
1240 cur = rb_entry(n, struct btrfs_qgroup_operation, n);
1241 cmp = comp_oper_exist(cur, oper);
1247 spin_unlock(&fs_info->qgroup_op_lock);
1251 spin_unlock(&fs_info->qgroup_op_lock);
1255 static int comp_oper(struct btrfs_qgroup_operation *oper1,
1256 struct btrfs_qgroup_operation *oper2)
1258 if (oper1->bytenr < oper2->bytenr)
1260 if (oper1->bytenr > oper2->bytenr)
1262 if (oper1->ref_root < oper2->ref_root)
1264 if (oper1->ref_root > oper2->ref_root)
1266 if (oper1->seq < oper2->seq)
1268 if (oper1->seq > oper2->seq)
1270 if (oper1->type < oper2->type)
1272 if (oper1->type > oper2->type)
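/*
 * Together with insert_qgroup_oper() below, comp_oper() keys the tree on
 * (bytenr, ref_root, seq, type), so all operations on one extent are
 * adjacent and ordered by sequence number.
 */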
1277 static int insert_qgroup_oper(struct btrfs_fs_info *fs_info,
1278 struct btrfs_qgroup_operation *oper)
1281 struct rb_node *parent = NULL;
1282 struct btrfs_qgroup_operation *cur;
1285 spin_lock(&fs_info->qgroup_op_lock);
1286 p = &fs_info->qgroup_op_tree.rb_node;
1289 cur = rb_entry(parent, struct btrfs_qgroup_operation, n);
1290 cmp = comp_oper(cur, oper);
1292 p = &(*p)->rb_right;
1296 spin_unlock(&fs_info->qgroup_op_lock);
1300 rb_link_node(&oper->n, parent, p);
1301 rb_insert_color(&oper->n, &fs_info->qgroup_op_tree);
1302 spin_unlock(&fs_info->qgroup_op_lock);
1307 * Record a quota operation for processing later on.
1308 * @trans: the transaction we are adding the delayed op to.
1309 * @fs_info: the fs_info for this fs.
1310 * @ref_root: the root of the reference we are acting on,
1311 * @bytenr: the bytenr we are acting on.
1312 * @num_bytes: the number of bytes in the reference.
1313 * @type: the type of operation this is.
1314 * @mod_seq: do we need to get a sequence number for looking up roots.
1316 * We just add it to our trans qgroup_ref_list and carry on and process these
1317 * operations in order at some later point. If the reference root isn't a fs
1318 * root then we don't bother with doing anything.
1320 * MUST BE HOLDING THE REF LOCK.
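 *
 * For illustration, a sketch of a hypothetical call site recording the
 * addition of the sole reference to an extent (not a verbatim caller):
 *
 *	btrfs_qgroup_record_ref(trans, root->fs_info, root->root_key.objectid,
 *				bytenr, num_bytes,
 *				BTRFS_QGROUP_OPER_ADD_EXCL, 0);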
1322 int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
1323 struct btrfs_fs_info *fs_info, u64 ref_root,
1324 u64 bytenr, u64 num_bytes,
1325 enum btrfs_qgroup_operation_type type, int mod_seq)
1327 struct btrfs_qgroup_operation *oper;
1330 if (!is_fstree(ref_root) || !fs_info->quota_enabled)
1333 oper = kmalloc(sizeof(*oper), GFP_NOFS);
1337 oper->ref_root = ref_root;
1338 oper->bytenr = bytenr;
1339 oper->num_bytes = num_bytes;
1341 oper->seq = atomic_inc_return(&fs_info->qgroup_op_seq);
1342 INIT_LIST_HEAD(&oper->elem.list);
1345 trace_btrfs_qgroup_record_ref(oper);
1347 if (type == BTRFS_QGROUP_OPER_SUB_SUBTREE) {
1349 * If any operation for this bytenr/ref_root combo
1350 * exists, then we know it's not exclusively owned and
1351 * shouldn't be queued up.
1353 * This also catches the case where we have a cloned
1354 * extent that gets queued up multiple times during
1357 if (qgroup_oper_exists(fs_info, oper)) {
1363 ret = insert_qgroup_oper(fs_info, oper);
1365 /* Shouldn't happen so have an assert for developers */
1370 list_add_tail(&oper->list, &trans->qgroup_ref_list);
1373 btrfs_get_tree_mod_seq(fs_info, &oper->elem);
1379 * The easy accounting, if we are adding/removing the only ref for an extent
1380 * then this qgroup and all of the parent qgroups get their reference and
1381 * exclusive counts adjusted.
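 *
 * For example: adding the only ref for a 16KiB extent to a qgroup bumps
 * rfer and excl of that qgroup, and of every qgroup above it in the
 * hierarchy, by 16KiB each.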
1383 static int qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
1384 struct btrfs_qgroup_operation *oper)
1386 struct btrfs_qgroup *qgroup;
1388 struct btrfs_qgroup_list *glist;
1389 struct ulist_node *unode;
1390 struct ulist_iterator uiter;
1394 tmp = ulist_alloc(GFP_NOFS);
1398 spin_lock(&fs_info->qgroup_lock);
1399 if (!fs_info->quota_root)
1401 qgroup = find_qgroup_rb(fs_info, oper->ref_root);
1404 switch (oper->type) {
1405 case BTRFS_QGROUP_OPER_ADD_EXCL:
1408 case BTRFS_QGROUP_OPER_SUB_EXCL:
1414 qgroup->rfer += sign * oper->num_bytes;
1415 qgroup->rfer_cmpr += sign * oper->num_bytes;
1417 WARN_ON(sign < 0 && qgroup->excl < oper->num_bytes);
1418 qgroup->excl += sign * oper->num_bytes;
1419 qgroup->excl_cmpr += sign * oper->num_bytes;
1421 qgroup->reserved -= oper->num_bytes;
1423 qgroup_dirty(fs_info, qgroup);
1425 /* Get all of the parent groups that contain this qgroup */
1426 list_for_each_entry(glist, &qgroup->groups, next_group) {
1427 ret = ulist_add(tmp, glist->group->qgroupid,
1428 ptr_to_u64(glist->group), GFP_ATOMIC);
1433 /* Iterate all of the parents and adjust their reference counts */
1434 ULIST_ITER_INIT(&uiter);
1435 while ((unode = ulist_next(tmp, &uiter))) {
1436 qgroup = u64_to_ptr(unode->aux);
1437 qgroup->rfer += sign * oper->num_bytes;
1438 qgroup->rfer_cmpr += sign * oper->num_bytes;
1439 WARN_ON(sign < 0 && qgroup->excl < oper->num_bytes);
1440 qgroup->excl += sign * oper->num_bytes;
1442 qgroup->reserved -= oper->num_bytes;
1443 qgroup->excl_cmpr += sign * oper->num_bytes;
1444 qgroup_dirty(fs_info, qgroup);
1446 /* Add any parents of the parents */
1447 list_for_each_entry(glist, &qgroup->groups, next_group) {
1448 ret = ulist_add(tmp, glist->group->qgroupid,
1449 ptr_to_u64(glist->group), GFP_ATOMIC);
1456 spin_unlock(&fs_info->qgroup_lock);
1462 * Walk all of the roots that pointed to our bytenr and adjust their refcnts as needed.
1465 static int qgroup_calc_old_refcnt(struct btrfs_fs_info *fs_info,
1466 u64 root_to_skip, struct ulist *tmp,
1467 struct ulist *roots, struct ulist *qgroups,
1468 u64 seq, int *old_roots, int rescan)
1470 struct ulist_node *unode;
1471 struct ulist_iterator uiter;
1472 struct ulist_node *tmp_unode;
1473 struct ulist_iterator tmp_uiter;
1474 struct btrfs_qgroup *qg;
1477 ULIST_ITER_INIT(&uiter);
1478 while ((unode = ulist_next(roots, &uiter))) {
1479 /* We don't count our current root here */
1480 if (unode->val == root_to_skip)
1482 qg = find_qgroup_rb(fs_info, unode->val);
1486 * We could have a pending removal of this same ref so we may
1487 * not have actually found our ref root when doing
1488 * btrfs_find_all_roots, so we need to keep track of how many
1489 * old roots we find in case we removed ours and added a
1490 * different one at the same time. I don't think this could
1491 * happen in practice but that sort of thinking leads to pain
1492 * and suffering and to the dark side.
1497 ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
1501 ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);
1504 ULIST_ITER_INIT(&tmp_uiter);
1505 while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
1506 struct btrfs_qgroup_list *glist;
1508 qg = u64_to_ptr(tmp_unode->aux);
1510 * We use this sequence number to keep from having to
1511 * run the whole list and 0 out the refcnt every time.
1512 * We basically use the sequence as the known 0 count and
1513 * then add 1 every time we see a qgroup. This is how we
1514 * get how many of the roots actually point up to the
1515 * upper level qgroups in order to determine exclusive
1518 * For rescan we want to set old_refcnt to seq so our
1519 * exclusive calculations end up correct.
1522 qg->old_refcnt = seq;
1523 else if (qg->old_refcnt < seq)
1524 qg->old_refcnt = seq + 1;
1528 if (qg->new_refcnt < seq)
1529 qg->new_refcnt = seq + 1;
1532 list_for_each_entry(glist, &qg->groups, next_group) {
1533 ret = ulist_add(qgroups, glist->group->qgroupid,
1534 ptr_to_u64(glist->group),
1538 ret = ulist_add(tmp, glist->group->qgroupid,
1539 ptr_to_u64(glist->group),
1550 * We need to walk forward in our operation tree and account for any roots that
1551 * were deleted after we made this operation.
1553 static int qgroup_account_deleted_refs(struct btrfs_fs_info *fs_info,
1554 struct btrfs_qgroup_operation *oper,
1556 struct ulist *qgroups, u64 seq,
1559 struct ulist_node *unode;
1560 struct ulist_iterator uiter;
1561 struct btrfs_qgroup *qg;
1562 struct btrfs_qgroup_operation *tmp_oper;
1569 * We only walk forward in the tree since we're only interested in
1570 * removals that happened _after_ our operation.
1572 spin_lock(&fs_info->qgroup_op_lock);
1573 n = rb_next(&oper->n);
1574 spin_unlock(&fs_info->qgroup_op_lock);
1577 tmp_oper = rb_entry(n, struct btrfs_qgroup_operation, n);
1578 while (tmp_oper->bytenr == oper->bytenr) {
1580 * If it's not a removal we don't care, additions work out
1581 * properly with our refcnt tracking.
1583 if (tmp_oper->type != BTRFS_QGROUP_OPER_SUB_SHARED &&
1584 tmp_oper->type != BTRFS_QGROUP_OPER_SUB_EXCL)
1586 qg = find_qgroup_rb(fs_info, tmp_oper->ref_root);
1589 ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
1595 * We only want to increase old_roots if this qgroup is
1596 * not already in the list of qgroups. If it is already
1597 * there then that means it must have been re-added or
1598 * the delete will be discarded because we had an
1599 * existing ref that we haven't looked up yet. In this
1600 * case we don't want to increase old_roots. So if ret
1601 * == 1 then we know that this is the first time we've
1602 * seen this qgroup and we can bump the old_roots.
1605 ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg),
1611 spin_lock(&fs_info->qgroup_op_lock);
1612 n = rb_next(&tmp_oper->n);
1613 spin_unlock(&fs_info->qgroup_op_lock);
1616 tmp_oper = rb_entry(n, struct btrfs_qgroup_operation, n);
1619 /* Ok now process the qgroups we found */
1620 ULIST_ITER_INIT(&uiter);
1621 while ((unode = ulist_next(tmp, &uiter))) {
1622 struct btrfs_qgroup_list *glist;
1624 qg = u64_to_ptr(unode->aux);
1625 if (qg->old_refcnt < seq)
1626 qg->old_refcnt = seq + 1;
1629 if (qg->new_refcnt < seq)
1630 qg->new_refcnt = seq + 1;
1633 list_for_each_entry(glist, &qg->groups, next_group) {
1634 ret = ulist_add(qgroups, glist->group->qgroupid,
1635 ptr_to_u64(glist->group), GFP_ATOMIC);
1638 ret = ulist_add(tmp, glist->group->qgroupid,
1639 ptr_to_u64(glist->group), GFP_ATOMIC);
1647 /* Add refcnt for the newly added reference. */
1648 static int qgroup_calc_new_refcnt(struct btrfs_fs_info *fs_info,
1649 struct btrfs_qgroup_operation *oper,
1650 struct btrfs_qgroup *qgroup,
1651 struct ulist *tmp, struct ulist *qgroups,
1654 struct ulist_node *unode;
1655 struct ulist_iterator uiter;
1656 struct btrfs_qgroup *qg;
1660 ret = ulist_add(qgroups, qgroup->qgroupid, ptr_to_u64(qgroup),
1664 ret = ulist_add(tmp, qgroup->qgroupid, ptr_to_u64(qgroup),
1668 ULIST_ITER_INIT(&uiter);
1669 while ((unode = ulist_next(tmp, &uiter))) {
1670 struct btrfs_qgroup_list *glist;
1672 qg = u64_to_ptr(unode->aux);
1673 if (oper->type == BTRFS_QGROUP_OPER_ADD_SHARED) {
1674 if (qg->new_refcnt < seq)
1675 qg->new_refcnt = seq + 1;
1679 if (qg->old_refcnt < seq)
1680 qg->old_refcnt = seq + 1;
1684 list_for_each_entry(glist, &qg->groups, next_group) {
1685 ret = ulist_add(tmp, glist->group->qgroupid,
1686 ptr_to_u64(glist->group), GFP_ATOMIC);
1689 ret = ulist_add(qgroups, glist->group->qgroupid,
1690 ptr_to_u64(glist->group), GFP_ATOMIC);
1699 * This adjusts the counters for all referenced qgroups if need be.
1701 static int qgroup_adjust_counters(struct btrfs_fs_info *fs_info,
1702 u64 root_to_skip, u64 num_bytes,
1703 struct ulist *qgroups, u64 seq,
1704 int old_roots, int new_roots, int rescan)
1706 struct ulist_node *unode;
1707 struct ulist_iterator uiter;
1708 struct btrfs_qgroup *qg;
1709 u64 cur_new_count, cur_old_count;
1711 ULIST_ITER_INIT(&uiter);
1712 while ((unode = ulist_next(qgroups, &uiter))) {
1715 qg = u64_to_ptr(unode->aux);
1717 * Wasn't referenced before but is now, add to the reference
1720 if (qg->old_refcnt <= seq && qg->new_refcnt > seq) {
1721 qg->rfer += num_bytes;
1722 qg->rfer_cmpr += num_bytes;
1727 * Was referenced before but isn't now, subtract from the
1728 * reference counters.
1730 if (qg->old_refcnt > seq && qg->new_refcnt <= seq) {
1731 qg->rfer -= num_bytes;
1732 qg->rfer_cmpr -= num_bytes;
1736 if (qg->old_refcnt < seq)
1739 cur_old_count = qg->old_refcnt - seq;
1740 if (qg->new_refcnt < seq)
1743 cur_new_count = qg->new_refcnt - seq;
1746 * If our refcount was the same as the roots previously but our
1747 * new count isn't the same as the number of roots now then we
1748 * went from having an exclusive reference on this range to not.
1750 if (old_roots && cur_old_count == old_roots &&
1751 (cur_new_count != new_roots || new_roots == 0)) {
1752 WARN_ON(cur_new_count != new_roots && new_roots == 0);
1753 qg->excl -= num_bytes;
1754 qg->excl_cmpr -= num_bytes;
1759 * If we didn't reference all the roots before but now we do, we
1760 * have an exclusive reference to this range.
1762 if ((!old_roots || (old_roots && cur_old_count != old_roots))
1763 && cur_new_count == new_roots) {
1764 qg->excl += num_bytes;
1765 qg->excl_cmpr += num_bytes;
1770 qgroup_dirty(fs_info, qg);
1776 * If we removed a data extent and there were other references for that bytenr
1777 * then we need to look up all referenced roots to make sure we still don't
1778 * reference this bytenr. If we do then we can just discard this operation.
1780 static int check_existing_refs(struct btrfs_trans_handle *trans,
1781 struct btrfs_fs_info *fs_info,
1782 struct btrfs_qgroup_operation *oper)
1784 struct ulist *roots = NULL;
1785 struct ulist_node *unode;
1786 struct ulist_iterator uiter;
1789 ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr,
1790 oper->elem.seq, &roots);
1795 ULIST_ITER_INIT(&uiter);
1796 while ((unode = ulist_next(roots, &uiter))) {
1797 if (unode->val == oper->ref_root) {
1803 btrfs_put_tree_mod_seq(fs_info, &oper->elem);
1809 * If we share a reference across multiple roots then we may need to adjust
1810 * various qgroups' referenced and exclusive counters. The basic premise is this:
1812 * 1) We have seq to represent a 0 count. Instead of looping through all of the
1813 * qgroups and resetting their refcount to 0 we just constantly bump this
1814 * sequence number to act as the base reference count. This means that if
1815 * anybody is equal to or below this sequence they were never referenced. We
1816 * jack this sequence up by the number of roots we found each time in order to
1817 * make sure we don't have any overlap.
1819 * 2) We first search all the roots that reference the area _except_ the root
1820 * we're acting on currently. This makes up the old_refcnt of all the qgroups
1823 * 3) We walk all of the qgroups referenced by the root we are currently acting
1824 * on, and will either adjust old_refcnt in the case of a removal or the
1825 * new_refcnt in the case of an addition.
1827 * 4) Finally we walk all the qgroups that are referenced by this range
1828 * including the root we are acting on currently. We will adjust the counters
1829 * based on the number of roots we had and will have after this operation.
1831 * Take this example as an illustration
1835 * [qg 0/0] [qg 0/1] [qg 0/2]
1839 * Say we are adding a reference that is covered by qg 0/0. The first step
1840 * would give a refcnt of 1 to qg 0/1 and 0/2 and a refcnt of 2 to qg 1/0 with
1841 * old_roots being 2. Because this is an addition, new_roots will be old_roots + 1 = 3. We then go
1842 * through qg 0/0 which will get the new_refcnt set to 1 and add 1 to qg 1/0's
1843 * new_refcnt, bringing it to 3. We then walk through all of the qgroups, we
1844 * notice that the old refcnt for qg 0/0 < the new refcnt, so we added a
1845 * reference and thus must add the size to the referenced bytes. Everything
1846 * else is the same so nothing else changes.
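 *
 * As a sketch of the bookkeeping above, relative to seq the counts after
 * all four steps are:
 *
 *	qgroup	old_refcnt	new_refcnt
 *	0/0	0		1
 *	0/1	1		1
 *	0/2	1		1
 *	1/0	2		3
 *
 * with old_roots == 2 and new_roots == 3.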
1848 static int qgroup_shared_accounting(struct btrfs_trans_handle *trans,
1849 struct btrfs_fs_info *fs_info,
1850 struct btrfs_qgroup_operation *oper)
1852 struct ulist *roots = NULL;
1853 struct ulist *qgroups, *tmp;
1854 struct btrfs_qgroup *qgroup;
1855 struct seq_list elem = SEQ_LIST_INIT(elem);
1861 if (oper->elem.seq) {
1862 ret = check_existing_refs(trans, fs_info, oper);
1869 qgroups = ulist_alloc(GFP_NOFS);
1873 tmp = ulist_alloc(GFP_NOFS);
1875 ulist_free(qgroups);
1879 btrfs_get_tree_mod_seq(fs_info, &elem);
1880 ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr, elem.seq,
1882 btrfs_put_tree_mod_seq(fs_info, &elem);
1884 ulist_free(qgroups);
1888 spin_lock(&fs_info->qgroup_lock);
1889 qgroup = find_qgroup_rb(fs_info, oper->ref_root);
1892 seq = fs_info->qgroup_seq;
1895 * So roots is the list of all the roots currently pointing at the
1896 * bytenr, including the ref we are adding if we are adding, or not if
1897 * we are removing a ref. So we pass in the ref_root to skip that root
1898 * in our calculations. We set old_refcnt and new_refcnt because who the
1899 * hell knows what everything looked like before, and it doesn't matter
1902 ret = qgroup_calc_old_refcnt(fs_info, oper->ref_root, tmp, roots, qgroups,
1903 seq, &old_roots, 0);
1908 * Now adjust the refcounts of the qgroups that care about this
1909 * reference, either the old_count in the case of removal or new_count
1910 * in the case of an addition.
1912 ret = qgroup_calc_new_refcnt(fs_info, oper, qgroup, tmp, qgroups,
1918 * ...in the case of removals. If we had a removal before we got around
1919 * to processing this operation then we need to find that guy and count
1920 * his references as if they really existed so we don't end up screwing
1921 * up the exclusive counts. Then whenever we go to process the delete
1922 * everything will be grand and we can account for whatever exclusive
1923 * changes need to be made there. We also have to pass in old_roots so
1924 * we have an accurate count of the roots as it pertains to this
1925 * operations view of the world.
1927 ret = qgroup_account_deleted_refs(fs_info, oper, tmp, qgroups, seq,
1933 * We are adding our root, need to adjust up the number of roots,
1934 * otherwise old_roots is the number of roots we want.
1936 if (oper->type == BTRFS_QGROUP_OPER_ADD_SHARED) {
1937 new_roots = old_roots + 1;
1939 new_roots = old_roots;
1942 fs_info->qgroup_seq += old_roots + 1;
1946 * And now the magic happens, bless Arne for having a pretty elegant
1947 * solution for this.
1949 qgroup_adjust_counters(fs_info, oper->ref_root, oper->num_bytes,
1950 qgroups, seq, old_roots, new_roots, 0);
1952 spin_unlock(&fs_info->qgroup_lock);
1953 ulist_free(qgroups);
1960 * Process a reference to a shared subtree. This type of operation is
1961 * queued during snapshot removal when we encounter extents which are
1962 * shared between more than one root.
1964 static int qgroup_subtree_accounting(struct btrfs_trans_handle *trans,
1965 struct btrfs_fs_info *fs_info,
1966 struct btrfs_qgroup_operation *oper)
1968 struct ulist *roots = NULL;
1969 struct ulist_node *unode;
1970 struct ulist_iterator uiter;
1971 struct btrfs_qgroup_list *glist;
1972 struct ulist *parents;
1975 struct btrfs_qgroup *qg;
1977 struct seq_list elem = SEQ_LIST_INIT(elem);
1979 parents = ulist_alloc(GFP_NOFS);
1983 btrfs_get_tree_mod_seq(fs_info, &elem);
1984 ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr,
1986 btrfs_put_tree_mod_seq(fs_info, &elem);
1990 if (roots->nnodes != 1)
1993 ULIST_ITER_INIT(&uiter);
1994 unode = ulist_next(roots, &uiter); /* Only want 1 so no need to loop */
1996 * If we find our ref root then that means all refs
1997 * this extent has to the root have not yet been
1998 * deleted. In that case, we do nothing and let the
1999 * last ref for this bytenr drive our update.
2001 * This can happen for example if an extent is
2002 * referenced multiple times in a snapshot (clone,
2003 * etc). If we are in the middle of snapshot removal,
2004 * queued updates for such an extent will find the
2005 * root if we have not yet finished removing the
2008 if (unode->val == oper->ref_root)
2011 root_obj = unode->val;
2014 spin_lock(&fs_info->qgroup_lock);
2015 qg = find_qgroup_rb(fs_info, root_obj);
2019 qg->excl += oper->num_bytes;
2020 qg->excl_cmpr += oper->num_bytes;
2021 qgroup_dirty(fs_info, qg);
2024 * Adjust counts for parent groups. First we find all
2025 * parents, then in the 2nd loop we do the adjustment
2026 * while adding parents of the parents to our ulist.
2028 list_for_each_entry(glist, &qg->groups, next_group) {
2029 err = ulist_add(parents, glist->group->qgroupid,
2030 ptr_to_u64(glist->group), GFP_ATOMIC);
2037 ULIST_ITER_INIT(&uiter);
2038 while ((unode = ulist_next(parents, &uiter))) {
2039 qg = u64_to_ptr(unode->aux);
2040 qg->excl += oper->num_bytes;
2041 qg->excl_cmpr += oper->num_bytes;
2042 qgroup_dirty(fs_info, qg);
2044 /* Add any parents of the parents */
2045 list_for_each_entry(glist, &qg->groups, next_group) {
2046 err = ulist_add(parents, glist->group->qgroupid,
2047 ptr_to_u64(glist->group), GFP_ATOMIC);
2056 spin_unlock(&fs_info->qgroup_lock);
2060 ulist_free(parents);
2065 * btrfs_qgroup_account is called for every ref that is added to or deleted
2066 * from the fs. First, all roots referencing the extent are searched, and
2067 * then the space is accounted accordingly to the different roots. The
2068 * accounting algorithm works in 3 steps documented inline.
2070 static int btrfs_qgroup_account(struct btrfs_trans_handle *trans,
2071 struct btrfs_fs_info *fs_info,
2072 struct btrfs_qgroup_operation *oper)
2076 if (!fs_info->quota_enabled)
2079 BUG_ON(!fs_info->quota_root);
2081 mutex_lock(&fs_info->qgroup_rescan_lock);
2082 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
2083 if (fs_info->qgroup_rescan_progress.objectid <= oper->bytenr) {
2084 mutex_unlock(&fs_info->qgroup_rescan_lock);
2088 mutex_unlock(&fs_info->qgroup_rescan_lock);
2090 ASSERT(is_fstree(oper->ref_root));
2092 trace_btrfs_qgroup_account(oper);
2094 switch (oper->type) {
2095 case BTRFS_QGROUP_OPER_ADD_EXCL:
2096 case BTRFS_QGROUP_OPER_SUB_EXCL:
2097 ret = qgroup_excl_accounting(fs_info, oper);
2099 case BTRFS_QGROUP_OPER_ADD_SHARED:
2100 case BTRFS_QGROUP_OPER_SUB_SHARED:
2101 ret = qgroup_shared_accounting(trans, fs_info, oper);
2103 case BTRFS_QGROUP_OPER_SUB_SUBTREE:
2104 ret = qgroup_subtree_accounting(trans, fs_info, oper);
2113 * Needs to be called every time we run delayed refs, even if there is an
2114 * error, in order to clean up outstanding operations.
2116 int btrfs_delayed_qgroup_accounting(struct btrfs_trans_handle *trans,
2117 struct btrfs_fs_info *fs_info)
2119 struct btrfs_qgroup_operation *oper;
2122 while (!list_empty(&trans->qgroup_ref_list)) {
2123 oper = list_first_entry(&trans->qgroup_ref_list,
2124 struct btrfs_qgroup_operation, list);
2125 list_del_init(&oper->list);
2126 if (!ret || !trans->aborted)
2127 ret = btrfs_qgroup_account(trans, fs_info, oper);
2128 spin_lock(&fs_info->qgroup_op_lock);
2129 rb_erase(&oper->n, &fs_info->qgroup_op_tree);
2130 spin_unlock(&fs_info->qgroup_op_lock);
2131 btrfs_put_tree_mod_seq(fs_info, &oper->elem);
2138 * called from commit_transaction. Writes all changed qgroups to disk.
2140 int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
2141 struct btrfs_fs_info *fs_info)
2143 struct btrfs_root *quota_root = fs_info->quota_root;
2145 int start_rescan_worker = 0;
2150 if (!fs_info->quota_enabled && fs_info->pending_quota_state)
2151 start_rescan_worker = 1;
2153 fs_info->quota_enabled = fs_info->pending_quota_state;
2155 spin_lock(&fs_info->qgroup_lock);
2156 while (!list_empty(&fs_info->dirty_qgroups)) {
2157 struct btrfs_qgroup *qgroup;
2158 qgroup = list_first_entry(&fs_info->dirty_qgroups,
2159 struct btrfs_qgroup, dirty);
2160 list_del_init(&qgroup->dirty);
2161 spin_unlock(&fs_info->qgroup_lock);
2162 ret = update_qgroup_info_item(trans, quota_root, qgroup);
2164 fs_info->qgroup_flags |=
2165 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2166 ret = update_qgroup_limit_item(trans, quota_root, qgroup);
2168 fs_info->qgroup_flags |=
2169 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2170 spin_lock(&fs_info->qgroup_lock);
2172 if (fs_info->quota_enabled)
2173 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
2175 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
2176 spin_unlock(&fs_info->qgroup_lock);
2178 ret = update_qgroup_status_item(trans, fs_info, quota_root);
2180 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2182 if (!ret && start_rescan_worker) {
2183 ret = qgroup_rescan_init(fs_info, 0, 1);
2185 qgroup_rescan_zero_tracking(fs_info);
2186 btrfs_queue_work(fs_info->qgroup_rescan_workers,
2187 &fs_info->qgroup_rescan_work);
2198 * copy the accounting information between qgroups. This is necessary when a
2199 * snapshot or a subvolume is created
2201 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
2202 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
2203 struct btrfs_qgroup_inherit *inherit)
2208 struct btrfs_root *quota_root = fs_info->quota_root;
2209 struct btrfs_qgroup *srcgroup;
2210 struct btrfs_qgroup *dstgroup;
2214 mutex_lock(&fs_info->qgroup_ioctl_lock);
2215 if (!fs_info->quota_enabled)
2224 i_qgroups = (u64 *)(inherit + 1);
2225 nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
2226 2 * inherit->num_excl_copies;
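/*
 * A note on the layout (sketch): the qgroupids follow struct
 * btrfs_qgroup_inherit in memory, first num_qgroups single entries, then
 * (src, dst) pairs for the rfer copies and the excl copies, hence the
 * factor of 2 above.
 */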
2227 for (i = 0; i < nums; ++i) {
2228 srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
2238 * create a tracking group for the subvol itself
2240 ret = add_qgroup_item(trans, quota_root, objectid);
2245 struct btrfs_root *srcroot;
2246 struct btrfs_key srckey;
2248 srckey.objectid = srcid;
2249 srckey.type = BTRFS_ROOT_ITEM_KEY;
2250 srckey.offset = (u64)-1;
2251 srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
2252 if (IS_ERR(srcroot)) {
2253 ret = PTR_ERR(srcroot);
2258 level_size = srcroot->nodesize;
2263 * add qgroup to all inherited groups
2266 i_qgroups = (u64 *)(inherit + 1);
2267 for (i = 0; i < inherit->num_qgroups; ++i) {
2268 ret = add_qgroup_relation_item(trans, quota_root,
2269 objectid, *i_qgroups);
2272 ret = add_qgroup_relation_item(trans, quota_root,
2273 *i_qgroups, objectid);
2281 spin_lock(&fs_info->qgroup_lock);
2283 dstgroup = add_qgroup_rb(fs_info, objectid);
2284 if (IS_ERR(dstgroup)) {
2285 ret = PTR_ERR(dstgroup);
2289 if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
2290 dstgroup->lim_flags = inherit->lim.flags;
2291 dstgroup->max_rfer = inherit->lim.max_rfer;
2292 dstgroup->max_excl = inherit->lim.max_excl;
2293 dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
2294 dstgroup->rsv_excl = inherit->lim.rsv_excl;
2296 ret = update_qgroup_limit_item(trans, quota_root, dstgroup);
2298 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2299 btrfs_info(fs_info, "unable to update quota limit for %llu",
2300 dstgroup->qgroupid);
2306 srcgroup = find_qgroup_rb(fs_info, srcid);
2311 * We call inherit after we clone the root in order to make sure
2312 * our counts don't go crazy, so at this point the only
2313 * difference between the two roots should be the root node.
2315 dstgroup->rfer = srcgroup->rfer;
2316 dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
2317 dstgroup->excl = level_size;
2318 dstgroup->excl_cmpr = level_size;
2319 srcgroup->excl = level_size;
2320 srcgroup->excl_cmpr = level_size;
2322 /* inherit the limit info */
2323 dstgroup->lim_flags = srcgroup->lim_flags;
2324 dstgroup->max_rfer = srcgroup->max_rfer;
2325 dstgroup->max_excl = srcgroup->max_excl;
2326 dstgroup->rsv_rfer = srcgroup->rsv_rfer;
2327 dstgroup->rsv_excl = srcgroup->rsv_excl;
2329 qgroup_dirty(fs_info, dstgroup);
2330 qgroup_dirty(fs_info, srcgroup);
2336 i_qgroups = (u64 *)(inherit + 1);
2337 for (i = 0; i < inherit->num_qgroups; ++i) {
2338 ret = add_relation_rb(quota_root->fs_info, objectid,
2345 for (i = 0; i < inherit->num_ref_copies; ++i) {
2346 struct btrfs_qgroup *src;
2347 struct btrfs_qgroup *dst;
2349 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2350 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2357 dst->rfer = src->rfer - level_size;
2358 dst->rfer_cmpr = src->rfer_cmpr - level_size;
2361 for (i = 0; i < inherit->num_excl_copies; ++i) {
2362 struct btrfs_qgroup *src;
2363 struct btrfs_qgroup *dst;
2365 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2366 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2373 dst->excl = src->excl + level_size;
2374 dst->excl_cmpr = src->excl_cmpr + level_size;
2379 spin_unlock(&fs_info->qgroup_lock);
2381 mutex_unlock(&fs_info->qgroup_ioctl_lock);
2385 int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
2387 struct btrfs_root *quota_root;
2388 struct btrfs_qgroup *qgroup;
2389 struct btrfs_fs_info *fs_info = root->fs_info;
2390 u64 ref_root = root->root_key.objectid;
2392 struct ulist_node *unode;
2393 struct ulist_iterator uiter;
2395 if (!is_fstree(ref_root))
2401 spin_lock(&fs_info->qgroup_lock);
2402 quota_root = fs_info->quota_root;
2406 qgroup = find_qgroup_rb(fs_info, ref_root);
2412 * in a first step, we check for all affected qgroups whether any limits would be exceeded.
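 *
 * For example, with BTRFS_QGROUP_LIMIT_MAX_RFER set the reservation is
 * refused as soon as
 *
 *	qg->reserved + (s64)qg->rfer + num_bytes > qg->max_rfer
 *
 * holds for any qgroup in the hierarchy.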
2414 ulist_reinit(fs_info->qgroup_ulist);
2415 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
2416 (uintptr_t)qgroup, GFP_ATOMIC);
2419 ULIST_ITER_INIT(&uiter);
2420 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2421 struct btrfs_qgroup *qg;
2422 struct btrfs_qgroup_list *glist;
2424 qg = u64_to_ptr(unode->aux);
2426 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
2427 qg->reserved + (s64)qg->rfer + num_bytes >
2433 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
2434 qg->reserved + (s64)qg->excl + num_bytes >
2440 list_for_each_entry(glist, &qg->groups, next_group) {
2441 ret = ulist_add(fs_info->qgroup_ulist,
2442 glist->group->qgroupid,
2443 (uintptr_t)glist->group, GFP_ATOMIC);
2450 * no limits exceeded, now record the reservation into all qgroups
2452 ULIST_ITER_INIT(&uiter);
2453 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2454 struct btrfs_qgroup *qg;
2456 qg = u64_to_ptr(unode->aux);
2458 qg->reserved += num_bytes;
2462 spin_unlock(&fs_info->qgroup_lock);
2466 void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
2468 struct btrfs_root *quota_root;
2469 struct btrfs_qgroup *qgroup;
2470 struct btrfs_fs_info *fs_info = root->fs_info;
2471 struct ulist_node *unode;
2472 struct ulist_iterator uiter;
2473 u64 ref_root = root->root_key.objectid;
2476 if (!is_fstree(ref_root))
2482 spin_lock(&fs_info->qgroup_lock);
2484 quota_root = fs_info->quota_root;
2488 qgroup = find_qgroup_rb(fs_info, ref_root);
2492 ulist_reinit(fs_info->qgroup_ulist);
2493 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
2494 (uintptr_t)qgroup, GFP_ATOMIC);
2497 ULIST_ITER_INIT(&uiter);
2498 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
2499 struct btrfs_qgroup *qg;
2500 struct btrfs_qgroup_list *glist;
2502 qg = u64_to_ptr(unode->aux);
2504 qg->reserved -= num_bytes;
2506 list_for_each_entry(glist, &qg->groups, next_group) {
2507 ret = ulist_add(fs_info->qgroup_ulist,
2508 glist->group->qgroupid,
2509 (uintptr_t)glist->group, GFP_ATOMIC);
2516 spin_unlock(&fs_info->qgroup_lock);
2519 void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
2521 if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
2523 btrfs_err(trans->root->fs_info,
2524 "qgroups not uptodate in trans handle %p: list is%s empty, "
2526 trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
2527 (u32)(trans->delayed_ref_elem.seq >> 32),
2528 (u32)trans->delayed_ref_elem.seq);
2533 * returns < 0 on error, 0 when more leaves are to be scanned.
2534 * returns 1 when done, 2 when done and FLAG_INCONSISTENT was cleared.
2537 qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
2538 struct btrfs_trans_handle *trans, struct ulist *qgroups,
2539 struct ulist *tmp, struct extent_buffer *scratch_leaf)
2541 struct btrfs_key found;
2542 struct ulist *roots = NULL;
2543 struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
2550 path->leave_spinning = 1;
2551 mutex_lock(&fs_info->qgroup_rescan_lock);
2552 ret = btrfs_search_slot_for_read(fs_info->extent_root,
2553 &fs_info->qgroup_rescan_progress,
2556 pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
2557 fs_info->qgroup_rescan_progress.objectid,
2558 fs_info->qgroup_rescan_progress.type,
2559 fs_info->qgroup_rescan_progress.offset, ret);
2563 * The rescan is about to end, we will not be scanning any
2564 * further blocks. We cannot unset the RESCAN flag here, because
2565 * we want to commit the transaction if everything went well.
2566 * To make the live accounting work in this phase, we set our
2567 * scan progress pointer such that every real extent objectid
2570 fs_info->qgroup_rescan_progress.objectid = (u64)-1;
2571 btrfs_release_path(path);
2572 mutex_unlock(&fs_info->qgroup_rescan_lock);
2576 btrfs_item_key_to_cpu(path->nodes[0], &found,
2577 btrfs_header_nritems(path->nodes[0]) - 1);
2578 fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
2580 btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2581 memcpy(scratch_leaf, path->nodes[0], sizeof(*scratch_leaf));
2582 slot = path->slots[0];
2583 btrfs_release_path(path);
2584 mutex_unlock(&fs_info->qgroup_rescan_lock);
2586 for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
2587 btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
2588 if (found.type != BTRFS_EXTENT_ITEM_KEY &&
2589 found.type != BTRFS_METADATA_ITEM_KEY)
2591 if (found.type == BTRFS_METADATA_ITEM_KEY)
2592 num_bytes = fs_info->extent_root->nodesize;
2594 num_bytes = found.offset;
2596 ulist_reinit(qgroups);
2597 ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
2601 spin_lock(&fs_info->qgroup_lock);
2602 seq = fs_info->qgroup_seq;
2603 fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */
2606 ret = qgroup_calc_old_refcnt(fs_info, 0, tmp, roots, qgroups,
2607 seq, &new_roots, 1);
2609 spin_unlock(&fs_info->qgroup_lock);
2614 ret = qgroup_adjust_counters(fs_info, 0, num_bytes, qgroups,
2615 seq, 0, new_roots, 1);
2617 spin_unlock(&fs_info->qgroup_lock);
2621 spin_unlock(&fs_info->qgroup_lock);
2625 btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
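
/*
 * Sketch of the contract above, as consumed by the driver loop in
 * btrfs_qgroup_rescan_worker() below (simplified; the real loop also wraps
 * each iteration in a transaction): 0 means "come back for the next leaf",
 * anything else ends the scan:
 *
 *	while (!err)
 *		err = qgroup_rescan_leaf(...);
 *	(err < 0: scan failed; err >= 1: scan finished)
 */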

static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
						     qgroup_rescan_work);
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *tmp = NULL, *qgroups = NULL;
	struct extent_buffer *scratch_leaf = NULL;
	int err = -ENOMEM;

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	qgroups = ulist_alloc(GFP_NOFS);
	if (!qgroups)
		goto out;
	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		goto out;
	scratch_leaf = kmalloc(sizeof(*scratch_leaf), GFP_NOFS);
	if (!scratch_leaf)
		goto out;

	err = 0;
	while (!err) {
		trans = btrfs_start_transaction(fs_info->fs_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			break;
		}
		if (!fs_info->quota_enabled) {
			err = -EINTR;
		} else {
			err = qgroup_rescan_leaf(fs_info, path, trans,
						 qgroups, tmp, scratch_leaf);
		}
		if (err > 0)
			btrfs_commit_transaction(trans, fs_info->fs_root);
		else
			btrfs_end_transaction(trans, fs_info->fs_root);
	}

out:
	kfree(scratch_leaf);
	ulist_free(qgroups);
	ulist_free(tmp);
	btrfs_free_path(path);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;

	if (err == 2 &&
	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	} else if (err < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (err >= 0) {
		btrfs_info(fs_info, "qgroup scan completed%s",
			err == 2 ? " (inconsistency flag cleared)" : "");
	} else {
		btrfs_err(fs_info, "qgroup scan failed with %d", err);
	}

	complete_all(&fs_info->qgroup_rescan_completion);
}

/*
 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
 * memory required for the rescan context.
 */
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags)
{
	int ret = 0;

	if (!init_flags &&
	    (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
	     !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) {
		ret = -EINVAL;
		goto err;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);

	if (init_flags) {
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
			ret = -EINPROGRESS;
		else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
			ret = -EINVAL;

		if (ret) {
			spin_unlock(&fs_info->qgroup_lock);
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			goto err;
		}
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	memset(&fs_info->qgroup_rescan_progress, 0,
		sizeof(fs_info->qgroup_rescan_progress));
	fs_info->qgroup_rescan_progress.objectid = progress_objectid;

	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	init_completion(&fs_info->qgroup_rescan_completion);

	memset(&fs_info->qgroup_rescan_work, 0,
	       sizeof(fs_info->qgroup_rescan_work));
	btrfs_init_work(&fs_info->qgroup_rescan_work,
			btrfs_qgroup_rescan_helper,
			btrfs_qgroup_rescan_worker, NULL, NULL);

	if (ret) {
err:
		btrfs_info(fs_info, "qgroup_rescan_init failed with %d", ret);
		return ret;
	}

	return 0;
}
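
/*
 * An illustration of the two init modes (the resume caller and the stored
 * progress key are outside the portion shown here, so saved_progress_objectid
 * below is only an assumed name):
 *
 *	qgroup_rescan_init(fs_info, 0, 1);
 *		start a fresh scan from objectid 0; fails with -EINPROGRESS
 *		if a scan is already running (see btrfs_qgroup_rescan() below)
 *
 *	qgroup_rescan_init(fs_info, saved_progress_objectid, 0);
 *		re-arm a previously interrupted scan; requires that the
 *		RESCAN and ON status flags are already set
 */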

static void
qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	spin_lock(&fs_info->qgroup_lock);
	/* clear all current qgroup tracking information */
	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		qgroup->rfer = 0;
		qgroup->rfer_cmpr = 0;
		qgroup->excl = 0;
		qgroup->excl_cmpr = 0;
	}
	spin_unlock(&fs_info->qgroup_lock);
}

int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{
	int ret = 0;
	struct btrfs_trans_handle *trans;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (ret)
		return ret;

	/*
	 * We have set the rescan_progress to 0, which means no more
	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
	 * However, btrfs_qgroup_account_ref may already be past its call
	 * to btrfs_find_all_roots, in which case it would still do the
	 * accounting.
	 * To solve this, we're committing the transaction, which will
	 * ensure we run all delayed refs and only after that, we are
	 * going to clear all tracking information for a clean start.
	 */

	trans = btrfs_join_transaction(fs_info->fs_root);
	if (IS_ERR(trans)) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return PTR_ERR(trans);
	}
	ret = btrfs_commit_transaction(trans, fs_info->fs_root);
	if (ret) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return ret;
	}

	qgroup_rescan_zero_tracking(fs_info);

	btrfs_queue_work(fs_info->qgroup_rescan_workers,
			 &fs_info->qgroup_rescan_work);

	return 0;
}
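
/*
 * Hypothetical caller sequence tying the pieces together (the real ioctl
 * handlers live outside this file; this is only an illustration):
 *
 *	ret = btrfs_qgroup_rescan(fs_info);
 *	if (!ret)
 *		ret = btrfs_qgroup_wait_for_completion(fs_info);
 *
 * The wait is optional; the scan itself proceeds on the
 * qgroup_rescan_workers queue either way.
 */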

int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
{
	int running;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);
	running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (running)
		ret = wait_for_completion_interruptible(
					&fs_info->qgroup_rescan_completion);

	return ret;
}

/*
 * this is only called from open_ctree where we're still single threaded, thus
 * locking is omitted here.
 */
void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
{
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
}