/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"
/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - reorganize keys
 *  - compressed
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */
/*
 * one struct for each qgroup, organized in fs_info->qgroup_tree.
 */
struct btrfs_qgroup {
	u64 qgroupid;

	/*
	 * state
	 */
	u64 rfer;	/* referenced */
	u64 rfer_cmpr;	/* referenced compressed */
	u64 excl;	/* exclusive */
	u64 excl_cmpr;	/* exclusive compressed */

	/*
	 * limits
	 */
	u64 lim_flags;	/* which limits are set */
	u64 max_rfer;
	u64 max_excl;
	u64 rsv_rfer;
	u64 rsv_excl;

	/*
	 * reservation tracking
	 */
	u64 reserved;

	/*
	 * lists
	 */
	struct list_head groups;  /* groups this group is member of */
	struct list_head members; /* groups that are members of this group */
	struct list_head dirty;   /* dirty groups */
	struct rb_node node;	  /* tree of qgroups */

	/*
	 * temp variables for accounting operations
	 */
	u64 old_refcnt;
	u64 new_refcnt;
};
/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};

#define ptr_to_u64(x) ((u64)(uintptr_t)x)
#define u64_to_ptr(x) ((struct btrfs_qgroup *)(uintptr_t)x)
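
/*
 * Note (added for clarity): the ulists used throughout this file carry an
 * opaque u64 "aux" value per node. These two helpers round-trip a struct
 * btrfs_qgroup pointer through that u64; the intermediate uintptr_t cast
 * keeps the conversion well-defined on both 32-bit and 64-bit builds.
 */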
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}
/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_right;
		else
			return qgroup;
	}

	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}
static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
	kfree(qgroup);
}
/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(qgroup);
	return 0;
}
/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}
/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}
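
/*
 * Illustrative sketch (added for exposition; userspace model, not kernel
 * code): each relation object above lives on two intrusive lists at once,
 * so a member can enumerate its parents and a parent its members without
 * any lookup table, and one list_del() pair detaches the relation from
 * both sides. A minimal stand-alone model, with hypothetical names:
 */
#if 0
#include <stdlib.h>

struct node { struct node *prev, *next; };

static void list_init(struct node *h) { h->prev = h->next = h; }

static void list_append(struct node *h, struct node *n)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

struct group {
	struct node groups;	/* relations where this group is the member */
	struct node members;	/* relations where this group is the parent */
};

struct relation {
	struct node by_member;	/* linked into member->groups */
	struct node by_parent;	/* linked into parent->members */
	struct group *parent, *member;
};

static struct relation *relate(struct group *member, struct group *parent)
{
	struct relation *r = calloc(1, sizeof(*r));

	if (r) {
		r->parent = parent;
		r->member = member;
		list_append(&member->groups, &r->by_member);
		list_append(&parent->members, &r->by_parent);
	}
	return r;
}
#endif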
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{
	struct btrfs_qgroup *qgroup;

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup)
		return -EINVAL;
	if (qgroup->rfer != rfer || qgroup->excl != excl)
		return -EINVAL;
	return 0;
}
#endif
/*
 * The full config is read in one go, only called from open_ctree().
 * It doesn't use any locking, as at this point we're still single-threaded.
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!fs_info->quota_enabled)
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
					  "old qgroup version, quota disabled");
				goto out;
			}
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				btrfs_err(fs_info,
					  "qgroup generation mismatch, marked as inconsistent");
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}
		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				   "orphan qgroup relation 0x%llx->0x%llx",
				   found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
		fs_info->quota_enabled = 0;
		fs_info->pending_quota_state = 0;
	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
		   ret >= 0) {
		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
	}
	btrfs_free_path(path);

	if (ret < 0) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	return ret < 0 ? ret : 0;
}
/*
 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable().
 * The first two are in single-threaded paths. And for the third one, we have
 * set quota_root to be null with qgroup_lock held before, so it is safe to
 * clean up the in-memory structures without qgroup_lock held.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(qgroup);
	}
	/*
	 * we call btrfs_free_qgroup_config() both when unmounting the
	 * filesystem and when disabling quota, so we set qgroup_ulist
	 * to be null here to avoid a double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
}
static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
				    u64 src, u64 dst)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}
static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
				    u64 src, u64 dst)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}
static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_test_is_dummy_root(quota_root))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/*
	 * Avoid a transaction abort by catching -EEXIST here. In that
	 * case, we proceed by re-initializing the existing structure
	 * on disk.
	 */

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
static int del_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}
static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_qgroup *qgroup)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}
static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	if (btrfs_test_is_dummy_root(root))
		return 0;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}
static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * delete the leaf one by one
		 * since the whole tree is going
		 * to be deleted.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	root->fs_info->pending_quota_state = 0;
	btrfs_free_path(path);
	return ret;
}
int btrfs_quota_enable(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	int ret = 0;
	int slot;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root) {
		fs_info->pending_quota_state = 1;
		goto out;
	}

	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, fs_info,
				       BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret)
		goto out_free_path;

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0)
		goto out_free_path;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {
			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret)
				goto out_free_path;

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out_free_path;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0)
			goto out_free_path;
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret)
		goto out_free_path;

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		goto out_free_path;
	}
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	fs_info->pending_quota_state = 1;
	spin_unlock(&fs_info->qgroup_lock);
out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret) {
		free_extent_buffer(quota_root->node);
		free_extent_buffer(quota_root->commit_root);
		kfree(quota_root);
	}
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_quota_disable(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *quota_root;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_enabled = 0;
	fs_info->pending_quota_state = 0;
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret)
		goto out;

	ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
	if (ret)
		goto out;

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	clean_tree_block(trans, tree_root->fs_info, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	free_extent_buffer(quota_root->node);
	free_extent_buffer(quota_root->commit_root);
	kfree(quota_root);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	/* Check the level of src and dst first */
	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
		return -EINVAL;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* check if such a qgroup relation already exists */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, quota_root, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, quota_root, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, quota_root, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(quota_root->fs_info, src, dst);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int __del_qgroup_relation(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	int ret = 0;
	int err;

	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* check if such a qgroup relation exists at all */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent)
			goto exist;
	}
	ret = -ENOENT;
	goto out;
exist:
	ret = del_qgroup_relation_item(trans, quota_root, src, dst);
	err = del_qgroup_relation_item(trans, quota_root, dst, src);
	if (err && !ret)
		ret = err;

	spin_lock(&fs_info->qgroup_lock);
	del_relation_rb(fs_info, src, dst);
	spin_unlock(&fs_info->qgroup_lock);
out:
	return ret;
}
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	ret = __del_qgroup_relation(trans, fs_info, src, dst);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	return ret;
}
int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup) {
		ret = -EEXIST;
		goto out;
	}

	ret = add_qgroup_item(trans, quota_root, qgroupid);
	if (ret)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	if (IS_ERR(qgroup))
		ret = PTR_ERR(qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	} else {
		/* check that there are no children of this qgroup */
		if (!list_empty(&qgroup->members)) {
			ret = -EBUSY;
			goto out;
		}
	}
	ret = del_qgroup_item(trans, quota_root, qgroupid);

	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		ret = __del_qgroup_relation(trans, fs_info,
					    qgroupid,
					    list->group->qgroupid);
		if (ret)
			goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(quota_root->fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER)
		qgroup->max_rfer = limit->max_rfer;
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL)
		qgroup->max_excl = limit->max_excl;
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER)
		qgroup->rsv_rfer = limit->rsv_rfer;
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL)
		qgroup->rsv_excl = limit->rsv_excl;
	qgroup->lim_flags |= limit->flags;

	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_limit_item(trans, quota_root, qgroup);
	if (ret) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_info(fs_info, "unable to update quota limit for %llu",
			   qgroupid);
	}

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
static int comp_oper_exist(struct btrfs_qgroup_operation *oper1,
			   struct btrfs_qgroup_operation *oper2)
{
	/*
	 * Ignore seq and type here, we're looking for any operation
	 * at all related to this extent on that root.
	 */
	if (oper1->bytenr < oper2->bytenr)
		return -1;
	if (oper1->bytenr > oper2->bytenr)
		return 1;
	if (oper1->ref_root < oper2->ref_root)
		return -1;
	if (oper1->ref_root > oper2->ref_root)
		return 1;
	return 0;
}
static int qgroup_oper_exists(struct btrfs_fs_info *fs_info,
			      struct btrfs_qgroup_operation *oper)
{
	struct rb_node *n;
	struct btrfs_qgroup_operation *cur;
	int cmp;

	spin_lock(&fs_info->qgroup_op_lock);
	n = fs_info->qgroup_op_tree.rb_node;
	while (n) {
		cur = rb_entry(n, struct btrfs_qgroup_operation, n);
		cmp = comp_oper_exist(cur, oper);
		if (cmp < 0) {
			n = n->rb_right;
		} else if (cmp) {
			n = n->rb_left;
		} else {
			spin_unlock(&fs_info->qgroup_op_lock);
			return -EEXIST;
		}
	}
	spin_unlock(&fs_info->qgroup_op_lock);
	return 0;
}
static int comp_oper(struct btrfs_qgroup_operation *oper1,
		     struct btrfs_qgroup_operation *oper2)
{
	if (oper1->bytenr < oper2->bytenr)
		return -1;
	if (oper1->bytenr > oper2->bytenr)
		return 1;
	if (oper1->ref_root < oper2->ref_root)
		return -1;
	if (oper1->ref_root > oper2->ref_root)
		return 1;
	if (oper1->seq < oper2->seq)
		return -1;
	if (oper1->seq > oper2->seq)
		return 1;
	if (oper1->type < oper2->type)
		return -1;
	if (oper1->type > oper2->type)
		return 1;
	return 0;
}
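
/*
 * Note (added for clarity): unlike comp_oper_exist() above, comp_oper()
 * defines a total order over (bytenr, ref_root, seq, type). Since seq is
 * taken from an atomic counter, no two live operations ever compare equal,
 * so the rb-tree insertion below can treat an exact match as a bug.
 */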
static int insert_qgroup_oper(struct btrfs_fs_info *fs_info,
			      struct btrfs_qgroup_operation *oper)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup_operation *cur;
	int cmp;

	spin_lock(&fs_info->qgroup_op_lock);
	p = &fs_info->qgroup_op_tree.rb_node;
	while (*p) {
		parent = *p;
		cur = rb_entry(parent, struct btrfs_qgroup_operation, n);
		cmp = comp_oper(cur, oper);
		if (cmp < 0) {
			p = &(*p)->rb_right;
		} else if (cmp) {
			p = &(*p)->rb_left;
		} else {
			spin_unlock(&fs_info->qgroup_op_lock);
			return -EEXIST;
		}
	}
	rb_link_node(&oper->n, parent, p);
	rb_insert_color(&oper->n, &fs_info->qgroup_op_tree);
	spin_unlock(&fs_info->qgroup_op_lock);
	return 0;
}
/*
 * Record a quota operation for processing later on.
 * @trans: the transaction we are adding the delayed op to.
 * @fs_info: the fs_info for this fs.
 * @ref_root: the root of the reference we are acting on,
 * @bytenr: the bytenr we are acting on.
 * @num_bytes: the number of bytes in the reference.
 * @type: the type of operation this is.
 * @mod_seq: do we need to get a sequence number for looking up roots.
 *
 * We just add it to our trans qgroup_ref_list and carry on and process these
 * operations in order at some later point. If the reference root isn't a fs
 * root then we don't bother with doing anything.
 *
 * MUST BE HOLDING THE REF LOCK.
 */
int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info, u64 ref_root,
			    u64 bytenr, u64 num_bytes,
			    enum btrfs_qgroup_operation_type type, int mod_seq)
{
	struct btrfs_qgroup_operation *oper;
	int ret;

	if (!is_fstree(ref_root) || !fs_info->quota_enabled)
		return 0;

	oper = kmalloc(sizeof(*oper), GFP_NOFS);
	if (!oper)
		return -ENOMEM;

	oper->ref_root = ref_root;
	oper->bytenr = bytenr;
	oper->num_bytes = num_bytes;
	oper->type = type;
	oper->seq = atomic_inc_return(&fs_info->qgroup_op_seq);
	INIT_LIST_HEAD(&oper->elem.list);
	oper->elem.seq = 0;

	trace_btrfs_qgroup_record_ref(oper);

	if (type == BTRFS_QGROUP_OPER_SUB_SUBTREE) {
		/*
		 * If any operation for this bytenr/ref_root combo
		 * exists, then we know it's not exclusively owned and
		 * shouldn't be queued up.
		 *
		 * This also catches the case where we have a cloned
		 * extent that gets queued up multiple times during
		 * drop snapshot.
		 */
		if (qgroup_oper_exists(fs_info, oper)) {
			kfree(oper);
			return 0;
		}
	}

	ret = insert_qgroup_oper(fs_info, oper);
	if (ret) {
		/* Shouldn't happen so have an assert for developers */
		ASSERT(0);
		kfree(oper);
		return ret;
	}
	list_add_tail(&oper->list, &trans->qgroup_ref_list);

	if (mod_seq)
		btrfs_get_tree_mod_seq(fs_info, &oper->elem);

	return 0;
}
/*
 * The easy accounting: if we are adding/removing the only ref for an extent
 * then this qgroup and all of the parent qgroups get their reference and
 * exclusive counts adjusted.
 */
static int qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
				  struct btrfs_qgroup_operation *oper)
{
	struct btrfs_qgroup *qgroup;
	struct ulist *tmp;
	struct btrfs_qgroup_list *glist;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int sign = 0;
	int ret = 0;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;

	spin_lock(&fs_info->qgroup_lock);
	if (!fs_info->quota_root)
		goto out;
	qgroup = find_qgroup_rb(fs_info, oper->ref_root);
	if (!qgroup)
		goto out;
	switch (oper->type) {
	case BTRFS_QGROUP_OPER_ADD_EXCL:
		sign = 1;
		break;
	case BTRFS_QGROUP_OPER_SUB_EXCL:
		sign = -1;
		break;
	default:
		ASSERT(0);
	}
	qgroup->rfer += sign * oper->num_bytes;
	qgroup->rfer_cmpr += sign * oper->num_bytes;

	WARN_ON(sign < 0 && qgroup->excl < oper->num_bytes);
	qgroup->excl += sign * oper->num_bytes;
	qgroup->excl_cmpr += sign * oper->num_bytes;
	if (sign > 0)
		qgroup->reserved -= oper->num_bytes;

	qgroup_dirty(fs_info, qgroup);

	/* Get all of the parent groups that contain this qgroup */
	list_for_each_entry(glist, &qgroup->groups, next_group) {
		ret = ulist_add(tmp, glist->group->qgroupid,
				ptr_to_u64(glist->group), GFP_ATOMIC);
		if (ret < 0)
			goto out;
	}

	/* Iterate all of the parents and adjust their reference counts */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qgroup = u64_to_ptr(unode->aux);
		qgroup->rfer += sign * oper->num_bytes;
		qgroup->rfer_cmpr += sign * oper->num_bytes;
		WARN_ON(sign < 0 && qgroup->excl < oper->num_bytes);
		qgroup->excl += sign * oper->num_bytes;
		if (sign > 0)
			qgroup->reserved -= oper->num_bytes;
		qgroup->excl_cmpr += sign * oper->num_bytes;
		qgroup_dirty(fs_info, qgroup);

		/* Add any parents of the parents */
		list_for_each_entry(glist, &qgroup->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
out:
	spin_unlock(&fs_info->qgroup_lock);
	ulist_free(tmp);
	return ret;
}
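
/*
 * Illustrative sketch (added for exposition; userspace model, not kernel
 * code): the parent walk above is a breadth-first traversal of the qgroup
 * DAG. ulist_add() doubles as the "visited" set -- it returns 0 for an
 * already-present id -- so a shared ancestor is adjusted exactly once even
 * when it is reachable through several children. The names below are
 * hypothetical stand-ins.
 */
#if 0
#define MAX_QG 16
struct qg { int nparents; int parent[4]; long long rfer; };

/* adjust 'delta' on 'start' and on every ancestor, each exactly once */
static void propagate(struct qg *all, int start, long long delta)
{
	int queue[MAX_QG], visited[MAX_QG] = {0}, head = 0, tail = 0;

	queue[tail++] = start;
	visited[start] = 1;
	while (head < tail) {
		struct qg *g = &all[queue[head++]];

		g->rfer += delta;
		for (int i = 0; i < g->nparents; i++) {
			int p = g->parent[i];

			if (!visited[p]) {	/* mirrors ulist_add() == 1 */
				visited[p] = 1;
				queue[tail++] = p;
			}
		}
	}
}
#endif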
/*
 * Walk all of the roots that pointed to our bytenr and adjust their refcnts
 * as necessary.
 */
static int qgroup_calc_old_refcnt(struct btrfs_fs_info *fs_info,
				  u64 root_to_skip, struct ulist *tmp,
				  struct ulist *roots, struct ulist *qgroups,
				  u64 seq, int *old_roots, int rescan)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct ulist_node *tmp_unode;
	struct ulist_iterator tmp_uiter;
	struct btrfs_qgroup *qg;
	int ret;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		/* We don't count our current root here */
		if (unode->val == root_to_skip)
			continue;
		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;
		/*
		 * We could have a pending removal of this same ref so we may
		 * not have actually found our ref root when doing
		 * btrfs_find_all_roots, so we need to keep track of how many
		 * old roots we find in case we removed ours and added a
		 * different one at the same time. I don't think this could
		 * happen in practice but that sort of thinking leads to pain
		 * and suffering and to the dark side.
		 */
		(*old_roots)++;

		ulist_reinit(tmp);
		ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
				GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;

			qg = u64_to_ptr(tmp_unode->aux);
			/*
			 * We use this sequence number to keep from having to
			 * run the whole list and 0 out the refcnt every time.
			 * We basically use sequence as the known 0 count and
			 * then add 1 every time we see a qgroup. This is how
			 * we get how many of the roots actually point up to
			 * the upper level qgroups in order to determine
			 * exclusive counts.
			 *
			 * For rescan we want to set old_refcnt to seq so our
			 * exclusive calculations end up correct.
			 */
			if (rescan)
				qg->old_refcnt = seq;
			else if (qg->old_refcnt < seq)
				qg->old_refcnt = seq + 1;
			else
				qg->old_refcnt++;

			if (qg->new_refcnt < seq)
				qg->new_refcnt = seq + 1;
			else
				qg->new_refcnt++;
			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(qgroups, glist->group->qgroupid,
						ptr_to_u64(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
				ret = ulist_add(tmp, glist->group->qgroupid,
						ptr_to_u64(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
			}
		}
	}
	return 0;
}
/*
 * We need to walk forward in our operation tree and account for any roots that
 * were deleted after we made this operation.
 */
static int qgroup_account_deleted_refs(struct btrfs_fs_info *fs_info,
				       struct btrfs_qgroup_operation *oper,
				       struct ulist *tmp,
				       struct ulist *qgroups, u64 seq,
				       int *old_roots)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	struct btrfs_qgroup_operation *tmp_oper;
	struct rb_node *n;
	int ret;

	ulist_reinit(tmp);

	/*
	 * We only walk forward in the tree since we're only interested in
	 * removals that happened _after_ our operation.
	 */
	spin_lock(&fs_info->qgroup_op_lock);
	n = rb_next(&oper->n);
	spin_unlock(&fs_info->qgroup_op_lock);
	if (!n)
		return 0;
	tmp_oper = rb_entry(n, struct btrfs_qgroup_operation, n);
	while (tmp_oper->bytenr == oper->bytenr) {
		/*
		 * If it's not a removal we don't care, additions work out
		 * properly with our refcnt tracking.
		 */
		if (tmp_oper->type != BTRFS_QGROUP_OPER_SUB_SHARED &&
		    tmp_oper->type != BTRFS_QGROUP_OPER_SUB_EXCL)
			goto next;
		qg = find_qgroup_rb(fs_info, tmp_oper->ref_root);
		if (!qg)
			goto next;
		ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
				GFP_ATOMIC);
		if (ret) {
			if (ret < 0)
				return ret;
			/*
			 * We only want to increase old_roots if this qgroup is
			 * not already in the list of qgroups. If it is already
			 * there then that means it must have been re-added or
			 * the delete will be discarded because we had an
			 * existing ref that we haven't looked up yet. In this
			 * case we don't want to increase old_roots. So if ret
			 * == 1 then we know that this is the first time we've
			 * seen this qgroup and we can bump the old_roots.
			 */
			(*old_roots)++;
			ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg),
					GFP_ATOMIC);
			if (ret < 0)
				return ret;
		}
next:
		spin_lock(&fs_info->qgroup_op_lock);
		n = rb_next(&tmp_oper->n);
		spin_unlock(&fs_info->qgroup_op_lock);
		if (!n)
			break;
		tmp_oper = rb_entry(n, struct btrfs_qgroup_operation, n);
	}

	/* Ok now process the qgroups we found */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		struct btrfs_qgroup_list *glist;

		qg = u64_to_ptr(unode->aux);
		if (qg->old_refcnt < seq)
			qg->old_refcnt = seq + 1;
		else
			qg->old_refcnt++;
		if (qg->new_refcnt < seq)
			qg->new_refcnt = seq + 1;
		else
			qg->new_refcnt++;
		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(qgroups, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
			if (ret < 0)
				return ret;
			ret = ulist_add(tmp, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
/* Add refcnt for the newly added reference. */
static int qgroup_calc_new_refcnt(struct btrfs_fs_info *fs_info,
				  struct btrfs_qgroup_operation *oper,
				  struct btrfs_qgroup *qgroup,
				  struct ulist *tmp, struct ulist *qgroups,
				  u64 seq)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	int ret;

	ulist_reinit(tmp);
	ret = ulist_add(qgroups, qgroup->qgroupid, ptr_to_u64(qgroup),
			GFP_ATOMIC);
	if (ret < 0)
		return ret;
	ret = ulist_add(tmp, qgroup->qgroupid, ptr_to_u64(qgroup),
			GFP_ATOMIC);
	if (ret < 0)
		return ret;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		struct btrfs_qgroup_list *glist;

		qg = u64_to_ptr(unode->aux);
		if (oper->type == BTRFS_QGROUP_OPER_ADD_SHARED) {
			if (qg->new_refcnt < seq)
				qg->new_refcnt = seq + 1;
			else
				qg->new_refcnt++;
		} else {
			if (qg->old_refcnt < seq)
				qg->old_refcnt = seq + 1;
			else
				qg->old_refcnt++;
		}
		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
			if (ret < 0)
				return ret;
			ret = ulist_add(qgroups, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
/*
 * This adjusts the counters for all referenced qgroups if need be.
 */
static int qgroup_adjust_counters(struct btrfs_fs_info *fs_info,
				  u64 root_to_skip, u64 num_bytes,
				  struct ulist *qgroups, u64 seq,
				  int old_roots, int new_roots, int rescan)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	u64 cur_new_count, cur_old_count;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(qgroups, &uiter))) {
		bool dirty = false;

		qg = u64_to_ptr(unode->aux);
		/*
		 * Wasn't referenced before but is now, add to the reference
		 * counters.
		 */
		if (qg->old_refcnt <= seq && qg->new_refcnt > seq) {
			qg->rfer += num_bytes;
			qg->rfer_cmpr += num_bytes;
			dirty = true;
		}

		/*
		 * Was referenced before but isn't now, subtract from the
		 * reference counters.
		 */
		if (qg->old_refcnt > seq && qg->new_refcnt <= seq) {
			qg->rfer -= num_bytes;
			qg->rfer_cmpr -= num_bytes;
			dirty = true;
		}

		if (qg->old_refcnt < seq)
			cur_old_count = 0;
		else
			cur_old_count = qg->old_refcnt - seq;
		if (qg->new_refcnt < seq)
			cur_new_count = 0;
		else
			cur_new_count = qg->new_refcnt - seq;

		/*
		 * If our refcount was the same as the roots previously but our
		 * new count isn't the same as the number of roots now then we
		 * went from having an exclusive reference on this range to not.
		 */
		if (old_roots && cur_old_count == old_roots &&
		    (cur_new_count != new_roots || new_roots == 0)) {
			WARN_ON(cur_new_count != new_roots && new_roots == 0);
			qg->excl -= num_bytes;
			qg->excl_cmpr -= num_bytes;
			dirty = true;
		}

		/*
		 * If we didn't reference all the roots before but now we do we
		 * have an exclusive reference to this range.
		 */
		if ((!old_roots || (old_roots && cur_old_count != old_roots))
		    && cur_new_count == new_roots) {
			qg->excl += num_bytes;
			qg->excl_cmpr += num_bytes;
			dirty = true;
		}

		if (dirty)
			qgroup_dirty(fs_info, qg);
	}
	return 0;
}
/*
 * If we removed a data extent and there were other references for that bytenr
 * then we need to lookup all referenced roots to make sure we still don't
 * reference this bytenr. If we do then we can just discard this operation.
 */
static int check_existing_refs(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup_operation *oper)
{
	struct ulist *roots = NULL;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret = 0;

	ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr,
				   oper->elem.seq, &roots);
	if (ret < 0)
		return ret;
	ret = 0;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		if (unode->val == oper->ref_root) {
			ret = 1;
			break;
		}
	}
	ulist_free(roots);
	btrfs_put_tree_mod_seq(fs_info, &oper->elem);

	return ret;
}
/*
 * If we share a reference across multiple roots then we may need to adjust
 * various qgroups referenced and exclusive counters. The basic premise is this
 *
 * 1) We have seq to represent a 0 count. Instead of looping through all of the
 * qgroups and resetting their refcount to 0 we just constantly bump this
 * sequence number to act as the base reference count. This means that if
 * anybody is equal to or below this sequence they were never referenced. We
 * jack this sequence up by the number of roots we found each time in order to
 * make sure we don't have any overlap.
 *
 * 2) We first search all the roots that reference the area _except_ the root
 * we're acting on currently. This makes up the old_refcnt of all the qgroups
 * before.
 *
 * 3) We walk all of the qgroups referenced by the root we are currently acting
 * on, and will either adjust old_refcnt in the case of a removal or the
 * new_refcnt in the case of an addition.
 *
 * 4) Finally we walk all the qgroups that are referenced by this range
 * including the root we are acting on currently. We will adjust the counters
 * based on the number of roots we had and will have after this operation.
 *
 * Take this example as an illustration
 *
 *			[qgroup 1/0]
 *		     /         |          \
 *		[qg 0/0]   [qg 0/1]	[qg 0/2]
 *		   \          |            /
 *		  [	   extent	  ]
 *
 * Say we are adding a reference that is covered by qg 0/0. The first step
 * would give a refcnt of 1 to qg 0/1 and 0/2 and a refcnt of 2 to qg 1/0 with
 * old_roots being 2. Because it is adding new_roots will be 1. We then go
 * through qg 0/0 which will get the new_refcnt set to 1 and add 1 to qg 1/0's
 * new_refcnt, bringing it to 3. We then walk through all of the qgroups, we
 * notice that the old refcnt for qg 0/0 < the new refcnt, so we added a
 * reference and thus must add the size to the referenced bytes. Everything
 * else is the same so nothing else changes.
 */
static int qgroup_shared_accounting(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_info *fs_info,
				    struct btrfs_qgroup_operation *oper)
{
	struct ulist *roots = NULL;
	struct ulist *qgroups, *tmp;
	struct btrfs_qgroup *qgroup;
	struct seq_list elem = SEQ_LIST_INIT(elem);
	u64 seq;
	int old_roots = 0;
	int new_roots = 0;
	int ret = 0;

	if (oper->elem.seq) {
		ret = check_existing_refs(trans, fs_info, oper);
		if (ret < 0)
			return ret;
		if (ret)
			return 0;
	}

	qgroups = ulist_alloc(GFP_NOFS);
	if (!qgroups)
		return -ENOMEM;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp) {
		ulist_free(qgroups);
		return -ENOMEM;
	}

	btrfs_get_tree_mod_seq(fs_info, &elem);
	ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr, elem.seq,
				   &roots);
	btrfs_put_tree_mod_seq(fs_info, &elem);
	if (ret < 0) {
		ulist_free(qgroups);
		ulist_free(tmp);
		return ret;
	}
	spin_lock(&fs_info->qgroup_lock);
	qgroup = find_qgroup_rb(fs_info, oper->ref_root);
	if (!qgroup)
		goto out;
	seq = fs_info->qgroup_seq;

	/*
	 * So roots is the list of all the roots currently pointing at the
	 * bytenr, including the ref we are adding if we are adding, or not if
	 * we are removing a ref. So we pass in the ref_root to skip that root
	 * in our calculations. We set old_refcnt and new_refcnt because who
	 * the hell knows what everything looked like before, and it doesn't
	 * matter except...
	 */
	ret = qgroup_calc_old_refcnt(fs_info, oper->ref_root, tmp, roots, qgroups,
				     seq, &old_roots, 0);
	if (ret < 0)
		goto out;

	/*
	 * Now adjust the refcounts of the qgroups that care about this
	 * reference, either the old_count in the case of removal or new_count
	 * in the case of an addition.
	 */
	ret = qgroup_calc_new_refcnt(fs_info, oper, qgroup, tmp, qgroups,
				     seq);
	if (ret < 0)
		goto out;

	/*
	 * ...in the case of removals. If we had a removal before we got around
	 * to processing this operation then we need to find that guy and count
	 * his references as if they really existed so we don't end up screwing
	 * up the exclusive counts. Then whenever we go to process the delete
	 * everything will be grand and we can account for whatever exclusive
	 * changes need to be made there. We also have to pass in old_roots so
	 * we have an accurate count of the roots as it pertains to this
	 * operations view of the world.
	 */
	ret = qgroup_account_deleted_refs(fs_info, oper, tmp, qgroups, seq,
					  &old_roots);
	if (ret < 0)
		goto out;

	/*
	 * We are adding our root, need to adjust up the number of roots,
	 * otherwise old_roots is the number of roots we want.
	 */
	if (oper->type == BTRFS_QGROUP_OPER_ADD_SHARED) {
		new_roots = old_roots + 1;
	} else {
		new_roots = old_roots;
		old_roots++;
	}
	fs_info->qgroup_seq += old_roots + 1;

	/*
	 * And now the magic happens, bless Arne for having a pretty elegant
	 * solution for this.
	 */
	qgroup_adjust_counters(fs_info, oper->ref_root, oper->num_bytes,
			       qgroups, seq, old_roots, new_roots, 0);
out:
	spin_unlock(&fs_info->qgroup_lock);
	ulist_free(qgroups);
	ulist_free(roots);
	ulist_free(tmp);
	return ret;
}
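
/*
 * Illustrative sketch (added for exposition; userspace, not kernel code):
 * the "seq as zero" trick from point 1) above. Rather than clearing every
 * qgroup's refcnt between extents, any refcnt <= seq is *read* as zero, and
 * seq is advanced past the largest count handed out in the previous round.
 * Names are hypothetical stand-ins.
 */
#if 0
#include <assert.h>

struct counter { unsigned long long refcnt; };

static void bump(struct counter *c, unsigned long long seq)
{
	if (c->refcnt < seq)
		c->refcnt = seq + 1;	/* first hit this round: count = 1 */
	else
		c->refcnt++;		/* already counted this round */
}

static unsigned long long read_count(struct counter *c, unsigned long long seq)
{
	return c->refcnt < seq ? 0 : c->refcnt - seq;
}

int main(void)
{
	struct counter a = {0}, b = {0};
	unsigned long long seq = 0;

	bump(&a, seq);				/* a seen by two roots */
	bump(&a, seq);
	assert(read_count(&a, seq) == 2);
	assert(read_count(&b, seq) == 0);

	seq += 2 + 1;	/* advance past every count from the last round */
	assert(read_count(&a, seq) == 0);	/* implicitly reset */
	return 0;
}
#endif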
/*
 * Process a reference to a shared subtree. This type of operation is
 * queued during snapshot removal when we encounter extents which are
 * shared between more than one root.
 */
static int qgroup_subtree_accounting(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     struct btrfs_qgroup_operation *oper)
{
	struct ulist *roots = NULL;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup_list *glist;
	struct ulist *parents;
	int ret = 0;
	int err;
	struct btrfs_qgroup *qg;
	u64 root_obj = 0;
	struct seq_list elem = SEQ_LIST_INIT(elem);

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	btrfs_get_tree_mod_seq(fs_info, &elem);
	ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr,
				   elem.seq, &roots);
	btrfs_put_tree_mod_seq(fs_info, &elem);
	if (ret < 0)
		goto out;

	if (roots->nnodes != 1)
		goto out;

	ULIST_ITER_INIT(&uiter);
	unode = ulist_next(roots, &uiter); /* Only want 1 so no need to loop */
	/*
	 * If we find our ref root then that means all refs
	 * this extent has to the root have not yet been
	 * deleted. In that case, we do nothing and let the
	 * last ref for this bytenr drive our update.
	 *
	 * This can happen for example if an extent is
	 * referenced multiple times in a snapshot (clone,
	 * etc). If we are in the middle of snapshot removal,
	 * queued updates for such an extent will find the
	 * root if we have not yet finished removing the
	 * snapshot.
	 */
	if (unode->val == oper->ref_root)
		goto out;

	root_obj = unode->val;

	spin_lock(&fs_info->qgroup_lock);
	qg = find_qgroup_rb(fs_info, root_obj);
	if (!qg)
		goto out_unlock;

	qg->excl += oper->num_bytes;
	qg->excl_cmpr += oper->num_bytes;
	qgroup_dirty(fs_info, qg);

	/*
	 * Adjust counts for parent groups. First we find all
	 * parents, then in the 2nd loop we do the adjustment
	 * while adding parents of the parents to our ulist.
	 */
	list_for_each_entry(glist, &qg->groups, next_group) {
		err = ulist_add(parents, glist->group->qgroupid,
				ptr_to_u64(glist->group), GFP_ATOMIC);
		if (err < 0) {
			ret = err;
			goto out_unlock;
		}
	}

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(parents, &uiter))) {
		qg = u64_to_ptr(unode->aux);
		qg->excl += oper->num_bytes;
		qg->excl_cmpr += oper->num_bytes;
		qgroup_dirty(fs_info, qg);

		/* Add any parents of the parents */
		list_for_each_entry(glist, &qg->groups, next_group) {
			err = ulist_add(parents, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
			if (err < 0) {
				ret = err;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock(&fs_info->qgroup_lock);

out:
	ulist_free(roots);
	ulist_free(parents);
	return ret;
}
/*
 * btrfs_qgroup_account_ref is called for every ref that is added to or deleted
 * from the fs. First, all roots referencing the extent are searched, and
 * then the space is accounted to the different roots accordingly. The
 * accounting algorithm works in 3 steps documented inline.
 */
static int btrfs_qgroup_account(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info,
				struct btrfs_qgroup_operation *oper)
{
	int ret = 0;

	if (!fs_info->quota_enabled)
		return 0;

	BUG_ON(!fs_info->quota_root);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		if (fs_info->qgroup_rescan_progress.objectid <= oper->bytenr) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			return 0;
		}
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	ASSERT(is_fstree(oper->ref_root));

	trace_btrfs_qgroup_account(oper);

	switch (oper->type) {
	case BTRFS_QGROUP_OPER_ADD_EXCL:
	case BTRFS_QGROUP_OPER_SUB_EXCL:
		ret = qgroup_excl_accounting(fs_info, oper);
		break;
	case BTRFS_QGROUP_OPER_ADD_SHARED:
	case BTRFS_QGROUP_OPER_SUB_SHARED:
		ret = qgroup_shared_accounting(trans, fs_info, oper);
		break;
	case BTRFS_QGROUP_OPER_SUB_SUBTREE:
		ret = qgroup_subtree_accounting(trans, fs_info, oper);
		break;
	default:
		ASSERT(0);
	}
	return ret;
}
/*
 * Needs to be called every time we run delayed refs, even if there is an
 * error, in order to clean up outstanding operations.
 */
int btrfs_delayed_qgroup_accounting(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_info *fs_info)
{
	struct btrfs_qgroup_operation *oper;
	int ret = 0;

	while (!list_empty(&trans->qgroup_ref_list)) {
		oper = list_first_entry(&trans->qgroup_ref_list,
					struct btrfs_qgroup_operation, list);
		list_del_init(&oper->list);
		if (!ret || !trans->aborted)
			ret = btrfs_qgroup_account(trans, fs_info, oper);
		spin_lock(&fs_info->qgroup_op_lock);
		rb_erase(&oper->n, &fs_info->qgroup_op_tree);
		spin_unlock(&fs_info->qgroup_op_lock);
		btrfs_put_tree_mod_seq(fs_info, &oper->elem);
		kfree(oper);
	}
	return ret;
}
/*
 * called from commit_transaction. Writes all changed qgroups to disk.
 */
int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
		      struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root = fs_info->quota_root;
	int ret = 0;
	int start_rescan_worker = 0;

	if (!quota_root)
		goto out;

	if (!fs_info->quota_enabled && fs_info->pending_quota_state)
		start_rescan_worker = 1;

	fs_info->quota_enabled = fs_info->pending_quota_state;

	spin_lock(&fs_info->qgroup_lock);
	while (!list_empty(&fs_info->dirty_qgroups)) {
		struct btrfs_qgroup *qgroup;
		qgroup = list_first_entry(&fs_info->dirty_qgroups,
					  struct btrfs_qgroup, dirty);
		list_del_init(&qgroup->dirty);
		spin_unlock(&fs_info->qgroup_lock);
		ret = update_qgroup_info_item(trans, quota_root, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		ret = update_qgroup_limit_item(trans, quota_root, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		spin_lock(&fs_info->qgroup_lock);
	}
	if (fs_info->quota_enabled)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
	else
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_status_item(trans, fs_info, quota_root);
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;

	if (!ret && start_rescan_worker) {
		ret = qgroup_rescan_init(fs_info, 0, 1);
		if (!ret) {
			qgroup_rescan_zero_tracking(fs_info);
			btrfs_queue_work(fs_info->qgroup_rescan_workers,
					 &fs_info->qgroup_rescan_work);
		}
		ret = 0;
	}

out:
	return ret;
}
/*
 * copy the accounting information between qgroups. This is necessary
 * when a snapshot or a subvolume is created.
 */
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
			 struct btrfs_qgroup_inherit *inherit)
{
	int ret = 0;
	int i;
	u64 *i_qgroups;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_qgroup *srcgroup;
	struct btrfs_qgroup *dstgroup;
	u32 level_size = 0;
	u64 nums;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_enabled)
		goto out;

	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		       2 * inherit->num_excl_copies;
		for (i = 0; i < nums; ++i) {
			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
			if (!srcgroup) {
				ret = -EINVAL;
				goto out;
			}

			if ((srcgroup->qgroupid >> 48) <= (objectid >> 48)) {
				ret = -EINVAL;
				goto out;
			}
			++i_qgroups;
		}
	}

	/*
	 * create a tracking group for the subvol itself
	 */
	ret = add_qgroup_item(trans, quota_root, objectid);
	if (ret)
		goto out;

	if (srcid) {
		struct btrfs_root *srcroot;
		struct btrfs_key srckey;

		srckey.objectid = srcid;
		srckey.type = BTRFS_ROOT_ITEM_KEY;
		srckey.offset = (u64)-1;
		srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
		if (IS_ERR(srcroot)) {
			ret = PTR_ERR(srcroot);
			goto out;
		}

		rcu_read_lock();
		level_size = srcroot->nodesize;
		rcu_read_unlock();
	}

	/*
	 * add qgroup to all inherited groups
	 */
	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		for (i = 0; i < inherit->num_qgroups; ++i) {
			ret = add_qgroup_relation_item(trans, quota_root,
						       objectid, *i_qgroups);
			if (ret)
				goto out;
			ret = add_qgroup_relation_item(trans, quota_root,
						       *i_qgroups, objectid);
			if (ret)
				goto out;
			++i_qgroups;
		}
	}

	spin_lock(&fs_info->qgroup_lock);

	dstgroup = add_qgroup_rb(fs_info, objectid);
	if (IS_ERR(dstgroup)) {
		ret = PTR_ERR(dstgroup);
		goto unlock;
	}

	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
		dstgroup->lim_flags = inherit->lim.flags;
		dstgroup->max_rfer = inherit->lim.max_rfer;
		dstgroup->max_excl = inherit->lim.max_excl;
		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
		dstgroup->rsv_excl = inherit->lim.rsv_excl;

		ret = update_qgroup_limit_item(trans, quota_root, dstgroup);
		if (ret) {
			fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
			btrfs_info(fs_info, "unable to update quota limit for %llu",
				   dstgroup->qgroupid);
			goto unlock;
		}
	}

	if (srcid) {
		srcgroup = find_qgroup_rb(fs_info, srcid);
		if (!srcgroup)
			goto unlock;

		/*
		 * We call inherit after we clone the root in order to make sure
		 * our counts don't go crazy, so at this point the only
		 * difference between the two roots should be the root node.
		 */
		dstgroup->rfer = srcgroup->rfer;
		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
		dstgroup->excl = level_size;
		dstgroup->excl_cmpr = level_size;
		srcgroup->excl = level_size;
		srcgroup->excl_cmpr = level_size;

		/* inherit the limit info */
		dstgroup->lim_flags = srcgroup->lim_flags;
		dstgroup->max_rfer = srcgroup->max_rfer;
		dstgroup->max_excl = srcgroup->max_excl;
		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
		dstgroup->rsv_excl = srcgroup->rsv_excl;

		qgroup_dirty(fs_info, dstgroup);
		qgroup_dirty(fs_info, srcgroup);
	}

	if (!inherit)
		goto unlock;

	i_qgroups = (u64 *)(inherit + 1);
	for (i = 0; i < inherit->num_qgroups; ++i) {
		ret = add_relation_rb(quota_root->fs_info, objectid,
				      *i_qgroups);
		if (ret)
			goto unlock;
		++i_qgroups;
	}

	for (i = 0; i < inherit->num_ref_copies; ++i) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->rfer = src->rfer - level_size;
		dst->rfer_cmpr = src->rfer_cmpr - level_size;
		i_qgroups += 2;
	}
	for (i = 0; i < inherit->num_excl_copies; ++i) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->excl = src->excl + level_size;
		dst->excl_cmpr = src->excl_cmpr + level_size;
		i_qgroups += 2;
	}
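
	/*
	 * Note (added for clarity; interpretation, hedged): right after a
	 * snapshot is created the two roots differ only in their root node
	 * (see the comment in the srcid branch above), so each side's
	 * exclusive count is exactly one tree block, level_size. The
	 * +/- level_size adjustments in the two copy loops above appear to
	 * compensate for that same root-node difference when cloning
	 * counters between qgroups.
	 */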
unlock:
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}
int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	if (!is_fstree(ref_root))
		return 0;

	if (num_bytes == 0)
		return 0;

	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	/*
	 * in a first step, we check all affected qgroups if any limits would
	 * be exceeded
	 */
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			(uintptr_t)qgroup, GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = u64_to_ptr(unode->aux);

		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
		    qg->reserved + (s64)qg->rfer + num_bytes >
		    qg->max_rfer) {
			ret = -EDQUOT;
			goto out;
		}

		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
		    qg->reserved + (s64)qg->excl + num_bytes >
		    qg->max_excl) {
			ret = -EDQUOT;
			goto out;
		}

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
	/*
	 * no limits exceeded, now record the reservation into all qgroups
	 */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;

		qg = u64_to_ptr(unode->aux);

		qg->reserved += num_bytes;
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
	return ret;
}
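
/*
 * Illustrative sketch (added for exposition; userspace, not kernel code):
 * the reservation above is a two-pass check-then-commit over the whole
 * ancestor set. A request is admitted only if every limited qgroup on the
 * way up stays within its configured maximum; only then is the amount
 * recorded everywhere, so the check and the update see the same groups.
 * Names are hypothetical stand-ins.
 */
#if 0
struct limit { long long max_rfer, rfer, reserved; int limited; };

/* returns 0 if 'want' fits under every limited group, -1 (over quota) otherwise */
static int reserve(struct limit *groups, int ngroups, long long want)
{
	for (int i = 0; i < ngroups; i++)
		if (groups[i].limited &&
		    groups[i].reserved + groups[i].rfer + want >
		    groups[i].max_rfer)
			return -1;	/* first pass: check only */

	for (int i = 0; i < ngroups; i++)
		groups[i].reserved += want;	/* second pass: commit */
	return 0;
}
#endif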
void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;

	if (!is_fstree(ref_root))
		return;

	if (num_bytes == 0)
		return;

	spin_lock(&fs_info->qgroup_lock);

	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			(uintptr_t)qgroup, GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = u64_to_ptr(unode->aux);

		qg->reserved -= num_bytes;

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
}
void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
{
	if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
		return;
	btrfs_err(trans->root->fs_info,
		"qgroups not uptodate in trans handle %p: list is%s empty, "
		"seq is %#x.%x",
		trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
		(u32)(trans->delayed_ref_elem.seq >> 32),
		(u32)trans->delayed_ref_elem.seq);
	BUG();
}
/*
 * returns < 0 on error, 0 when more leaves are to be scanned,
 * 1 when done.
 */
static int
qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		   struct btrfs_trans_handle *trans, struct ulist *qgroups,
		   struct ulist *tmp, struct extent_buffer *scratch_leaf)
{
	struct btrfs_key found;
	struct ulist *roots = NULL;
	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
	u64 num_bytes;
	u64 seq;
	int new_roots;
	int slot;
	int ret;

	path->leave_spinning = 1;
	mutex_lock(&fs_info->qgroup_rescan_lock);
	ret = btrfs_search_slot_for_read(fs_info->extent_root,
					 &fs_info->qgroup_rescan_progress,
					 path, 1, 0);

	pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
		 fs_info->qgroup_rescan_progress.objectid,
		 fs_info->qgroup_rescan_progress.type,
		 fs_info->qgroup_rescan_progress.offset, ret);

	if (ret) {
		/*
		 * The rescan is about to end, we will not be scanning any
		 * further blocks. We cannot unset the RESCAN flag here, because
		 * we want to commit the transaction if everything went well.
		 * To make the live accounting work in this phase, we set our
		 * scan progress pointer such that every real extent objectid
		 * will be smaller.
		 */
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
		btrfs_release_path(path);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		return ret;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found,
			      btrfs_header_nritems(path->nodes[0]) - 1);
	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;

	btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	memcpy(scratch_leaf, path->nodes[0], sizeof(*scratch_leaf));
	slot = path->slots[0];
	btrfs_release_path(path);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
		    found.type != BTRFS_METADATA_ITEM_KEY)
			continue;
		if (found.type == BTRFS_METADATA_ITEM_KEY)
			num_bytes = fs_info->extent_root->nodesize;
		else
			num_bytes = found.offset;

		ulist_reinit(qgroups);
		ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
					   &roots);
		if (ret < 0)
			goto out;
		spin_lock(&fs_info->qgroup_lock);
		seq = fs_info->qgroup_seq;
		fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */

		new_roots = 0;
		ret = qgroup_calc_old_refcnt(fs_info, 0, tmp, roots, qgroups,
					     seq, &new_roots, 1);
		if (ret < 0) {
			spin_unlock(&fs_info->qgroup_lock);
			ulist_free(roots);
			goto out;
		}

		ret = qgroup_adjust_counters(fs_info, 0, num_bytes, qgroups,
					     seq, 0, new_roots, 1);
		if (ret < 0) {
			spin_unlock(&fs_info->qgroup_lock);
			ulist_free(roots);
			goto out;
		}
		spin_unlock(&fs_info->qgroup_lock);
		ulist_free(roots);
	}
out:
	btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);

	return ret;
}
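
/*
 * Return-contract sketch for the function above (the real driving loop is
 * the worker below; this only illustrates the expected shape of a caller):
 *
 *	while (!err) {
 *		trans = btrfs_start_transaction(fs_info->fs_root, 0);
 *		err = qgroup_rescan_leaf(fs_info, path, trans, qgroups,
 *					 tmp, scratch_leaf);
 *		if (err > 0)	// done, commit the final progress update
 *			btrfs_commit_transaction(trans, fs_info->fs_root);
 *		else		// more leaves (0) or error (< 0)
 *			btrfs_end_transaction(trans, fs_info->fs_root);
 *	}
 */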
static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
						     qgroup_rescan_work);
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *tmp = NULL, *qgroups = NULL;
	struct extent_buffer *scratch_leaf = NULL;
	int err = -ENOMEM;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	qgroups = ulist_alloc(GFP_NOFS);
	if (!qgroups)
		goto out;
	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		goto out;
	scratch_leaf = kmalloc(sizeof(*scratch_leaf), GFP_NOFS);
	if (!scratch_leaf)
		goto out;

	err = 0;
	while (!err) {
		trans = btrfs_start_transaction(fs_info->fs_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			break;
		}
		if (!fs_info->quota_enabled) {
			err = -EINTR;
		} else {
			err = qgroup_rescan_leaf(fs_info, path, trans,
						 qgroups, tmp, scratch_leaf);
		}
		if (err > 0)
			btrfs_commit_transaction(trans, fs_info->fs_root);
		else
			btrfs_end_transaction(trans, fs_info->fs_root);
	}

out:
	kfree(scratch_leaf);
	ulist_free(qgroups);
	ulist_free(tmp);
	btrfs_free_path(path);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;

	if (err > 0 &&
	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	} else if (err < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	/*
	 * only update the status item, since the previous part has already
	 * updated the qgroup info.
	 */
	trans = btrfs_start_transaction(fs_info->quota_root, 1);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		btrfs_err(fs_info,
			  "failed to start transaction for status update: %d",
			  err);
		goto done;
	}
	ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root);
	if (ret < 0) {
		err = ret;
		btrfs_err(fs_info, "failed to update qgroup status: %d", err);
		btrfs_abort_transaction(trans, fs_info->quota_root, err);
		goto done;
	}
	btrfs_end_transaction(trans, fs_info->quota_root);

	if (err >= 0) {
		btrfs_info(fs_info, "qgroup scan completed%s",
			   err > 0 ? " (inconsistency flag cleared)" : "");
	} else {
		btrfs_err(fs_info, "qgroup scan failed with %d", err);
	}

done:
	complete_all(&fs_info->qgroup_rescan_completion);
}
/*
 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
 * memory required for the rescan context.
 */
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags)
{
	int ret = 0;

	if (!init_flags &&
	    (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
	     !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) {
		ret = -EINVAL;
		goto err;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);

	if (init_flags) {
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
			ret = -EINPROGRESS;
		else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
			ret = -EINVAL;

		if (ret) {
			spin_unlock(&fs_info->qgroup_lock);
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			goto err;
		}

		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	memset(&fs_info->qgroup_rescan_progress, 0,
	       sizeof(fs_info->qgroup_rescan_progress));
	fs_info->qgroup_rescan_progress.objectid = progress_objectid;

	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	init_completion(&fs_info->qgroup_rescan_completion);

	memset(&fs_info->qgroup_rescan_work, 0,
	       sizeof(fs_info->qgroup_rescan_work));
	btrfs_init_work(&fs_info->qgroup_rescan_work,
			btrfs_qgroup_rescan_helper,
			btrfs_qgroup_rescan_worker, NULL, NULL);

	if (ret) {
err:
		btrfs_info(fs_info, "qgroup_rescan_init failed with %d", ret);
		return ret;
	}

	return 0;
}
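
/*
 * Call sketch (an assumption based on the entry points visible here, not a
 * complete list of callers): btrfs_qgroup_rescan() below starts a fresh
 * scan via qgroup_rescan_init(fs_info, 0, 1), while a mount-time resume
 * path would re-arm an interrupted scan with init_flags == 0 and the
 * progress objectid read from the on-disk status item, e.g.:
 *
 *	qgroup_rescan_init(fs_info, rescan_progress_objectid, 0);
 *
 * With init_flags == 0 the function requires that the RESCAN and ON status
 * flags are already set, instead of setting RESCAN itself.
 */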
static void
qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	spin_lock(&fs_info->qgroup_lock);
	/* clear all current qgroup tracking information */
	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		qgroup->rfer = 0;
		qgroup->rfer_cmpr = 0;
		qgroup->excl = 0;
		qgroup->excl_cmpr = 0;
	}
	spin_unlock(&fs_info->qgroup_lock);
}
int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{
	int ret = 0;
	struct btrfs_trans_handle *trans;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (ret)
		return ret;

	/*
	 * We have set the rescan_progress to 0, which means no more
	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
	 * However, a concurrent btrfs_qgroup_account_ref may have just
	 * returned from btrfs_find_all_roots, in which case it would still
	 * do the accounting.
	 * To solve this, we're committing the transaction, which will
	 * ensure we run all delayed refs and only after that, we are
	 * going to clear all tracking information for a clean start.
	 */

	trans = btrfs_join_transaction(fs_info->fs_root);
	if (IS_ERR(trans)) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return PTR_ERR(trans);
	}
	ret = btrfs_commit_transaction(trans, fs_info->fs_root);
	if (ret) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return ret;
	}

	qgroup_rescan_zero_tracking(fs_info);

	btrfs_queue_work(fs_info->qgroup_rescan_workers,
			 &fs_info->qgroup_rescan_work);

	return 0;
}
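
/*
 * Usage sketch (hypothetical caller, not part of this file): an ioctl
 * handler would kick off the scan and optionally block until the worker
 * signals completion:
 *
 *	ret = btrfs_qgroup_rescan(fs_info);
 *	if (ret)
 *		return ret;	// e.g. -EINPROGRESS from qgroup_rescan_init
 *	ret = btrfs_qgroup_wait_for_completion(fs_info);
 *
 * The wait is interruptible, so a pending signal surfaces as -ERESTARTSYS
 * while the rescan worker keeps running in the background.
 */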
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
{
	int running;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);
	running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (running)
		ret = wait_for_completion_interruptible(
			&fs_info->qgroup_rescan_completion);

	return ret;
}
/*
 * this is only called from open_ctree where we're still single threaded,
 * thus locking is omitted here.
 */
void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
{
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
}