/*
 * f2fs extent cache support
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Samsung Electronics
 * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
 *          Chao Yu <chao2.yu@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;
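
/*
 * Allocate an extent node for @ei and link it at the given rbtree
 * position.  Runs under et->lock, hence the atomic allocation; returns
 * NULL if the slab allocation fails.
 */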
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p)
{
	struct extent_node *en;

	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color(&en->rb_node, &et->root);
	et->count++;
	atomic_inc(&sbi->total_ext_node);
	return en;
}

static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	rb_erase(&en->rb_node, &et->root);
	et->count--;
	atomic_dec(&sbi->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
}
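
/*
 * Find or create the extent tree for @inode in sbi->extent_tree_root,
 * indexed by inode number, and take a reference on it.
 */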
static struct extent_tree *__grab_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	down_write(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->root = RB_ROOT;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		atomic_set(&et->refcount, 0);
		et->count = 0;
		sbi->total_ext_tree++;
	}
	atomic_inc(&et->refcount);
	up_write(&sbi->extent_tree_lock);

	/* never dies until evict_inode */
	F2FS_I(inode)->extent_tree = et;

	return et;
}
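
/*
 * Find the extent node covering file offset @fofs.  The last node hit
 * is memoized in et->cached_en so repeated lookups in the same extent
 * can skip the rbtree walk.  Returns NULL on a miss.
 */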
static struct extent_node *__lookup_extent_tree(struct extent_tree *et,
							unsigned int fofs)
{
	struct rb_node *node = et->root.rb_node;
	struct extent_node *en;

	if (et->cached_en) {
		struct extent_info *cei = &et->cached_en->ei;

		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
			return et->cached_en;
	}

	while (node) {
		en = rb_entry(node, struct extent_node, rb_node);

		if (fofs < en->ei.fofs)
			node = node->rb_left;
		else if (fofs >= en->ei.fofs + en->ei.len)
			node = node->rb_right;
		else
			return en;
	}
	return NULL;
}
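
/*
 * Merge helpers: if @en is contiguous with its rbtree predecessor
 * (back merge) or successor (front merge), absorb that neighbor into
 * @en, detach it from the tree, and return it so the caller can free
 * it once it is off the global extent list.
 */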
static struct extent_node *__try_back_merge(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_node *prev;
	struct rb_node *node;

	node = rb_prev(&en->rb_node);
	if (!node)
		return NULL;

	prev = rb_entry(node, struct extent_node, rb_node);
	if (__is_back_mergeable(&en->ei, &prev->ei)) {
		en->ei.fofs = prev->ei.fofs;
		en->ei.blk = prev->ei.blk;
		en->ei.len += prev->ei.len;
		__detach_extent_node(sbi, et, prev);
		return prev;
	}
	return NULL;
}

static struct extent_node *__try_front_merge(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_node *next;
	struct rb_node *node;

	node = rb_next(&en->rb_node);
	if (!node)
		return NULL;

	next = rb_entry(node, struct extent_node, rb_node);
	if (__is_front_mergeable(&en->ei, &next->ei)) {
		en->ei.len += next->ei.len;
		__detach_extent_node(sbi, et, next);
		return next;
	}
	return NULL;
}
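
/*
 * Insert @ei into the tree, merging with an adjacent extent when
 * possible instead of attaching a new node.  If a merge collapses two
 * existing nodes, the now-detached node is handed back through @den;
 * passing a NULL @den asserts that no such merge can occur.
 */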
static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node **den)
{
	struct rb_node **p = &et->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_node *en;

	while (*p) {
		parent = *p;
		en = rb_entry(parent, struct extent_node, rb_node);

		if (ei->fofs < en->ei.fofs) {
			if (__is_front_mergeable(ei, &en->ei)) {
				f2fs_bug_on(sbi, !den);
				en->ei.fofs = ei->fofs;
				en->ei.blk = ei->blk;
				en->ei.len += ei->len;
				*den = __try_back_merge(sbi, et, en);
				goto update_out;
			}
			p = &(*p)->rb_left;
		} else if (ei->fofs >= en->ei.fofs + en->ei.len) {
			if (__is_back_mergeable(ei, &en->ei)) {
				f2fs_bug_on(sbi, !den);
				en->ei.len += ei->len;
				*den = __try_front_merge(sbi, et, en);
				goto update_out;
			}
			p = &(*p)->rb_right;
		} else {
			f2fs_bug_on(sbi, 1);
		}
	}

	en = __attach_extent_node(sbi, et, ei, parent, p);
	if (!en)
		return NULL;
update_out:
	if (en->ei.len > et->largest.len)
		et->largest = en->ei;
	et->cached_en = en;
	return en;
}
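
/*
 * Free extent nodes of @et and return how many were released.  With
 * @free_all, every node goes, including those still linked on the
 * global LRU list; otherwise only nodes the shrinker has already
 * removed from the LRU are freed.
 */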
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
					struct extent_tree *et, bool free_all)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = et->count;

	node = rb_first(&et->root);
	while (node) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);

		if (free_all) {
			spin_lock(&sbi->extent_lock);
			if (!list_empty(&en->list))
				list_del_init(&en->list);
			spin_unlock(&sbi->extent_lock);
		}

		if (free_all || list_empty(&en->list)) {
			__detach_extent_node(sbi, et, en);
			kmem_cache_free(extent_node_slab, en);
		}
		node = next;
	}

	return count - et->count;
}

void f2fs_drop_largest_extent(struct inode *inode, pgoff_t fofs)
{
	struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest;

	if (largest->fofs <= fofs && largest->fofs + largest->len > fofs)
		largest->len = 0;
}
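
/*
 * Seed the in-memory extent tree from the single on-disk extent kept
 * in the inode, skipping extents shorter than F2FS_MIN_EXTENT_LEN
 * since they are not worth caching.
 */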
void f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (!f2fs_may_extent_tree(inode))
		return;

	et = __grab_extent_tree(inode);

	if (!i_ext || le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN)
		return;

	set_extent_info(&ei, le32_to_cpu(i_ext->fofs),
		le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len));

	write_lock(&et->lock);
	if (et->count)
		goto out;

	en = __insert_extent_tree(sbi, et, &ei, NULL);
	if (en) {
		spin_lock(&sbi->extent_lock);
		list_add_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
	}
out:
	write_unlock(&et->lock);
}
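
/*
 * Query the cache for the extent covering page offset @pgofs.  The
 * largest cached extent is checked first as a cheap fast path; on a
 * tree hit, the node is moved to the tail of the global LRU list.
 */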
static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en;
	bool ret = false;

	f2fs_bug_on(sbi, !et);

	trace_f2fs_lookup_extent_tree_start(inode, pgofs);

	read_lock(&et->lock);

	if (et->largest.fofs <= pgofs &&
			et->largest.fofs + et->largest.len > pgofs) {
		*ei = et->largest;
		ret = true;
		stat_inc_read_hit(sbi->sb);
		goto out;
	}

	en = __lookup_extent_tree(et, pgofs);
	if (en) {
		*ei = en->ei;
		spin_lock(&sbi->extent_lock);
		if (!list_empty(&en->list))
			list_move_tail(&en->list, &sbi->extent_list);
		et->cached_en = en;
		spin_unlock(&sbi->extent_lock);
		ret = true;
		stat_inc_read_hit(sbi->sb);
	}
out:
	stat_inc_total_hit(sbi->sb);
	read_unlock(&et->lock);

	trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
	return ret;
}
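
/*
 * Write-side update for a single block at @fofs now mapped to @blkaddr.
 * Any cached extent covering @fofs is detached; its left and right
 * remainders are re-inserted when they are still long enough to be
 * worth caching, and the new one-block mapping is inserted, possibly
 * merging with a neighbor.  If updates degrade the tree into tiny
 * fragments, caching is abandoned via FI_NO_EXTENT.  Returns true if
 * the on-disk extent in the inode should be updated as well.
 */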
static bool f2fs_update_extent_tree(struct inode *inode, pgoff_t fofs,
							block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	struct extent_node *en = NULL, *en1 = NULL, *en2 = NULL, *en3 = NULL;
	struct extent_node *den = NULL;
	struct extent_info ei, dei, prev;
	unsigned int endofs;

	if (!et)
		return false;

	trace_f2fs_update_extent_tree(inode, fofs, blkaddr);

	write_lock(&et->lock);

	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT)) {
		write_unlock(&et->lock);
		return false;
	}

	prev = et->largest;
	dei.len = 0;

	/* we do not guarantee that the largest extent is cached all the time */
	f2fs_drop_largest_extent(inode, fofs);

	/* 1. lookup and remove existing extent info in cache */
	en = __lookup_extent_tree(et, fofs);
	if (!en)
		goto update_extent;

	dei = en->ei;
	__detach_extent_node(sbi, et, en);

	/* 2. if extent can be split more, split and insert the left part */
	if (dei.len > F2FS_MIN_EXTENT_LEN) {
		/* insert left part of split extent into cache */
		if (fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
			set_extent_info(&ei, dei.fofs, dei.blk,
							fofs - dei.fofs);
			en1 = __insert_extent_tree(sbi, et, &ei, NULL);
		}

		/* insert right part of split extent into cache */
		endofs = dei.fofs + dei.len - 1;
		if (endofs - fofs >= F2FS_MIN_EXTENT_LEN) {
			set_extent_info(&ei, fofs + 1,
				fofs - dei.fofs + dei.blk + 1, endofs - fofs);
			en2 = __insert_extent_tree(sbi, et, &ei, NULL);
		}
	}

update_extent:
	/* 3. update extent in extent cache */
	if (blkaddr) {
		set_extent_info(&ei, fofs, blkaddr, 1);
		en3 = __insert_extent_tree(sbi, et, &ei, &den);

		/* give up extent_cache, if split and small updates happen */
		if (dei.len >= 1 &&
				prev.len < F2FS_MIN_EXTENT_LEN &&
				et->largest.len < F2FS_MIN_EXTENT_LEN) {
			et->largest.len = 0;
			set_inode_flag(F2FS_I(inode), FI_NO_EXTENT);
		}
	}

	/* 4. update in global extent list */
	spin_lock(&sbi->extent_lock);
	if (en && !list_empty(&en->list))
		list_del(&en->list);
	/*
	 * en1 and en2 split from en; they become smaller and smaller
	 * fragments after several splits, so extents shorter than
	 * F2FS_MIN_EXTENT_LEN were never inserted above and en1/en2
	 * may legitimately be NULL here.
	 */
	if (en1)
		list_add_tail(&en1->list, &sbi->extent_list);
	if (en2)
		list_add_tail(&en2->list, &sbi->extent_list);
	if (en3) {
		if (list_empty(&en3->list))
			list_add_tail(&en3->list, &sbi->extent_list);
		else
			list_move_tail(&en3->list, &sbi->extent_list);
	}
	if (den && !list_empty(&den->list))
		list_del(&den->list);
	spin_unlock(&sbi->extent_lock);

	/* 5. release extent node */
	if (en)
		kmem_cache_free(extent_node_slab, en);
	if (den)
		kmem_cache_free(extent_node_slab, den);

	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
		__free_extent_tree(sbi, et, true);

	write_unlock(&et->lock);

	return !__is_extent_same(&prev, &et->largest);
}
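
/*
 * Shrinker entry point: reclaim up to @nr_shrink objects in two passes,
 * first freeing whole trees that no inode references anymore, then
 * evicting the coldest nodes from the head of the global LRU list.
 * Trylocks keep reclaim from stalling behind writers.
 */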
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
	struct extent_node *en, *tmp;
	unsigned long ino = F2FS_ROOT_INO(sbi);
	struct radix_tree_root *root = &sbi->extent_tree_root;
	unsigned int found;
	unsigned int node_cnt = 0, tree_cnt = 0;
	int remained;

	if (!test_opt(sbi, EXTENT_CACHE))
		return 0;

	if (!down_write_trylock(&sbi->extent_tree_lock))
		goto out;

	/* 1. remove unreferenced extent trees */
	while ((found = radix_tree_gang_lookup(root,
				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
		unsigned i;

		ino = treevec[found - 1]->ino + 1;
		for (i = 0; i < found; i++) {
			struct extent_tree *et = treevec[i];

			if (!atomic_read(&et->refcount)) {
				write_lock(&et->lock);
				node_cnt += __free_extent_tree(sbi, et, true);
				write_unlock(&et->lock);

				radix_tree_delete(root, et->ino);
				kmem_cache_free(extent_tree_slab, et);
				sbi->total_ext_tree--;
				tree_cnt++;

				if (node_cnt + tree_cnt >= nr_shrink)
					goto unlock_out;
			}
		}
	}
	up_write(&sbi->extent_tree_lock);

	/* 2. remove LRU extent entries */
	if (!down_write_trylock(&sbi->extent_tree_lock))
		goto out;

	remained = nr_shrink - (node_cnt + tree_cnt);

	spin_lock(&sbi->extent_lock);
	list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
		if (!remained--)
			break;
		list_del_init(&en->list);
	}
	spin_unlock(&sbi->extent_lock);

	while ((found = radix_tree_gang_lookup(root,
				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
		unsigned i;

		ino = treevec[found - 1]->ino + 1;
		for (i = 0; i < found; i++) {
			struct extent_tree *et = treevec[i];

			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et, false);
			write_unlock(&et->lock);

			if (node_cnt + tree_cnt >= nr_shrink)
				goto unlock_out;
		}
	}
unlock_out:
	up_write(&sbi->extent_tree_lock);
out:
	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);

	return node_cnt + tree_cnt;
}
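
/*
 * Free every extent node attached to @inode's tree and return the
 * number freed; the tree structure itself stays allocated.
 */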
unsigned int f2fs_destroy_extent_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et)
		return 0;

	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et, true);
	write_unlock(&et->lock);

	return node_cnt;
}
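
/*
 * Called on inode eviction.  A still-linked inode only drops its
 * reference and keeps the tree around for the shrinker; otherwise the
 * tree is emptied and deleted from the radix tree.
 */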
void f2fs_destroy_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree;
	unsigned int node_cnt = 0;

	if (!et)
		return;

	if (inode->i_nlink && !is_bad_inode(inode) && et->count) {
		atomic_dec(&et->refcount);
		return;
	}

	/* free all extent info belonging to this extent tree */
	node_cnt = f2fs_destroy_extent_node(inode);

	/* delete extent tree entry in radix tree */
	down_write(&sbi->extent_tree_lock);
	atomic_dec(&et->refcount);
	f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	sbi->total_ext_tree--;
	up_write(&sbi->extent_tree_lock);

	F2FS_I(inode)->extent_tree = NULL;

	trace_f2fs_destroy_extent_tree(inode, node_cnt);
}

bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct extent_info *ei)
{
	if (!f2fs_may_extent_tree(inode))
		return false;

	return f2fs_lookup_extent_tree(inode, pgofs, ei);
}
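
/*
 * Update the cache after a block is written through @dn.  The file
 * offset is computed from the node page backing the dnode; if the
 * largest extent changed, the inode page is synced so the on-disk
 * extent stays coherent.
 */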
void f2fs_update_extent_cache(struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs;

	if (!f2fs_may_extent_tree(dn->inode))
		return;

	f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);

	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	if (f2fs_update_extent_tree(dn->inode, fofs, dn->data_blkaddr))
		sync_inode_page(dn);
}

void init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
	init_rwsem(&sbi->extent_tree_lock);
	INIT_LIST_HEAD(&sbi->extent_list);
	spin_lock_init(&sbi->extent_lock);
	sbi->total_ext_tree = 0;
	atomic_set(&sbi->total_ext_node, 0);
}

int __init create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		return -ENOMEM;
	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}