2 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4 * Uses a block device as cache for other block devices; optimized for SSDs.
5 * All allocation is done in buckets, which should match the erase block size
8 * Buckets containing cached data are kept on a heap sorted by priority;
9 * bucket priority is increased on cache hit, and periodically all the buckets
10 * on the heap have their priority scaled down. This currently is just used as
11 * an LRU but in the future should allow for more intelligent heuristics.
13 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
14 * counter. Garbage collection is used to remove stale pointers.
16 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
17 * as keys are inserted we only sort the pages that have not yet been written.
18 * When garbage collection is run, we resort the entire node.
20 * All configuration is done via sysfs; see Documentation/bcache.txt.
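/*
 * Illustrative sketch, not part of the driver: incrementing a bucket's 8 bit
 * generation is what "frees" it, because every pointer into the bucket was
 * written with the old generation and is now stale. The check is roughly
 *
 *	stale = gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
 *
 * which is what ptr_stale() in bcache.h does; garbage collection then drops
 * the stale pointers from the index.
 */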
28 #include <linux/slab.h>
29 #include <linux/bitops.h>
30 #include <linux/freezer.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/prefetch.h>
34 #include <linux/random.h>
35 #include <linux/rcupdate.h>
36 #include <trace/events/bcache.h>
40 * register_bcache: Return errors out to userspace correctly
42 * Writeback: don't undirty key until after a cache flush
44 * Create an iterator for key pointers
46 * On btree write error, mark bucket such that it won't be freed from the cache
49 * Check for bad keys in replay
51 * Refcount journal entries in journal_replay
54 * Finish incremental gc
55 * Gc should free old UUIDs, data for invalid UUIDs
57 * Provide a way to list backing device UUIDs we have data cached for, and
58 * probably how long it's been since we've seen them, and a way to invalidate
59 * dirty data for devices that will never be attached again
61 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
62 * that based on that and how much dirty data we have we can keep writeback from being starved
65 * Add a tracepoint or somesuch to watch for writeback starvation
67 * When btree depth > 1 and splitting an interior node, we have to make sure
68 * alloc_bucket() cannot fail. This should be true but is not completely obvious.
71 * Make sure all allocations get charged to the root cgroup
75 * If data write is less than hard sector size of ssd, round up offset in open
76 * bucket to the next whole sector
78 * Also lookup by cgroup in get_open_bucket()
80 * Superblock needs to be fleshed out for multiple cache devices
82 * Add a sysfs tunable for the number of writeback IOs in flight
84 * Add a sysfs tunable for the number of open data buckets
86 * IO tracking: Can we track when one process is doing io on behalf of another?
87 * IO tracking: Don't use just an average, weigh more recent stuff higher
89 * Test module load/unload
92 #define MAX_NEED_GC 64
93 #define MAX_SAVE_PRIO 72
95 #define PTR_DIRTY_BIT (((uint64_t) 1 << 36))
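/* Hash table identity of a cached btree node: its bucket combined with the pointer's generation */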
97 #define PTR_HASH(c, k) \
98 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
100 static struct workqueue_struct *btree_io_wq;
102 #define insert_lock(s, b) ((b)->level <= (s)->lock)
105 * These macros are for recursing down the btree - they handle the details of
106 * locking and looking up nodes in the cache for you. They're best treated as
107 * mere syntax when reading code that uses them.
109 * op->lock determines whether we take a read or a write lock at a given depth.
110 * If you've got a read lock and find that you need a write lock (i.e. you're
111 * going to have to split), set op->lock and return -EINTR; btree_root() will
112 * call you again and you'll have the correct lock.
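/*
 * A minimal sketch of that retry pattern (hypothetical callee, not code from
 * this file): on discovering mid-descent that a write lock is needed,
 *
 *	op->lock = b->c->root->level + 1;
 *	return -EINTR;		// btree_root() restarts the descent
 *
 * bch_btree_insert_node() below does exactly this when a node must be split.
 */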
116 * btree - recurse down the btree on a specified key
117 * @fn: function to call, which will be passed the child node
118 * @key: key to recurse on
119 * @b: parent btree node
120 * @op: pointer to struct btree_op
122 #define btree(fn, key, b, op, ...) \
124 int _r, l = (b)->level - 1; \
125 bool _w = l <= (op)->lock; \
126 struct btree *_child = bch_btree_node_get((b)->c, key, l, _w); \
127 if (!IS_ERR(_child)) { \
128 _child->parent = (b); \
129 _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
130 rw_unlock(_w, _child); \
132 _r = PTR_ERR(_child); \
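/*
 * Example call site (from bch_btree_check_recurse() below):
 *
 *	ret = btree(check_recurse, p, b, op);
 *
 * which expands to bch_btree_node_get() on the child that @p points to, a
 * call to bch_btree_check_recurse() on it, and an rw_unlock() of the child.
 */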
137 * btree_root - call a function on the root of the btree
138 * @fn: function to call, which will be passed the child node
140 * @op: pointer to struct btree_op
142 #define btree_root(fn, c, op, ...) \
146 struct btree *_b = (c)->root; \
147 bool _w = insert_lock(op, _b); \
148 rw_lock(_w, _b, _b->level); \
149 if (_b == (c)->root && \
150 _w == insert_lock(op, _b)) { \
152 _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
157 bch_cannibalize_unlock(c); \
158 if (_r == -ENOSPC) { \
159 wait_event((c)->try_wait, \
163 } while (_r == -EINTR); \
165 finish_wait(&(c)->bucket_wait, &(op)->wait); \
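/*
 * Example call site (from bch_btree_gc() below):
 *
 *	ret = btree_root(gc_root, c, &op, &writes, &stats);
 *
 * Note the recheck that (c)->root didn't change while the lock was being
 * taken, and the retry loop on -EINTR.
 */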
169 static inline struct bset *write_block(struct btree *b)
171 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
174 /* Btree key manipulation */
176 void bkey_put(struct cache_set *c, struct bkey *k)
180 for (i = 0; i < KEY_PTRS(k); i++)
181 if (ptr_available(c, k, i))
182 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
187 static uint64_t btree_csum_set(struct btree *b, struct bset *i)
189 uint64_t crc = b->key.ptr[0];
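	/* checksum covers everything after the bset's own 64 bit csum field;
	 * the crc is seeded with the node's first pointer */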
190 void *data = (void *) i + 8, *end = bset_bkey_last(i);
192 crc = bch_crc64_update(crc, data, end - data);
193 return crc ^ 0xffffffffffffffffULL;
196 void bch_btree_node_read_done(struct btree *b)
198 const char *err = "bad btree header";
199 struct bset *i = btree_bset_first(b);
200 struct btree_iter *iter;
202 iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT);
203 iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
206 #ifdef CONFIG_BCACHE_DEBUG
214 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
215 i = write_block(b)) {
216 err = "unsupported bset version";
217 if (i->version > BCACHE_BSET_VERSION)
220 err = "bad btree header";
221 if (b->written + set_blocks(i, block_bytes(b->c)) >
226 if (i->magic != bset_magic(&b->c->sb))
229 err = "bad checksum";
230 switch (i->version) {
232 if (i->csum != csum_set(i))
235 case BCACHE_BSET_VERSION:
236 if (i->csum != btree_csum_set(b, i))
242 if (i != b->keys.set[0].data && !i->keys)
245 bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
247 b->written += set_blocks(i, block_bytes(b->c));
250 err = "corrupted btree";
251 for (i = write_block(b);
252 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
253 i = ((void *) i) + block_bytes(b->c))
254 if (i->seq == b->keys.set[0].data->seq)
257 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
259 i = b->keys.set[0].data;
260 err = "short btree key";
261 if (b->keys.set[0].size &&
262 bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
265 if (b->written < btree_blocks(b))
266 bch_bset_init_next(&b->keys, write_block(b),
267 bset_magic(&b->c->sb));
269 mempool_free(iter, b->c->fill_iter);
272 set_btree_node_io_error(b);
273 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
274 err, PTR_BUCKET_NR(b->c, &b->key, 0),
275 bset_block_offset(b, i), i->keys);
279 static void btree_node_read_endio(struct bio *bio, int error)
281 struct closure *cl = bio->bi_private;
285 static void bch_btree_node_read(struct btree *b)
287 uint64_t start_time = local_clock();
291 trace_bcache_btree_read(b);
293 closure_init_stack(&cl);
295 bio = bch_bbio_alloc(b->c);
296 bio->bi_rw = REQ_META|READ_SYNC;
297 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
298 bio->bi_end_io = btree_node_read_endio;
299 bio->bi_private = &cl;
301 bch_bio_map(bio, b->keys.set[0].data);
303 bch_submit_bbio(bio, b->c, &b->key, 0);
306 if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
307 set_btree_node_io_error(b);
309 bch_bbio_free(bio, b->c);
311 if (btree_node_io_error(b))
314 bch_btree_node_read_done(b);
315 bch_time_stats_update(&b->c->btree_read_time, start_time);
319 bch_cache_set_error(b->c, "io error reading bucket %zu",
320 PTR_BUCKET_NR(b->c, &b->key, 0));
323 static void btree_complete_write(struct btree *b, struct btree_write *w)
325 if (w->prio_blocked &&
326 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
327 wake_up_allocators(b->c);
330 atomic_dec_bug(w->journal);
331 __closure_wake_up(&b->c->journal.wait);
338 static void btree_node_write_unlock(struct closure *cl)
340 struct btree *b = container_of(cl, struct btree, io);
345 static void __btree_node_write_done(struct closure *cl)
347 struct btree *b = container_of(cl, struct btree, io);
348 struct btree_write *w = btree_prev_write(b);
350 bch_bbio_free(b->bio, b->c);
352 btree_complete_write(b, w);
354 if (btree_node_dirty(b))
355 queue_delayed_work(btree_io_wq, &b->work,
356 msecs_to_jiffies(30000));
358 closure_return_with_destructor(cl, btree_node_write_unlock);
361 static void btree_node_write_done(struct closure *cl)
363 struct btree *b = container_of(cl, struct btree, io);
367 bio_for_each_segment_all(bv, b->bio, n)
368 __free_page(bv->bv_page);
370 __btree_node_write_done(cl);
373 static void btree_node_write_endio(struct bio *bio, int error)
375 struct closure *cl = bio->bi_private;
376 struct btree *b = container_of(cl, struct btree, io);
379 set_btree_node_io_error(b);
381 bch_bbio_count_io_errors(b->c, bio, error, "writing btree");
385 static void do_btree_node_write(struct btree *b)
387 struct closure *cl = &b->io;
388 struct bset *i = btree_bset_last(b);
391 i->version = BCACHE_BSET_VERSION;
392 i->csum = btree_csum_set(b, i);
395 b->bio = bch_bbio_alloc(b->c);
397 b->bio->bi_end_io = btree_node_write_endio;
398 b->bio->bi_private = cl;
399 b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
400 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
401 bch_bio_map(b->bio, i);
404 * If we're appending to a leaf node, we don't technically need FUA -
405 * this write just needs to be persisted before the next journal write,
406 * which will be marked FLUSH|FUA.
408 * Similarly if we're writing a new btree root - the pointer is going to
409 * be in the next journal entry.
411 * But if we're writing a new btree node (that isn't a root) or
412 * appending to a non-leaf btree node, we need either FUA or a flush
413 * when we write the parent with the new pointer. FUA is cheaper than a
414 * flush, and writes appending to leaf nodes aren't blocking anything so
415 * just make all btree node writes FUA to keep things sane.
418 bkey_copy(&k.key, &b->key);
419 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
420 bset_sector_offset(&b->keys, i));
422 if (!bio_alloc_pages(b->bio, GFP_NOIO)) {
425 void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
427 bio_for_each_segment_all(bv, b->bio, j)
428 memcpy(page_address(bv->bv_page),
429 base + j * PAGE_SIZE, PAGE_SIZE);
431 bch_submit_bbio(b->bio, b->c, &k.key, 0);
433 continue_at(cl, btree_node_write_done, NULL);
436 bch_bio_map(b->bio, i);
438 bch_submit_bbio(b->bio, b->c, &k.key, 0);
441 continue_at_nobarrier(cl, __btree_node_write_done, NULL);
445 void bch_btree_node_write(struct btree *b, struct closure *parent)
447 struct bset *i = btree_bset_last(b);
449 trace_bcache_btree_write(b);
451 BUG_ON(current->bio_list);
452 BUG_ON(b->written >= btree_blocks(b));
453 BUG_ON(b->written && !i->keys);
454 BUG_ON(btree_bset_first(b)->seq != i->seq);
455 bch_check_keys(&b->keys, "writing");
457 cancel_delayed_work(&b->work);
459 /* If caller isn't waiting for write, parent refcount is cache set */
461 closure_init(&b->io, parent ?: &b->c->cl);
463 clear_bit(BTREE_NODE_dirty, &b->flags);
464 change_bit(BTREE_NODE_write_idx, &b->flags);
466 do_btree_node_write(b);
468 atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
469 &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
471 b->written += set_blocks(i, block_bytes(b->c));
473 /* If not a leaf node, always sort */
474 if (b->level && b->keys.nsets)
475 bch_btree_sort(&b->keys, &b->c->sort);
477 bch_btree_sort_lazy(&b->keys, &b->c->sort);
480 * do verify if there was more than one set initially (i.e. we did a
481 * sort) and we sorted down to a single set:
483 if (i != b->keys.set->data && !b->keys.nsets)
486 if (b->written < btree_blocks(b))
487 bch_bset_init_next(&b->keys, write_block(b),
488 bset_magic(&b->c->sb));
491 static void bch_btree_node_write_sync(struct btree *b)
495 closure_init_stack(&cl);
496 bch_btree_node_write(b, &cl);
500 static void btree_node_write_work(struct work_struct *w)
502 struct btree *b = container_of(to_delayed_work(w), struct btree, work);
504 rw_lock(true, b, b->level);
506 if (btree_node_dirty(b))
507 bch_btree_node_write(b, NULL);
511 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
513 struct bset *i = btree_bset_last(b);
514 struct btree_write *w = btree_current_write(b);
519 if (!btree_node_dirty(b))
520 queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
522 set_btree_node_dirty(b);
526 journal_pin_cmp(b->c, w->journal, journal_ref)) {
527 atomic_dec_bug(w->journal);
532 w->journal = journal_ref;
533 atomic_inc(w->journal);
537 /* Force write if set is too big */
538 if (set_bytes(i) > PAGE_SIZE - 48 &&
540 bch_btree_node_write(b, NULL);
544 * Btree in memory cache - allocation/freeing
545 * mca -> memory cache
548 #define mca_reserve(c) (((c->root && c->root->level) \
549 ? c->root->level : 1) * 8 + 16)
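/* e.g. a tree whose root is at level 2 reserves 2 * 8 + 16 = 32 nodes */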
550 #define mca_can_free(c) \
551 max_t(int, 0, c->bucket_cache_used - mca_reserve(c))
553 static void mca_data_free(struct btree *b)
555 BUG_ON(b->io_mutex.count != 1);
557 bch_btree_keys_free(&b->keys);
559 b->c->bucket_cache_used--;
560 list_move(&b->list, &b->c->btree_cache_freed);
563 static void mca_bucket_free(struct btree *b)
565 BUG_ON(btree_node_dirty(b));
568 hlist_del_init_rcu(&b->hash);
569 list_move(&b->list, &b->c->btree_cache_freeable);
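/* Page allocation order needed for the on disk node that @k points to */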
572 static unsigned btree_order(struct bkey *k)
574 return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
577 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
579 if (!bch_btree_keys_alloc(&b->keys,
581 ilog2(b->c->btree_pages),
584 b->c->bucket_cache_used++;
585 list_move(&b->list, &b->c->btree_cache);
587 list_move(&b->list, &b->c->btree_cache_freed);
591 static struct btree *mca_bucket_alloc(struct cache_set *c,
592 struct bkey *k, gfp_t gfp)
594 struct btree *b = kzalloc(sizeof(struct btree), gfp);
598 init_rwsem(&b->lock);
599 lockdep_set_novalidate_class(&b->lock);
600 INIT_LIST_HEAD(&b->list);
601 INIT_DELAYED_WORK(&b->work, btree_node_write_work);
603 sema_init(&b->io_mutex, 1);
605 mca_data_alloc(b, k, gfp);
609 static int mca_reap(struct btree *b, unsigned min_order, bool flush)
613 closure_init_stack(&cl);
614 lockdep_assert_held(&b->c->bucket_lock);
616 if (!down_write_trylock(&b->lock))
619 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
621 if (b->keys.page_order < min_order)
625 if (btree_node_dirty(b))
628 if (down_trylock(&b->io_mutex))
633 if (btree_node_dirty(b))
634 bch_btree_node_write_sync(b);
636 /* wait for any in flight btree write */
646 static unsigned long bch_mca_scan(struct shrinker *shrink,
647 struct shrink_control *sc)
649 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
651 unsigned long i, nr = sc->nr_to_scan;
652 unsigned long freed = 0;
654 if (c->shrinker_disabled)
660 /* Return -1 if we can't do anything right now */
661 if (sc->gfp_mask & __GFP_IO)
662 mutex_lock(&c->bucket_lock);
663 else if (!mutex_trylock(&c->bucket_lock))
667 * It's _really_ critical that we don't free too many btree nodes - we
668 * have to always leave ourselves a reserve. The reserve is how we
669 * guarantee that allocating memory for a new btree node can always
670 * succeed, so that inserting keys into the btree can always succeed and
671 * IO can always make forward progress:
673 nr /= c->btree_pages;
674 nr = min_t(unsigned long, nr, mca_can_free(c));
677 list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
682 !mca_reap(b, 0, false)) {
689 for (i = 0; (nr--) && i < c->bucket_cache_used; i++) {
690 if (list_empty(&c->btree_cache))
693 b = list_first_entry(&c->btree_cache, struct btree, list);
694 list_rotate_left(&c->btree_cache);
697 !mca_reap(b, 0, false)) {
706 mutex_unlock(&c->bucket_lock);
710 static unsigned long bch_mca_count(struct shrinker *shrink,
711 struct shrink_control *sc)
713 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
715 if (c->shrinker_disabled)
721 return mca_can_free(c) * c->btree_pages;
724 void bch_btree_cache_free(struct cache_set *c)
728 closure_init_stack(&cl);
730 if (c->shrink.list.next)
731 unregister_shrinker(&c->shrink);
733 mutex_lock(&c->bucket_lock);
735 #ifdef CONFIG_BCACHE_DEBUG
737 list_move(&c->verify_data->list, &c->btree_cache);
739 free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
742 list_splice(&c->btree_cache_freeable,
745 while (!list_empty(&c->btree_cache)) {
746 b = list_first_entry(&c->btree_cache, struct btree, list);
748 if (btree_node_dirty(b))
749 btree_complete_write(b, btree_current_write(b));
750 clear_bit(BTREE_NODE_dirty, &b->flags);
755 while (!list_empty(&c->btree_cache_freed)) {
756 b = list_first_entry(&c->btree_cache_freed,
759 cancel_delayed_work_sync(&b->work);
763 mutex_unlock(&c->bucket_lock);
766 int bch_btree_cache_alloc(struct cache_set *c)
770 for (i = 0; i < mca_reserve(c); i++)
771 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
774 list_splice_init(&c->btree_cache,
775 &c->btree_cache_freeable);
777 #ifdef CONFIG_BCACHE_DEBUG
778 mutex_init(&c->verify_lock);
780 c->verify_ondisk = (void *)
781 __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
783 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
785 if (c->verify_data &&
786 c->verify_data->keys.set->data)
787 list_del_init(&c->verify_data->list);
789 c->verify_data = NULL;
792 c->shrink.count_objects = bch_mca_count;
793 c->shrink.scan_objects = bch_mca_scan;
795 c->shrink.batch = c->btree_pages * 2;
796 register_shrinker(&c->shrink);
801 /* Btree in memory cache - hash table */
803 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
805 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
808 static struct btree *mca_find(struct cache_set *c, struct bkey *k)
813 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
814 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
822 static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k)
826 trace_bcache_btree_cache_cannibalize(c);
828 if (!c->try_harder) {
829 c->try_harder = current;
830 c->try_harder_start = local_clock();
831 } else if (c->try_harder != current)
832 return ERR_PTR(-ENOSPC);
834 list_for_each_entry_reverse(b, &c->btree_cache, list)
835 if (!mca_reap(b, btree_order(k), false))
838 list_for_each_entry_reverse(b, &c->btree_cache, list)
839 if (!mca_reap(b, btree_order(k), true))
842 return ERR_PTR(-ENOMEM);
846 * We can only have one thread cannibalizing other cached btree nodes at a time,
847 * or we'll deadlock. We use an open coded mutex to ensure that, which
848 * mca_cannibalize() will take. This means every time we unlock the root of
849 * the btree, we need to release this lock if we have it held.
851 static void bch_cannibalize_unlock(struct cache_set *c)
853 if (c->try_harder == current) {
854 bch_time_stats_update(&c->try_harder_time, c->try_harder_start);
855 c->try_harder = NULL;
856 wake_up(&c->try_wait);
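/*
 * Taken together: mca_cannibalize() above is the "lock" (it sets
 * c->try_harder to current, or fails with -ENOSPC if another thread holds
 * it), and bch_cannibalize_unlock() is the "unlock" that btree_root() runs
 * on every pass before waiting on c->try_wait.
 */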
860 static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level)
864 BUG_ON(current->bio_list);
866 lockdep_assert_held(&c->bucket_lock);
871 /* btree_free() doesn't free memory; it sticks the node on the end of
872 * the list. Check if there's any freed nodes there:
874 list_for_each_entry(b, &c->btree_cache_freeable, list)
875 if (!mca_reap(b, btree_order(k), false))
878 /* We never free struct btree itself, just the memory that holds the on
879 * disk node. Check the freed list before allocating a new one:
881 list_for_each_entry(b, &c->btree_cache_freed, list)
882 if (!mca_reap(b, 0, false)) {
883 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
884 if (!b->keys.set[0].data)
890 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
894 BUG_ON(!down_write_trylock(&b->lock));
895 if (!b->keys.set->data)
898 BUG_ON(b->io_mutex.count != 1);
900 bkey_copy(&b->key, k);
901 list_move(&b->list, &c->btree_cache);
902 hlist_del_init_rcu(&b->hash);
903 hlist_add_head_rcu(&b->hash, mca_hash(c, k));
905 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
906 b->parent = (void *) ~0UL;
912 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
913 &b->c->expensive_debug_checks);
915 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
916 &b->c->expensive_debug_checks);
923 b = mca_cannibalize(c, k);
931 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
932 * in from disk if necessary.
934 * If IO is necessary and running under generic_make_request, returns -EAGAIN.
936 * The btree node will have either a read or a write lock held, depending on
937 * level and op->lock.
939 struct btree *bch_btree_node_get(struct cache_set *c, struct bkey *k,
940 int level, bool write)
950 if (current->bio_list)
951 return ERR_PTR(-EAGAIN);
953 mutex_lock(&c->bucket_lock);
954 b = mca_alloc(c, k, level);
955 mutex_unlock(&c->bucket_lock);
962 bch_btree_node_read(b);
965 downgrade_write(&b->lock);
967 rw_lock(write, b, level);
968 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
972 BUG_ON(b->level != level);
977 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
978 prefetch(b->keys.set[i].tree);
979 prefetch(b->keys.set[i].data);
982 for (; i <= b->keys.nsets; i++)
983 prefetch(b->keys.set[i].data);
985 if (btree_node_io_error(b)) {
987 return ERR_PTR(-EIO);
995 static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
999 mutex_lock(&c->bucket_lock);
1000 b = mca_alloc(c, k, level);
1001 mutex_unlock(&c->bucket_lock);
1003 if (!IS_ERR_OR_NULL(b)) {
1004 bch_btree_node_read(b);
1011 static void btree_node_free(struct btree *b)
1015 trace_bcache_btree_node_free(b);
1017 BUG_ON(b == b->c->root);
1019 if (btree_node_dirty(b))
1020 btree_complete_write(b, btree_current_write(b));
1021 clear_bit(BTREE_NODE_dirty, &b->flags);
1023 cancel_delayed_work(&b->work);
1025 mutex_lock(&b->c->bucket_lock);
1027 for (i = 0; i < KEY_PTRS(&b->key); i++) {
1028 BUG_ON(atomic_read(&PTR_BUCKET(b->c, &b->key, i)->pin));
1030 bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
1031 PTR_BUCKET(b->c, &b->key, i));
1034 bch_bucket_free(b->c, &b->key);
1036 mutex_unlock(&b->c->bucket_lock);
1039 struct btree *bch_btree_node_alloc(struct cache_set *c, int level, bool wait)
1042 struct btree *b = ERR_PTR(-EAGAIN);
1044 mutex_lock(&c->bucket_lock);
1046 if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
1049 bkey_put(c, &k.key);
1050 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1052 b = mca_alloc(c, &k.key, level);
1058 "Tried to allocate bucket that was in btree cache");
1063 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
1065 mutex_unlock(&c->bucket_lock);
1067 trace_bcache_btree_node_alloc(b);
1070 bch_bucket_free(c, &k.key);
1072 mutex_unlock(&c->bucket_lock);
1074 trace_bcache_btree_node_alloc_fail(b);
1078 static struct btree *btree_node_alloc_replacement(struct btree *b, bool wait)
1080 struct btree *n = bch_btree_node_alloc(b->c, b->level, wait);
1081 if (!IS_ERR_OR_NULL(n)) {
1082 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1083 bkey_copy_key(&n->key, &b->key);
1089 static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1093 bkey_copy(k, &b->key);
1094 bkey_copy_key(k, &ZERO_KEY);
1096 for (i = 0; i < KEY_PTRS(k); i++) {
1097 uint8_t g = PTR_BUCKET(b->c, k, i)->gen + 1;
1099 SET_PTR_GEN(k, i, g);
1102 atomic_inc(&b->c->prio_blocked);
1105 static int btree_check_reserve(struct btree *b, struct btree_op *op)
1107 struct cache_set *c = b->c;
1109 unsigned i, reserve = c->root->level * 2 + 1;
1112 mutex_lock(&c->bucket_lock);
1114 for_each_cache(ca, c, i)
1115 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1117 prepare_to_wait(&c->bucket_wait, &op->wait,
1118 TASK_UNINTERRUPTIBLE);
1123 mutex_unlock(&c->bucket_lock);
1127 /* Garbage collection */
1129 static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1137 * ptr_invalid() can't return true for the keys that mark btree nodes as
1138 * freed, but since ptr_bad() returns true we'll never actually use them
1139 * for anything and thus we don't want to mark their pointers here
1141 if (!bkey_cmp(k, &ZERO_KEY))
1144 for (i = 0; i < KEY_PTRS(k); i++) {
1145 if (!ptr_available(c, k, i))
1148 g = PTR_BUCKET(c, k, i);
1150 if (gen_after(g->gc_gen, PTR_GEN(k, i)))
1151 g->gc_gen = PTR_GEN(k, i);
1153 if (ptr_stale(c, k, i)) {
1154 stale = max(stale, ptr_stale(c, k, i));
1158 cache_bug_on(GC_MARK(g) &&
1159 (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1160 c, "inconsistent ptrs: mark = %llu, level = %i",
1164 SET_GC_MARK(g, GC_MARK_METADATA);
1165 else if (KEY_DIRTY(k))
1166 SET_GC_MARK(g, GC_MARK_DIRTY);
1168 /* guard against overflow */
1169 SET_GC_SECTORS_USED(g, min_t(unsigned,
1170 GC_SECTORS_USED(g) + KEY_SIZE(k),
1171 MAX_GC_SECTORS_USED));
1173 BUG_ON(!GC_SECTORS_USED(g));
1179 #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
1181 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1185 for (i = 0; i < KEY_PTRS(k); i++)
1186 if (ptr_available(c, k, i) &&
1187 !ptr_stale(c, k, i)) {
1188 struct bucket *b = PTR_BUCKET(c, k, i);
1190 b->gen = PTR_GEN(k, i);
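			/* buckets holding metadata keep BTREE_PRIO; demote
			 * data buckets that no longer hold metadata */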
1192 if (level && bkey_cmp(k, &ZERO_KEY))
1193 b->prio = BTREE_PRIO;
1194 else if (!level && b->prio == BTREE_PRIO)
1195 b->prio = INITIAL_PRIO;
1198 __bch_btree_mark_key(c, level, k);
1201 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1204 unsigned keys = 0, good_keys = 0;
1206 struct btree_iter iter;
1207 struct bset_tree *t;
1211 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1212 stale = max(stale, btree_mark_key(b, k));
1215 if (bch_ptr_bad(&b->keys, k))
1218 gc->key_bytes += bkey_u64s(k);
1222 gc->data += KEY_SIZE(k);
1225 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1226 btree_bug_on(t->size &&
1227 bset_written(&b->keys, t) &&
1228 bkey_cmp(&b->key, &t->end) < 0,
1229 b, "found short btree key in gc");
1231 if (b->c->gc_always_rewrite)
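	/* more than half the keys are dead: rewriting will compact the node */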
1237 if ((keys - good_keys) * 2 > keys)
1243 #define GC_MERGE_NODES 4U
1245 struct gc_merge_info {
1250 static int bch_btree_insert_node(struct btree *, struct btree_op *,
1251 struct keylist *, atomic_t *, struct bkey *);
1253 static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1254 struct keylist *keylist, struct gc_stat *gc,
1255 struct gc_merge_info *r)
1257 unsigned i, nodes = 0, keys = 0, blocks;
1258 struct btree *new_nodes[GC_MERGE_NODES];
1262 memset(new_nodes, 0, sizeof(new_nodes));
1263 closure_init_stack(&cl);
1265 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1266 keys += r[nodes++].keys;
1268 blocks = btree_default_blocks(b->c) * 2 / 3;
1271 __set_blocks(b->keys.set[0].data, keys,
1272 block_bytes(b->c)) > blocks * (nodes - 1))
1275 for (i = 0; i < nodes; i++) {
1276 new_nodes[i] = btree_node_alloc_replacement(r[i].b, false);
1277 if (IS_ERR_OR_NULL(new_nodes[i]))
1278 goto out_nocoalesce;
1281 for (i = nodes - 1; i > 0; --i) {
1282 struct bset *n1 = btree_bset_first(new_nodes[i]);
1283 struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1284 struct bkey *k, *last = NULL;
1290 k < bset_bkey_last(n2);
1292 if (__set_blocks(n1, n1->keys + keys +
1294 block_bytes(b->c)) > blocks)
1298 keys += bkey_u64s(k);
1302 * Last node we're not getting rid of - we're getting
1303 * rid of the node at r[0]. Have to try and fit all of
1304 * the remaining keys into this node; we can't ensure
1305 * they will always fit due to rounding and variable
1306 * length keys (shouldn't be possible in practice, though)
1309 if (__set_blocks(n1, n1->keys + n2->keys,
1310 block_bytes(b->c)) >
1311 btree_blocks(new_nodes[i]))
1312 goto out_nocoalesce;
1315 /* Take the key of the node we're getting rid of */
1319 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
1320 btree_blocks(new_nodes[i]));
1323 bkey_copy_key(&new_nodes[i]->key, last);
1325 memcpy(bset_bkey_last(n1),
1327 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1330 r[i].keys = n1->keys;
1333 bset_bkey_idx(n2, keys),
1334 (void *) bset_bkey_last(n2) -
1335 (void *) bset_bkey_idx(n2, keys));
1339 if (__bch_keylist_realloc(keylist,
1340 bkey_u64s(&new_nodes[i]->key)))
1341 goto out_nocoalesce;
1343 bch_btree_node_write(new_nodes[i], &cl);
1344 bch_keylist_add(keylist, &new_nodes[i]->key);
1347 for (i = 0; i < nodes; i++) {
1348 if (__bch_keylist_realloc(keylist, bkey_u64s(&r[i].b->key)))
1349 goto out_nocoalesce;
1351 make_btree_freeing_key(r[i].b, keylist->top);
1352 bch_keylist_push(keylist);
1355 /* We emptied out this node */
1356 BUG_ON(btree_bset_first(new_nodes[0])->keys);
1357 btree_node_free(new_nodes[0]);
1358 rw_unlock(true, new_nodes[0]);
1362 for (i = 0; i < nodes; i++) {
1363 btree_node_free(r[i].b);
1364 rw_unlock(true, r[i].b);
1366 r[i].b = new_nodes[i];
1369 bch_btree_insert_node(b, op, keylist, NULL, NULL);
1370 BUG_ON(!bch_keylist_empty(keylist));
1372 memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1373 r[nodes - 1].b = ERR_PTR(-EINTR);
1375 trace_bcache_btree_gc_coalesce(nodes);
1378 /* Invalidated our iterator */
1384 while ((k = bch_keylist_pop(keylist)))
1385 if (!bkey_cmp(k, &ZERO_KEY))
1386 atomic_dec(&b->c->prio_blocked);
1388 for (i = 0; i < nodes; i++)
1389 if (!IS_ERR_OR_NULL(new_nodes[i])) {
1390 btree_node_free(new_nodes[i]);
1391 rw_unlock(true, new_nodes[i]);
1396 static unsigned btree_gc_count_keys(struct btree *b)
1399 struct btree_iter iter;
1402 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1403 ret += bkey_u64s(k);
1408 static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1409 struct closure *writes, struct gc_stat *gc)
1413 bool should_rewrite;
1416 struct keylist keys;
1417 struct btree_iter iter;
1418 struct gc_merge_info r[GC_MERGE_NODES];
1419 struct gc_merge_info *last = r + GC_MERGE_NODES - 1;
1421 bch_keylist_init(&keys);
1422 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1424 for (i = 0; i < GC_MERGE_NODES; i++)
1425 r[i].b = ERR_PTR(-EINTR);
1428 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1430 r->b = bch_btree_node_get(b->c, k, b->level - 1, true);
1432 ret = PTR_ERR(r->b);
1436 r->keys = btree_gc_count_keys(r->b);
1438 ret = btree_gc_coalesce(b, op, &keys, gc, r);
1446 if (!IS_ERR(last->b)) {
1447 should_rewrite = btree_gc_mark_node(last->b, gc);
1448 if (should_rewrite &&
1449 !btree_check_reserve(b, NULL)) {
1450 n = btree_node_alloc_replacement(last->b,
1453 if (!IS_ERR_OR_NULL(n)) {
1454 bch_btree_node_write_sync(n);
1455 bch_keylist_add(&keys, &n->key);
1457 make_btree_freeing_key(last->b,
1459 bch_keylist_push(&keys);
1461 btree_node_free(last->b);
1463 bch_btree_insert_node(b, op, &keys,
1465 BUG_ON(!bch_keylist_empty(&keys));
1467 rw_unlock(true, last->b);
1470 /* Invalidated our iterator */
1476 if (last->b->level) {
1477 ret = btree_gc_recurse(last->b, op, writes, gc);
1482 bkey_copy_key(&b->c->gc_done, &last->b->key);
1485 * Must flush leaf nodes before gc ends, since replace
1486 * operations aren't journalled
1488 if (btree_node_dirty(last->b))
1489 bch_btree_node_write(last->b, writes);
1490 rw_unlock(true, last->b);
1493 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1496 if (need_resched()) {
1502 for (i = 0; i < GC_MERGE_NODES; i++)
1503 if (!IS_ERR_OR_NULL(r[i].b)) {
1504 if (btree_node_dirty(r[i].b))
1505 bch_btree_node_write(r[i].b, writes);
1506 rw_unlock(true, r[i].b);
1509 bch_keylist_free(&keys);
1514 static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1515 struct closure *writes, struct gc_stat *gc)
1517 struct btree *n = NULL;
1519 bool should_rewrite;
1521 should_rewrite = btree_gc_mark_node(b, gc);
1522 if (should_rewrite) {
1523 n = btree_node_alloc_replacement(b, false);
1525 if (!IS_ERR_OR_NULL(n)) {
1526 bch_btree_node_write_sync(n);
1527 bch_btree_set_root(n);
1535 __bch_btree_mark_key(b->c, b->level + 1, &b->key);
1538 ret = btree_gc_recurse(b, op, writes, gc);
1543 bkey_copy_key(&b->c->gc_done, &b->key);
1548 static void btree_gc_start(struct cache_set *c)
1554 if (!c->gc_mark_valid)
1557 mutex_lock(&c->bucket_lock);
1559 c->gc_mark_valid = 0;
1560 c->gc_done = ZERO_KEY;
1562 for_each_cache(ca, c, i)
1563 for_each_bucket(b, ca) {
1565 if (!atomic_read(&b->pin)) {
1566 SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
1567 SET_GC_SECTORS_USED(b, 0);
1571 mutex_unlock(&c->bucket_lock);
1574 size_t bch_btree_gc_finish(struct cache_set *c)
1576 size_t available = 0;
1581 mutex_lock(&c->bucket_lock);
1584 c->gc_mark_valid = 1;
1587 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1588 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1591 /* don't reclaim buckets to which writeback keys point */
1593 for (i = 0; i < c->nr_uuids; i++) {
1594 struct bcache_device *d = c->devices[i];
1595 struct cached_dev *dc;
1596 struct keybuf_key *w, *n;
1599 if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1601 dc = container_of(d, struct cached_dev, disk);
1603 spin_lock(&dc->writeback_keys.lock);
1604 rbtree_postorder_for_each_entry_safe(w, n,
1605 &dc->writeback_keys.keys, node)
1606 for (j = 0; j < KEY_PTRS(&w->key); j++)
1607 SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1609 spin_unlock(&dc->writeback_keys.lock);
1613 for_each_cache(ca, c, i) {
1616 ca->invalidate_needs_gc = 0;
1618 for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
1619 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1621 for (i = ca->prio_buckets;
1622 i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
1623 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1625 for_each_bucket(b, ca) {
1626 b->last_gc = b->gc_gen;
1627 c->need_gc = max(c->need_gc, bucket_gc_gen(b));
1629 if (!atomic_read(&b->pin) &&
1630 GC_MARK(b) == GC_MARK_RECLAIMABLE) {
1632 if (!GC_SECTORS_USED(b))
1633 bch_bucket_add_unused(ca, b);
1638 mutex_unlock(&c->bucket_lock);
1642 static void bch_btree_gc(struct cache_set *c)
1645 unsigned long available;
1646 struct gc_stat stats;
1647 struct closure writes;
1649 uint64_t start_time = local_clock();
1651 trace_bcache_gc_start(c);
1653 memset(&stats, 0, sizeof(struct gc_stat));
1654 closure_init_stack(&writes);
1655 bch_btree_op_init(&op, SHRT_MAX);
1660 ret = btree_root(gc_root, c, &op, &writes, &stats);
1661 closure_sync(&writes);
1663 if (ret && ret != -EAGAIN)
1664 pr_warn("gc failed!");
1667 available = bch_btree_gc_finish(c);
1668 wake_up_allocators(c);
1670 bch_time_stats_update(&c->btree_gc_time, start_time);
1672 stats.key_bytes *= sizeof(uint64_t);
1674 stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets;
1675 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1677 trace_bcache_gc_end(c);
1682 static int bch_gc_thread(void *arg)
1684 struct cache_set *c = arg;
1692 set_current_state(TASK_INTERRUPTIBLE);
1693 if (kthread_should_stop())
1696 mutex_lock(&c->bucket_lock);
1698 for_each_cache(ca, c, i)
1699 if (ca->invalidate_needs_gc) {
1700 mutex_unlock(&c->bucket_lock);
1701 set_current_state(TASK_RUNNING);
1705 mutex_unlock(&c->bucket_lock);
1714 int bch_gc_thread_start(struct cache_set *c)
1716 c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
1717 if (IS_ERR(c->gc_thread))
1718 return PTR_ERR(c->gc_thread);
1720 set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
1724 /* Initial partial gc */
1726 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1729 struct bkey *k, *p = NULL;
1730 struct btree_iter iter;
1732 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1733 bch_initial_mark_key(b->c, b->level, k);
1735 bch_initial_mark_key(b->c, b->level + 1, &b->key);
1738 bch_btree_iter_init(&b->keys, &iter, NULL);
1741 k = bch_btree_iter_next_filter(&iter, &b->keys,
1744 btree_node_prefetch(b->c, k, b->level - 1);
1747 ret = btree(check_recurse, p, b, op);
1750 } while (p && !ret);
1756 int bch_btree_check(struct cache_set *c)
1760 bch_btree_op_init(&op, SHRT_MAX);
1762 return btree_root(check_recurse, c, &op);
1765 /* Btree insertion */
1767 static bool btree_insert_key(struct btree *b, struct bkey *k,
1768 struct bkey *replace_key)
1772 BUG_ON(bkey_cmp(k, &b->key) > 0);
1774 status = bch_btree_insert_key(&b->keys, k, replace_key);
1775 if (status != BTREE_INSERT_STATUS_NO_INSERT) {
1776 bch_check_keys(&b->keys, "%u for %s", status,
1777 replace_key ? "replace" : "insert");
1779 trace_bcache_btree_insert_key(b, k, replace_key != NULL,
1786 static size_t insert_u64s_remaining(struct btree *b)
1788 long ret = bch_btree_keys_u64s_remaining(&b->keys);
1791 * Might land in the middle of an existing extent and have to split it
1793 if (b->keys.ops->is_extents)
1794 ret -= KEY_MAX_U64S;
1796 return max(ret, 0L);
1799 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
1800 struct keylist *insert_keys,
1801 struct bkey *replace_key)
1804 int oldsize = bch_count_data(&b->keys);
1806 while (!bch_keylist_empty(insert_keys)) {
1807 struct bkey *k = insert_keys->keys;
1809 if (bkey_u64s(k) > insert_u64s_remaining(b))
1812 if (bkey_cmp(k, &b->key) <= 0) {
1816 ret |= btree_insert_key(b, k, replace_key);
1817 bch_keylist_pop_front(insert_keys);
1818 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
1819 BKEY_PADDED(key) temp;
1820 bkey_copy(&temp.key, insert_keys->keys);
1822 bch_cut_back(&b->key, &temp.key);
1823 bch_cut_front(&b->key, insert_keys->keys);
1825 ret |= btree_insert_key(b, &temp.key, replace_key);
1833 op->insert_collision = true;
1835 BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
1837 BUG_ON(bch_count_data(&b->keys) < oldsize);
1841 static int btree_split(struct btree *b, struct btree_op *op,
1842 struct keylist *insert_keys,
1843 struct bkey *replace_key)
1846 struct btree *n1, *n2 = NULL, *n3 = NULL;
1847 uint64_t start_time = local_clock();
1849 struct keylist parent_keys;
1851 closure_init_stack(&cl);
1852 bch_keylist_init(&parent_keys);
1855 btree_check_reserve(b, op))
1858 n1 = btree_node_alloc_replacement(b, true);
1862 split = set_blocks(btree_bset_first(n1),
1863 block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
1868 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
1870 n2 = bch_btree_node_alloc(b->c, b->level, true);
1875 n3 = bch_btree_node_alloc(b->c, b->level + 1, true);
1880 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
1883 * Has to be a linear search because we don't have an auxiliary search tree yet
1887 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
1888 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
1891 bkey_copy_key(&n1->key,
1892 bset_bkey_idx(btree_bset_first(n1), keys));
1893 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
1895 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
1896 btree_bset_first(n1)->keys = keys;
1898 memcpy(btree_bset_first(n2)->start,
1899 bset_bkey_last(btree_bset_first(n1)),
1900 btree_bset_first(n2)->keys * sizeof(uint64_t));
1902 bkey_copy_key(&n2->key, &b->key);
1904 bch_keylist_add(&parent_keys, &n2->key);
1905 bch_btree_node_write(n2, &cl);
1906 rw_unlock(true, n2);
1908 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
1910 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
1913 bch_keylist_add(&parent_keys, &n1->key);
1914 bch_btree_node_write(n1, &cl);
1917 /* Depth increases, make a new root */
1918 bkey_copy_key(&n3->key, &MAX_KEY);
1919 bch_btree_insert_keys(n3, op, &parent_keys, NULL);
1920 bch_btree_node_write(n3, &cl);
1923 bch_btree_set_root(n3);
1924 rw_unlock(true, n3);
1927 } else if (!b->parent) {
1928 /* Root filled up but didn't need to be split */
1930 bch_btree_set_root(n1);
1934 /* Split a non root node */
1936 make_btree_freeing_key(b, parent_keys.top);
1937 bch_keylist_push(&parent_keys);
1941 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
1942 BUG_ON(!bch_keylist_empty(&parent_keys));
1945 rw_unlock(true, n1);
1947 bch_time_stats_update(&b->c->btree_split_time, start_time);
1951 bkey_put(b->c, &n2->key);
1952 btree_node_free(n2);
1953 rw_unlock(true, n2);
1955 bkey_put(b->c, &n1->key);
1956 btree_node_free(n1);
1957 rw_unlock(true, n1);
1959 WARN(1, "bcache: btree split failed");
1961 if (n3 == ERR_PTR(-EAGAIN) ||
1962 n2 == ERR_PTR(-EAGAIN) ||
1963 n1 == ERR_PTR(-EAGAIN))
1969 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1970 struct keylist *insert_keys,
1971 atomic_t *journal_ref,
1972 struct bkey *replace_key)
1974 BUG_ON(b->level && replace_key);
1976 if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
1977 if (current->bio_list) {
1978 op->lock = b->c->root->level + 1;
1980 } else if (op->lock <= b->c->root->level) {
1981 op->lock = b->c->root->level + 1;
1984 /* Invalidated all iterators */
1985 int ret = btree_split(b, op, insert_keys, replace_key);
1987 return bch_keylist_empty(insert_keys) ?
1991 BUG_ON(write_block(b) != btree_bset_last(b));
1993 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
1995 bch_btree_leaf_dirty(b, journal_ref);
1997 bch_btree_node_write_sync(b);
2004 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2005 struct bkey *check_key)
2008 uint64_t btree_ptr = b->key.ptr[0];
2009 unsigned long seq = b->seq;
2010 struct keylist insert;
2011 bool upgrade = op->lock == -1;
2013 bch_keylist_init(&insert);
2016 rw_unlock(false, b);
2017 rw_lock(true, b, b->level);
2019 if (b->key.ptr[0] != btree_ptr ||
2024 SET_KEY_PTRS(check_key, 1);
2025 get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2027 SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2029 bch_keylist_add(&insert, check_key);
2031 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2033 BUG_ON(!ret && !bch_keylist_empty(&insert));
2036 downgrade_write(&b->lock);
2040 struct btree_insert_op {
2042 struct keylist *keys;
2043 atomic_t *journal_ref;
2044 struct bkey *replace_key;
2047 static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2049 struct btree_insert_op *op = container_of(b_op,
2050 struct btree_insert_op, op);
2052 int ret = bch_btree_insert_node(b, &op->op, op->keys,
2053 op->journal_ref, op->replace_key);
2054 if (ret && !bch_keylist_empty(op->keys))
2060 int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2061 atomic_t *journal_ref, struct bkey *replace_key)
2063 struct btree_insert_op op;
2066 BUG_ON(current->bio_list);
2067 BUG_ON(bch_keylist_empty(keys));
2069 bch_btree_op_init(&op.op, 0);
2071 op.journal_ref = journal_ref;
2072 op.replace_key = replace_key;
2074 while (!ret && !bch_keylist_empty(keys)) {
2076 ret = bch_btree_map_leaf_nodes(&op.op, c,
2077 &START_KEY(keys->keys),
2084 pr_err("error %i", ret);
2086 while ((k = bch_keylist_pop(keys)))
2088 } else if (op.op.insert_collision)
2094 void bch_btree_set_root(struct btree *b)
2099 closure_init_stack(&cl);
2101 trace_bcache_btree_set_root(b);
2103 BUG_ON(!b->written);
2105 for (i = 0; i < KEY_PTRS(&b->key); i++)
2106 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2108 mutex_lock(&b->c->bucket_lock);
2109 list_del_init(&b->list);
2110 mutex_unlock(&b->c->bucket_lock);
2114 bch_journal_meta(b->c, &cl);
2118 /* Map across nodes or keys */
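/*
 * Example caller (see bch_refill_keybuf() below):
 *
 *	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
 *			   refill_keybuf_fn, MAP_END_KEY);
 */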
2120 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2122 btree_map_nodes_fn *fn, int flags)
2124 int ret = MAP_CONTINUE;
2128 struct btree_iter iter;
2130 bch_btree_iter_init(&b->keys, &iter, from);
2132 while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
2134 ret = btree(map_nodes_recurse, k, b,
2135 op, from, fn, flags);
2138 if (ret != MAP_CONTINUE)
2143 if (!b->level || flags == MAP_ALL_NODES)
2149 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2150 struct bkey *from, btree_map_nodes_fn *fn, int flags)
2152 return btree_root(map_nodes_recurse, c, op, from, fn, flags);
2155 static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2156 struct bkey *from, btree_map_keys_fn *fn,
2159 int ret = MAP_CONTINUE;
2161 struct btree_iter iter;
2163 bch_btree_iter_init(&b->keys, &iter, from);
2165 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
2168 : btree(map_keys_recurse, k, b, op, from, fn, flags);
2171 if (ret != MAP_CONTINUE)
2175 if (!b->level && (flags & MAP_END_KEY))
2176 ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2177 KEY_OFFSET(&b->key), 0));
2182 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2183 struct bkey *from, btree_map_keys_fn *fn, int flags)
2185 return btree_root(map_keys_recurse, c, op, from, fn, flags);
2190 static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2192 /* Overlapping keys compare equal */
2193 if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2195 if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2200 static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2201 struct keybuf_key *r)
2203 return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2211 keybuf_pred_fn *pred;
2214 static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2217 struct refill *refill = container_of(op, struct refill, op);
2218 struct keybuf *buf = refill->buf;
2219 int ret = MAP_CONTINUE;
2221 if (bkey_cmp(k, refill->end) >= 0) {
2226 if (!KEY_SIZE(k)) /* end key */
2229 if (refill->pred(buf, k)) {
2230 struct keybuf_key *w;
2232 spin_lock(&buf->lock);
2234 w = array_alloc(&buf->freelist);
2236 spin_unlock(&buf->lock);
2241 bkey_copy(&w->key, k);
2243 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2244 array_free(&buf->freelist, w);
2248 if (array_freelist_empty(&buf->freelist))
2251 spin_unlock(&buf->lock);
2254 buf->last_scanned = *k;
2258 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
2259 struct bkey *end, keybuf_pred_fn *pred)
2261 struct bkey start = buf->last_scanned;
2262 struct refill refill;
2266 bch_btree_op_init(&refill.op, -1);
2267 refill.nr_found = 0;
2272 bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2273 refill_keybuf_fn, MAP_END_KEY);
2275 trace_bcache_keyscan(refill.nr_found,
2276 KEY_INODE(&start), KEY_OFFSET(&start),
2277 KEY_INODE(&buf->last_scanned),
2278 KEY_OFFSET(&buf->last_scanned));
2280 spin_lock(&buf->lock);
2282 if (!RB_EMPTY_ROOT(&buf->keys)) {
2283 struct keybuf_key *w;
2284 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2285 buf->start = START_KEY(&w->key);
2287 w = RB_LAST(&buf->keys, struct keybuf_key, node);
2290 buf->start = MAX_KEY;
2294 spin_unlock(&buf->lock);
2297 static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2299 rb_erase(&w->node, &buf->keys);
2300 array_free(&buf->freelist, w);
2303 void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2305 spin_lock(&buf->lock);
2306 __bch_keybuf_del(buf, w);
2307 spin_unlock(&buf->lock);
2310 bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2314 struct keybuf_key *p, *w, s;
2317 if (bkey_cmp(end, &buf->start) <= 0 ||
2318 bkey_cmp(start, &buf->end) >= 0)
2321 spin_lock(&buf->lock);
2322 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2324 while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2326 w = RB_NEXT(w, node);
2331 __bch_keybuf_del(buf, p);
2334 spin_unlock(&buf->lock);
2338 struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2340 struct keybuf_key *w;
2341 spin_lock(&buf->lock);
2343 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2345 while (w && w->private)
2346 w = RB_NEXT(w, node);
2349 w->private = ERR_PTR(-EINTR);
2351 spin_unlock(&buf->lock);
2355 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2358 keybuf_pred_fn *pred)
2360 struct keybuf_key *ret;
2363 ret = bch_keybuf_next(buf);
2367 if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2368 pr_debug("scan finished");
2372 bch_refill_keybuf(c, buf, end, pred);
2378 void bch_keybuf_init(struct keybuf *buf)
2380 buf->last_scanned = MAX_KEY;
2381 buf->keys = RB_ROOT;
2383 spin_lock_init(&buf->lock);
2384 array_allocator_init(&buf->freelist);
2387 void bch_btree_exit(void)
2390 destroy_workqueue(btree_io_wq);
2393 int __init bch_btree_init(void)
2395 btree_io_wq = create_singlethread_workqueue("bch_btree_io");