bcache: Move keylist out of btree_op
[cascardo/linux.git] drivers/md/bcache/journal.c
1 /*
2  * bcache journalling code, for btree insertions
3  *
4  * Copyright 2012 Google, Inc.
5  */
6
7 #include "bcache.h"
8 #include "btree.h"
9 #include "debug.h"
10 #include "request.h"
11
12 #include <trace/events/bcache.h>
13
14 /*
15  * Journal replay/recovery:
16  *
17  * This code is all driven from run_cache_set(); we first read the journal
18  * entries, do some other stuff, then we mark all the keys in the journal
19  * entries (same as garbage collection would), then we replay them - reinserting
20  * them into the cache in precisely the same order as they appear in the
21  * journal.
22  *
23  * We only journal keys that go in leaf nodes, which simplifies things quite a
24  * bit.
25  */
26
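/*
 * A sketch of that sequence as run_cache_set() drives it (illustrative only;
 * the real caller lives in super.c and interleaves other initialisation
 * between the steps):
 */
#if 0
	/* c: the cache set being brought up; journal: an empty list_head;
	 * op: an initialised struct btree_op */
	bch_journal_read(c, &journal, &op);	/* collect entries, oldest first */
	/* ... other recovery work ... */
	bch_journal_mark(c, &journal);		/* bump bucket pins, mark keys */
	bch_journal_replay(c, &journal, &op);	/* reinsert keys in journal order */
#endif
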
27 static void journal_read_endio(struct bio *bio, int error)
28 {
29         struct closure *cl = bio->bi_private;
30         closure_put(cl);
31 }
32
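/*
 * Read and parse one journal bucket: entries are validated (magic, size,
 * checksum) and spliced into *list in sequence-number order, dropping
 * anything older than the newest last_seq seen so far.  Returns a negative
 * errno on allocation failure, 0 if the bucket contributed no entries, or 1
 * if at least one entry was added to the list.
 */
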
33 static int journal_read_bucket(struct cache *ca, struct list_head *list,
34                                struct btree_op *op, unsigned bucket_index)
35 {
36         struct journal_device *ja = &ca->journal;
37         struct bio *bio = &ja->bio;
38
39         struct journal_replay *i;
40         struct jset *j, *data = ca->set->journal.w[0].data;
41         unsigned len, left, offset = 0;
42         int ret = 0;
43         sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);
44
45         pr_debug("reading %llu", (uint64_t) bucket);
46
47         while (offset < ca->sb.bucket_size) {
48 reread:         left = ca->sb.bucket_size - offset;
49                 len = min_t(unsigned, left, PAGE_SECTORS * 8);
50
51                 bio_reset(bio);
52                 bio->bi_sector  = bucket + offset;
53                 bio->bi_bdev    = ca->bdev;
54                 bio->bi_rw      = READ;
55                 bio->bi_size    = len << 9;
56
57                 bio->bi_end_io  = journal_read_endio;
58                 bio->bi_private = &op->cl;
59                 bch_bio_map(bio, data);
60
61                 closure_bio_submit(bio, &op->cl, ca);
62                 closure_sync(&op->cl);
63
64                 /* This function could be simpler now since we no longer write
65                  * journal entries that overlap bucket boundaries; this means
66                  * the start of a bucket will always have a valid journal entry
67                  * if it has any journal entries at all.
68                  */
69
70                 j = data;
71                 while (len) {
72                         struct list_head *where;
73                         size_t blocks, bytes = set_bytes(j);
74
75                         if (j->magic != jset_magic(ca->set))
76                                 return ret;
77
78                         if (bytes > left << 9)
79                                 return ret;
80
81                         if (bytes > len << 9)
82                                 goto reread;
83
84                         if (j->csum != csum_set(j))
85                                 return ret;
86
87                         blocks = set_blocks(j, ca->set);
88
89                         while (!list_empty(list)) {
90                                 i = list_first_entry(list,
91                                         struct journal_replay, list);
92                                 if (i->j.seq >= j->last_seq)
93                                         break;
94                                 list_del(&i->list);
95                                 kfree(i);
96                         }
97
98                         list_for_each_entry_reverse(i, list, list) {
99                                 if (j->seq == i->j.seq)
100                                         goto next_set;
101
102                                 if (j->seq < i->j.last_seq)
103                                         goto next_set;
104
105                                 if (j->seq > i->j.seq) {
106                                         where = &i->list;
107                                         goto add;
108                                 }
109                         }
110
111                         where = list;
112 add:
113                         i = kmalloc(offsetof(struct journal_replay, j) +
114                                     bytes, GFP_KERNEL);
115                         if (!i)
116                                 return -ENOMEM;
117                         memcpy(&i->j, j, bytes);
118                         list_add(&i->list, where);
119                         ret = 1;
120
121                         ja->seq[bucket_index] = j->seq;
122 next_set:
123                         offset  += blocks * ca->sb.block_size;
124                         len     -= blocks * ca->sb.block_size;
125                         j = ((void *) j) + blocks * block_bytes(ca);
126                 }
127         }
128
129         return ret;
130 }
131
132 int bch_journal_read(struct cache_set *c, struct list_head *list,
133                         struct btree_op *op)
134 {
135 #define read_bucket(b)                                                  \
136         ({                                                              \
137                 int ret = journal_read_bucket(ca, list, op, b);         \
138                 __set_bit(b, bitmap);                                   \
139                 if (ret < 0)                                            \
140                         return ret;                                     \
141                 ret;                                                    \
142         })
143
144         struct cache *ca;
145         unsigned iter;
146
147         for_each_cache(ca, c, iter) {
148                 struct journal_device *ja = &ca->journal;
149                 unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG];
150                 unsigned i, l, r, m;
151                 uint64_t seq;
152
153                 bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
154                 pr_debug("%u journal buckets", ca->sb.njournal_buckets);
155
156                 /*
157                  * Read journal buckets ordered by golden ratio hash to quickly
158                  * find a sequence of buckets with valid journal entries
159                  */
160                 for (i = 0; i < ca->sb.njournal_buckets; i++) {
161                         l = (i * 2654435769U) % ca->sb.njournal_buckets;
162
163                         if (test_bit(l, bitmap))
164                                 break;
165
166                         if (read_bucket(l))
167                                 goto bsearch;
168                 }
169
170                 /*
171                  * If that fails, check all the buckets we haven't checked
172                  * already
173                  */
174                 pr_debug("falling back to linear search");
175
176                 for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
177                      l < ca->sb.njournal_buckets;
178                      l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
179                         if (read_bucket(l))
180                                 goto bsearch;
181
182                 if (list_empty(list))
183                         continue;
184 bsearch:
185                 /* Binary search */
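		/*
		 * Invariant: bucket l holds the newest journal entries seen so
		 * far, while bucket r did not add anything newer.  Assuming the
		 * journal buckets are written in ring order, bisecting [l, r)
		 * converges on the bucket holding the head of the journal
		 * without reading every bucket in between.
		 */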
186                 m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
187                 pr_debug("starting binary search, l %u r %u", l, r);
188
189                 while (l + 1 < r) {
190                         seq = list_entry(list->prev, struct journal_replay,
191                                          list)->j.seq;
192
193                         m = (l + r) >> 1;
194                         read_bucket(m);
195
196                         if (seq != list_entry(list->prev, struct journal_replay,
197                                               list)->j.seq)
198                                 l = m;
199                         else
200                                 r = m;
201                 }
202
203                 /*
204                  * Read buckets in reverse order until we stop finding more
205                  * journal entries
206                  */
207                 pr_debug("finishing up: m %u njournal_buckets %u",
208                          m, ca->sb.njournal_buckets);
209                 l = m;
210
211                 while (1) {
212                         if (!l--)
213                                 l = ca->sb.njournal_buckets - 1;
214
215                         if (l == m)
216                                 break;
217
218                         if (test_bit(l, bitmap))
219                                 continue;
220
221                         if (!read_bucket(l))
222                                 break;
223                 }
224
225                 seq = 0;
226
227                 for (i = 0; i < ca->sb.njournal_buckets; i++)
228                         if (ja->seq[i] > seq) {
229                                 seq = ja->seq[i];
230                                 ja->cur_idx = ja->discard_idx =
231                                         ja->last_idx = i;
232
233                         }
234         }
235
236         if (!list_empty(list))
237                 c->journal.seq = list_entry(list->prev,
238                                             struct journal_replay,
239                                             list)->j.seq;
240
241         return 0;
242 #undef read_bucket
243 }
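
/*
 * The "golden ratio hash" above is Fibonacci hashing: successive indices i
 * are multiplied by 2654435769 (roughly 2^32 divided by the golden ratio) so
 * that probes are spread across the journal buckets instead of scanning them
 * in order.  A standalone userspace sketch of the probe order (guarded out,
 * not part of this file; assumes the default SB_JOURNAL_BUCKETS of 256):
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned nbuckets = 256, i;

	/* Same expression as bch_journal_read(); prints 0 185 114 43 ... */
	for (i = 0; i < 16; i++)
		printf("%u ", (i * 2654435769U) % nbuckets);
	printf("\n");
	return 0;
}
#endif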
244
245 void bch_journal_mark(struct cache_set *c, struct list_head *list)
246 {
247         atomic_t p = { 0 };
248         struct bkey *k;
249         struct journal_replay *i;
250         struct journal *j = &c->journal;
251         uint64_t last = j->seq;
252
253         /*
254          * journal.pin should never fill up - we never write a journal
255          * entry when it would fill up. But if for some reason it does, we
256          * iterate over the list in reverse order so that we can just skip that
257          * refcount instead of bugging.
258          */
259
260         list_for_each_entry_reverse(i, list, list) {
261                 BUG_ON(last < i->j.seq);
262                 i->pin = NULL;
263
264                 while (last-- != i->j.seq)
265                         if (fifo_free(&j->pin) > 1) {
266                                 fifo_push_front(&j->pin, p);
267                                 atomic_set(&fifo_front(&j->pin), 0);
268                         }
269
270                 if (fifo_free(&j->pin) > 1) {
271                         fifo_push_front(&j->pin, p);
272                         i->pin = &fifo_front(&j->pin);
273                         atomic_set(i->pin, 1);
274                 }
275
276                 for (k = i->j.start;
277                      k < end(&i->j);
278                      k = bkey_next(k)) {
279                         unsigned j;
280
281                         for (j = 0; j < KEY_PTRS(k); j++) {
282                                 struct bucket *g = PTR_BUCKET(c, k, j);
283                                 atomic_inc(&g->pin);
284
285                                 if (g->prio == BTREE_PRIO &&
286                                     !ptr_stale(c, k, j))
287                                         g->prio = INITIAL_PRIO;
288                         }
289
290                         __bch_btree_mark_key(c, 0, k);
291                 }
292         }
293 }
294
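/*
 * Per the commit subject, the keylist used for replay below is a local on
 * the stack rather than a member of struct btree_op; each journalled key is
 * copied onto it, pushed, and consumed by bch_btree_insert() one at a time.
 */
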
295 int bch_journal_replay(struct cache_set *s, struct list_head *list,
296                           struct btree_op *op)
297 {
298         int ret = 0, keys = 0, entries = 0;
299         struct bkey *k;
300         struct journal_replay *i =
301                 list_entry(list->prev, struct journal_replay, list);
302
303         uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
304         struct keylist keylist;
305
306         bch_keylist_init(&keylist);
307
308         list_for_each_entry(i, list, list) {
309                 BUG_ON(i->pin && atomic_read(i->pin) != 1);
310
311                 cache_set_err_on(n != i->j.seq, s,
312 "bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
313                                  n, i->j.seq - 1, start, end);
314
315                 for (k = i->j.start;
316                      k < end(&i->j);
317                      k = bkey_next(k)) {
318                         trace_bcache_journal_replay_key(k);
319
320                         bkey_copy(keylist.top, k);
321                         bch_keylist_push(&keylist);
322
323                         op->journal = i->pin;
324
325                         ret = bch_btree_insert(op, s, &keylist);
326                         if (ret)
327                                 goto err;
328
329                         BUG_ON(!bch_keylist_empty(&keylist));
330                         keys++;
331
332                         cond_resched();
333                 }
334
335                 if (i->pin)
336                         atomic_dec(i->pin);
337                 n = i->j.seq + 1;
338                 entries++;
339         }
340
341         pr_info("journal replay done, %i keys in %i entries, seq %llu",
342                 keys, entries, end);
343
344         while (!list_empty(list)) {
345                 i = list_first_entry(list, struct journal_replay, list);
346                 list_del(&i->list);
347                 kfree(i);
348         }
349 err:
350         closure_sync(&op->cl);
351         return ret;
352 }
353
354 /* Journalling */
355
356 static void btree_flush_write(struct cache_set *c)
357 {
358         /*
359          * Try to find the btree node that references the oldest journal
360          * entry; best is our current candidate and is locked if non-NULL:
361          */
362         struct btree *b, *best;
363         unsigned i;
364 retry:
365         best = NULL;
366
367         for_each_cached_btree(b, c, i)
368                 if (btree_current_write(b)->journal) {
369                         if (!best)
370                                 best = b;
371                         else if (journal_pin_cmp(c,
372                                                  btree_current_write(best),
373                                                  btree_current_write(b))) {
374                                 best = b;
375                         }
376                 }
377
378         b = best;
379         if (b) {
380                 rw_lock(true, b, b->level);
381
382                 if (!btree_current_write(b)->journal) {
383                         rw_unlock(true, b);
384                         /* We raced */
385                         goto retry;
386                 }
387
388                 bch_btree_node_write(b, NULL);
389                 rw_unlock(true, b);
390         }
391 }
392
393 #define last_seq(j)     ((j)->seq - fifo_used(&(j)->pin) + 1)
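
/*
 * last_seq() is the sequence number of the oldest journal entry that still
 * has an open pin: with seq == 101 and two pinned entries, last_seq() == 100.
 * A standalone userspace sketch of the pin FIFO bookkeeping (guarded out,
 * not part of this file; the toy_* names are invented for the example):
 */
#if 0
#include <assert.h>
#include <stdio.h>

#define TOY_PIN_SLOTS	8

struct toy_journal {
	unsigned long long	seq;			/* newest entry written */
	int			pin[TOY_PIN_SLOTS];	/* refcounts, oldest first */
	unsigned		used;
};

static unsigned long long toy_last_seq(struct toy_journal *j)
{
	return j->seq - j->used + 1;		/* mirrors the macro above */
}

static void toy_journal_next(struct toy_journal *j)
{
	assert(j->used < TOY_PIN_SLOTS);
	j->pin[j->used++] = 1;			/* new entry starts pinned */
	j->seq++;
}

static void toy_journal_reclaim(struct toy_journal *j)
{
	unsigned drop = 0, i;

	while (drop < j->used && !j->pin[drop])
		drop++;				/* pop flushed entries from the front */

	for (i = drop; i < j->used; i++)
		j->pin[i - drop] = j->pin[i];
	j->used -= drop;
}

int main(void)
{
	struct toy_journal j = { .seq = 99 };

	toy_journal_next(&j);			/* seq 100 */
	toy_journal_next(&j);			/* seq 101 */
	j.pin[0] = 0;				/* everything up to seq 100 flushed */
	toy_journal_reclaim(&j);

	/* prints: seq 101 last_seq 101 */
	printf("seq %llu last_seq %llu\n", j.seq, toy_last_seq(&j));
	return 0;
}
#endif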
394
395 static void journal_discard_endio(struct bio *bio, int error)
396 {
397         struct journal_device *ja =
398                 container_of(bio, struct journal_device, discard_bio);
399         struct cache *ca = container_of(ja, struct cache, journal);
400
401         atomic_set(&ja->discard_in_flight, DISCARD_DONE);
402
403         closure_wake_up(&ca->set->journal.wait);
404         closure_put(&ca->set->cl);
405 }
406
407 static void journal_discard_work(struct work_struct *work)
408 {
409         struct journal_device *ja =
410                 container_of(work, struct journal_device, discard_work);
411
412         submit_bio(0, &ja->discard_bio);
413 }
414
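/*
 * Per-device discard state machine: in DISCARD_READY, if discard_idx has not
 * caught up with last_idx, a discard is submitted for the bucket at
 * discard_idx and the state moves to DISCARD_IN_FLIGHT.
 * journal_discard_endio() sets DISCARD_DONE; the next call then advances
 * discard_idx, returns to DISCARD_READY and falls through to issue the next
 * discard.  Devices without discard support simply keep discard_idx in step
 * with last_idx.
 */
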
415 static void do_journal_discard(struct cache *ca)
416 {
417         struct journal_device *ja = &ca->journal;
418         struct bio *bio = &ja->discard_bio;
419
420         if (!ca->discard) {
421                 ja->discard_idx = ja->last_idx;
422                 return;
423         }
424
425         switch (atomic_read(&ja->discard_in_flight)) {
426         case DISCARD_IN_FLIGHT:
427                 return;
428
429         case DISCARD_DONE:
430                 ja->discard_idx = (ja->discard_idx + 1) %
431                         ca->sb.njournal_buckets;
432
433                 atomic_set(&ja->discard_in_flight, DISCARD_READY);
434                 /* fallthrough */
435
436         case DISCARD_READY:
437                 if (ja->discard_idx == ja->last_idx)
438                         return;
439
440                 atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
441
442                 bio_init(bio);
443                 bio->bi_sector          = bucket_to_sector(ca->set,
444                                                 ca->sb.d[ja->discard_idx]);
445                 bio->bi_bdev            = ca->bdev;
446                 bio->bi_rw              = REQ_WRITE|REQ_DISCARD;
447                 bio->bi_max_vecs        = 1;
448                 bio->bi_io_vec          = bio->bi_inline_vecs;
449                 bio->bi_size            = bucket_bytes(ca);
450                 bio->bi_end_io          = journal_discard_endio;
451
452                 closure_get(&ca->set->cl);
453                 INIT_WORK(&ja->discard_work, journal_discard_work);
454                 schedule_work(&ja->discard_work);
455         }
456 }
457
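/*
 * journal_reclaim() advances the tail of the journal: drop pins that have
 * reached zero, move each device's last_idx past buckets whose newest entry
 * is older than last_seq, kick off discards, and, once the current bucket is
 * exhausted, point c->journal.key at a fresh bucket on every device.
 * blocks_free is then one bucket's worth of block-sized units -- e.g.
 * 1024-sector buckets with 8-sector blocks (block_bits == 3) give 128 blocks
 * (figures illustrative, not required by the format).
 */
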
458 static void journal_reclaim(struct cache_set *c)
459 {
460         struct bkey *k = &c->journal.key;
461         struct cache *ca;
462         uint64_t last_seq;
463         unsigned iter, n = 0;
464         atomic_t p;
465
466         while (!atomic_read(&fifo_front(&c->journal.pin)))
467                 fifo_pop(&c->journal.pin, p);
468
469         last_seq = last_seq(&c->journal);
470
471         /* Update last_idx */
472
473         for_each_cache(ca, c, iter) {
474                 struct journal_device *ja = &ca->journal;
475
476                 while (ja->last_idx != ja->cur_idx &&
477                        ja->seq[ja->last_idx] < last_seq)
478                         ja->last_idx = (ja->last_idx + 1) %
479                                 ca->sb.njournal_buckets;
480         }
481
482         for_each_cache(ca, c, iter)
483                 do_journal_discard(ca);
484
485         if (c->journal.blocks_free)
486                 goto out;
487
488         /*
489          * Allocate:
490          * XXX: Sort by free journal space
491          */
492
493         for_each_cache(ca, c, iter) {
494                 struct journal_device *ja = &ca->journal;
495                 unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
496
497                 /* No space available on this device */
498                 if (next == ja->discard_idx)
499                         continue;
500
501                 ja->cur_idx = next;
502                 k->ptr[n++] = PTR(0,
503                                   bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
504                                   ca->sb.nr_this_dev);
505         }
506
507         bkey_init(k);
508         SET_KEY_PTRS(k, n);
509
510         if (n)
511                 c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
512 out:
513         if (!journal_full(&c->journal))
514                 __closure_wake_up(&c->journal.wait);
515 }
516
517 void bch_journal_next(struct journal *j)
518 {
519         atomic_t p = { 1 };
520
521         j->cur = (j->cur == j->w)
522                 ? &j->w[1]
523                 : &j->w[0];
524
525         /*
526          * The fifo_push() needs to happen at the same time as j->seq is
527          * incremented for last_seq() to be calculated correctly
528          */
529         BUG_ON(!fifo_push(&j->pin, p));
530         atomic_set(&fifo_back(&j->pin), 1);
531
532         j->cur->data->seq       = ++j->seq;
533         j->cur->need_write      = false;
534         j->cur->data->keys      = 0;
535
536         if (fifo_full(&j->pin))
537                 pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
538 }
539
540 static void journal_write_endio(struct bio *bio, int error)
541 {
542         struct journal_write *w = bio->bi_private;
543
544         cache_set_err_on(error, w->c, "journal io error");
545         closure_put(&w->c->journal.io);
546 }
547
548 static void journal_write(struct closure *);
549
550 static void journal_write_done(struct closure *cl)
551 {
552         struct journal *j = container_of(cl, struct journal, io);
553         struct journal_write *w = (j->cur == j->w)
554                 ? &j->w[1]
555                 : &j->w[0];
556
557         __closure_wake_up(&w->wait);
558         continue_at_nobarrier(cl, journal_write, system_wq);
559 }
560
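/*
 * The actual journal write: the current jset is checksummed and written to
 * the bucket referenced by each pointer in c->journal.key (one per cache
 * device), with REQ_FLUSH|REQ_FUA so the entry is durable before the closure
 * completes; each pointer's offset is then advanced by the number of sectors
 * written.
 */
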
561 static void journal_write_unlocked(struct closure *cl)
562         __releases(c->journal.lock)
563 {
564         struct cache_set *c = container_of(cl, struct cache_set, journal.io);
565         struct cache *ca;
566         struct journal_write *w = c->journal.cur;
567         struct bkey *k = &c->journal.key;
568         unsigned i, sectors = set_blocks(w->data, c) * c->sb.block_size;
569
570         struct bio *bio;
571         struct bio_list list;
572         bio_list_init(&list);
573
574         if (!w->need_write) {
575                 /*
576                  * XXX: have to unlock closure before we unlock journal lock,
577                  * else we race with bch_journal(). But this way we race
578                  * against cache set unregister. Doh.
579                  */
580                 set_closure_fn(cl, NULL, NULL);
581                 closure_sub(cl, CLOSURE_RUNNING + 1);
582                 spin_unlock(&c->journal.lock);
583                 return;
584         } else if (journal_full(&c->journal)) {
585                 journal_reclaim(c);
586                 spin_unlock(&c->journal.lock);
587
588                 btree_flush_write(c);
589                 continue_at(cl, journal_write, system_wq);
590         }
591
592         c->journal.blocks_free -= set_blocks(w->data, c);
593
594         w->data->btree_level = c->root->level;
595
596         bkey_copy(&w->data->btree_root, &c->root->key);
597         bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
598
599         for_each_cache(ca, c, i)
600                 w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
601
602         w->data->magic          = jset_magic(c);
603         w->data->version        = BCACHE_JSET_VERSION;
604         w->data->last_seq       = last_seq(&c->journal);
605         w->data->csum           = csum_set(w->data);
606
607         for (i = 0; i < KEY_PTRS(k); i++) {
608                 ca = PTR_CACHE(c, k, i);
609                 bio = &ca->journal.bio;
610
611                 atomic_long_add(sectors, &ca->meta_sectors_written);
612
613                 bio_reset(bio);
614                 bio->bi_sector  = PTR_OFFSET(k, i);
615                 bio->bi_bdev    = ca->bdev;
616                 bio->bi_rw      = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
617                 bio->bi_size    = sectors << 9;
618
619                 bio->bi_end_io  = journal_write_endio;
620                 bio->bi_private = w;
621                 bch_bio_map(bio, w->data);
622
623                 trace_bcache_journal_write(bio);
624                 bio_list_add(&list, bio);
625
626                 SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);
627
628                 ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
629         }
630
631         atomic_dec_bug(&fifo_back(&c->journal.pin));
632         bch_journal_next(&c->journal);
633         journal_reclaim(c);
634
635         spin_unlock(&c->journal.lock);
636
637         while ((bio = bio_list_pop(&list)))
638                 closure_bio_submit(bio, cl, c->cache[0]);
639
640         continue_at(cl, journal_write_done, NULL);
641 }
642
643 static void journal_write(struct closure *cl)
644 {
645         struct cache_set *c = container_of(cl, struct cache_set, journal.io);
646
647         spin_lock(&c->journal.lock);
648         journal_write_unlocked(cl);
649 }
650
651 static void journal_try_write(struct cache_set *c)
652         __releases(c->journal.lock)
653 {
654         struct closure *cl = &c->journal.io;
655         struct journal_write *w = c->journal.cur;
656
657         w->need_write = true;
658
659         if (closure_trylock(cl, &c->cl))
660                 journal_write_unlocked(cl);
661         else
662                 spin_unlock(&c->journal.lock);
663 }
664
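/*
 * Wait until the current jset has room for nkeys more keys.  A single
 * journal write is limited both by the space left in the current journal
 * bucket (blocks_free) and by the size of the in-memory jset buffer,
 * PAGE_SECTORS << JSET_BITS sectors -- 8 << 3 = 64 sectors (32 KiB) assuming
 * 4 KiB pages.  If the keys don't fit, either flush the current entry (when
 * the journal itself isn't full) or reclaim space and flush dirty btree
 * nodes, then retry.
 */
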
665 static struct journal_write *journal_wait_for_write(struct cache_set *c,
666                                                     unsigned nkeys)
667 {
668         size_t sectors;
669         struct closure cl;
670
671         closure_init_stack(&cl);
672
673         spin_lock(&c->journal.lock);
674
675         while (1) {
676                 struct journal_write *w = c->journal.cur;
677
678                 sectors = __set_blocks(w->data, w->data->keys + nkeys,
679                                        c) * c->sb.block_size;
680
681                 if (sectors <= min_t(size_t,
682                                      c->journal.blocks_free * c->sb.block_size,
683                                      PAGE_SECTORS << JSET_BITS))
684                         return w;
685
686                 /* XXX: tracepoint */
687                 if (!journal_full(&c->journal)) {
688                         trace_bcache_journal_entry_full(c);
689
690                         /*
691                          * XXX: If we were inserting so many keys that they
692                          * won't fit in an _empty_ journal write, we'll
693                          * deadlock. For now, handle this in
694                          * bch_keylist_realloc() - but something to think about.
695                          */
696                         BUG_ON(!w->data->keys);
697
698                         closure_wait(&w->wait, &cl);
699                         journal_try_write(c); /* unlocks */
700                 } else {
701                         trace_bcache_journal_full(c);
702
703                         closure_wait(&c->journal.wait, &cl);
704                         journal_reclaim(c);
705                         spin_unlock(&c->journal.lock);
706
707                         btree_flush_write(c);
708                 }
709
710                 closure_sync(&cl);
711                 spin_lock(&c->journal.lock);
712         }
713 }
714
715 static void journal_write_work(struct work_struct *work)
716 {
717         struct cache_set *c = container_of(to_delayed_work(work),
718                                            struct cache_set,
719                                            journal.work);
720         spin_lock(&c->journal.lock);
721         journal_try_write(c);
722 }
723
724 /*
725  * Entry point to the journalling code - bio_insert() and btree_invalidate()
726  * pass bch_journal() a list of keys to be journalled, and then
727  * bch_journal() hands those same keys off to btree_insert_async()
728  */
729
730 atomic_t *bch_journal(struct cache_set *c,
731                       struct keylist *keys,
732                       struct closure *parent)
733 {
734         struct journal_write *w;
735         atomic_t *ret;
736
737         if (!CACHE_SYNC(&c->sb))
738                 return NULL;
739
740         w = journal_wait_for_write(c, bch_keylist_nkeys(keys));
741
742         memcpy(end(w->data), keys->keys, bch_keylist_bytes(keys));
743         w->data->keys += bch_keylist_nkeys(keys);
744
745         ret = &fifo_back(&c->journal.pin);
746         atomic_inc(ret);
747
748         if (parent) {
749                 closure_wait(&w->wait, parent);
750                 journal_try_write(c);
751         } else if (!w->need_write) {
752                 schedule_delayed_work(&c->journal.work,
753                                       msecs_to_jiffies(c->journal_delay_ms));
754                 spin_unlock(&c->journal.lock);
755         } else {
756                 spin_unlock(&c->journal.lock);
757         }
758
759
760         return ret;
761 }
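
/*
 * The returned atomic_t points at the pin slot for the journal entry the
 * keys landed in; the caller drops it once the same keys are safely in the
 * btree (cf. bch_journal_meta() below).  A sketch of the caller pattern,
 * not lifted verbatim from the insert path:
 */
#if 0
	atomic_t *ref = bch_journal(c, &keys, &cl);

	/* ... insert the same keys into the btree ... */

	if (ref)
		atomic_dec_bug(ref);
#endif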
762
763 void bch_journal_meta(struct cache_set *c, struct closure *cl)
764 {
765         struct keylist keys;
766         atomic_t *ref;
767
768         bch_keylist_init(&keys);
769
770         ref = bch_journal(c, &keys, cl);
771         if (ref)
772                 atomic_dec_bug(ref);
773 }
774
775 void bch_journal_free(struct cache_set *c)
776 {
777         free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
778         free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
779         free_fifo(&c->journal.pin);
780 }
781
782 int bch_journal_alloc(struct cache_set *c)
783 {
784         struct journal *j = &c->journal;
785
786         closure_init_unlocked(&j->io);
787         spin_lock_init(&j->lock);
788         INIT_DELAYED_WORK(&j->work, journal_write_work);
789
790         c->journal_delay_ms = 100;
791
792         j->w[0].c = c;
793         j->w[1].c = c;
794
795         if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
796             !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
797             !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
798                 return -ENOMEM;
799
800         return 0;
801 }