staging: zcache: optimize zcache_do_preload
[cascardo/linux.git] / drivers / staging / zcache / zcache-main.c
1 /*
2  * zcache.c
3  *
4  * Copyright (c) 2010,2011, Dan Magenheimer, Oracle Corp.
5  * Copyright (c) 2010,2011, Nitin Gupta
6  *
7  * Zcache provides an in-kernel "host implementation" for transcendent memory
8  * and, thus indirectly, for cleancache and frontswap.  Zcache includes two
9  * page-accessible memory [1] interfaces, both utilizing the crypto compression
10  * API:
11  * 1) "compression buddies" ("zbud") is used for ephemeral pages
12  * 2) zsmalloc is used for persistent pages.
13  * Xvmalloc (based on the TLSF allocator) has very low fragmentation
14  * so maximizes space efficiency, while zbud allows pairs (and potentially,
15  * in the future, more than a pair of) compressed pages to be closely linked
16  * so that reclaiming can be done via the kernel's physical-page-oriented
17  * "shrinker" interface.
18  *
19  * [1] For a definition of page-accessible memory (aka PAM), see:
20  *   http://marc.info/?l=linux-mm&m=127811271605009
21  */
22
23 #include <linux/module.h>
24 #include <linux/cpu.h>
25 #include <linux/highmem.h>
26 #include <linux/list.h>
27 #include <linux/slab.h>
28 #include <linux/spinlock.h>
29 #include <linux/types.h>
30 #include <linux/atomic.h>
31 #include <linux/math64.h>
32 #include <linux/crypto.h>
33 #include <linux/string.h>
34 #include <linux/idr.h>
35 #include "tmem.h"
36
37 #include "../zsmalloc/zsmalloc.h"
38
39 #ifdef CONFIG_CLEANCACHE
40 #include <linux/cleancache.h>
41 #endif
42 #ifdef CONFIG_FRONTSWAP
43 #include <linux/frontswap.h>
44 #endif
45
46 #if 0
47 /* this is more aggressive but may cause other problems? */
48 #define ZCACHE_GFP_MASK (GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN)
49 #else
50 #define ZCACHE_GFP_MASK \
51         (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
52 #endif
53
54 #define MAX_CLIENTS 16
55 #define LOCAL_CLIENT ((uint16_t)-1)
56
57 MODULE_LICENSE("GPL");
58
/*
 * Per-client zcache state.  The local kernel is represented by the
 * statically allocated zcache_host; other clients live in the
 * zcache_clients[] array and are identified by their index there.
 */
struct zcache_client {
	struct idr tmem_pools;	/* this client's tmem pools, indexed by pool id */
	struct zs_pool *zspool;	/* zsmalloc pool for persistent (zv) pages */
	bool allocated;		/* NOTE(review): presumably marks an in-use
				 * zcache_clients[] slot — confirm at init site */
	atomic_t refcount;	/* references held on this client */
};
65
66 static struct zcache_client zcache_host;
67 static struct zcache_client zcache_clients[MAX_CLIENTS];
68
69 static inline uint16_t get_client_id_from_client(struct zcache_client *cli)
70 {
71         BUG_ON(cli == NULL);
72         if (cli == &zcache_host)
73                 return LOCAL_CLIENT;
74         return cli - &zcache_clients[0];
75 }
76
77 static inline bool is_local_client(struct zcache_client *cli)
78 {
79         return cli == &zcache_host;
80 }
81
82 /* crypto API for zcache  */
83 #define ZCACHE_COMP_NAME_SZ CRYPTO_MAX_ALG_NAME
84 static char zcache_comp_name[ZCACHE_COMP_NAME_SZ];
85 static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms;
86
87 enum comp_op {
88         ZCACHE_COMPOP_COMPRESS,
89         ZCACHE_COMPOP_DECOMPRESS
90 };
91
/*
 * Run a compress or decompress operation through this CPU's crypto
 * transform.  get_cpu()/put_cpu() pin the task to the current CPU so
 * the per-cpu tfm cannot be used concurrently from two CPUs.
 *
 * Returns 0 on success, a negative value from the crypto API on
 * failure, or -EINVAL for an unknown @op.
 */
static inline int zcache_comp_op(enum comp_op op,
				const u8 *src, unsigned int slen,
				u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int ret;

	BUG_ON(!zcache_comp_pcpu_tfms);
	/* disables preemption; matched by put_cpu() below */
	tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
	BUG_ON(!tfm);
	switch (op) {
	case ZCACHE_COMPOP_COMPRESS:
		ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
		break;
	case ZCACHE_COMPOP_DECOMPRESS:
		ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
		break;
	default:
		ret = -EINVAL;
	}
	put_cpu();
	return ret;
}
115
116 /**********
117  * Compression buddies ("zbud") provides for packing two (or, possibly
118  * in the future, more) compressed ephemeral pages into a single "raw"
119  * (physical) page and tracking them with data structures so that
120  * the raw pages can be easily reclaimed.
121  *
122  * A zbud page ("zbpg") is an aligned page containing a list_head,
123  * a lock, and two "zbud headers".  The remainder of the physical
124  * page is divided up into aligned 64-byte "chunks" which contain
125  * the compressed data for zero, one, or two zbuds.  Each zbpg
126  * resides on: (1) an "unused list" if it has no zbuds; (2) a
127  * "buddied" list if it is fully populated  with two zbuds; or
128  * (3) one of PAGE_SIZE/64 "unbuddied" lists indexed by how many chunks
129  * the one unbuddied zbud uses.  The data inside a zbpg cannot be
130  * read or written unless the zbpg's lock is held.
131  */
132
133 #define ZBH_SENTINEL  0x43214321
134 #define ZBPG_SENTINEL  0xdeadbeef
135
136 #define ZBUD_MAX_BUDS 2
137
/* Per-buddy bookkeeping, embedded (twice) in each struct zbud_page. */
struct zbud_hdr {
	uint16_t client_id;	/* owning client: LOCAL_CLIENT or array index */
	uint16_t pool_id;	/* tmem pool the stored page belongs to */
	struct tmem_oid oid;	/* tmem object id; set invalid when unused */
	uint32_t index;		/* page index within the tmem object */
	uint16_t size; /* compressed size in bytes, zero means unused */
	DECL_SENTINEL
};
146
/* One raw pageframe managed by zbud; compressed chunks follow the header. */
struct zbud_page {
	struct list_head bud_list;	/* links onto unused, unbuddied[n],
					 * or buddied list; empty = zombie */
	spinlock_t lock;		/* protects buddies and chunk data */
	struct zbud_hdr buddy[ZBUD_MAX_BUDS];
	DECL_SENTINEL
	/* followed by NUM_CHUNK aligned CHUNK_SIZE-byte chunks */
};
154
155 #define CHUNK_SHIFT     6
156 #define CHUNK_SIZE      (1 << CHUNK_SHIFT)
157 #define CHUNK_MASK      (~(CHUNK_SIZE-1))
158 #define NCHUNKS         (((PAGE_SIZE - sizeof(struct zbud_page)) & \
159                                 CHUNK_MASK) >> CHUNK_SHIFT)
160 #define MAX_CHUNK       (NCHUNKS-1)
161
162 static struct {
163         struct list_head list;
164         unsigned count;
165 } zbud_unbuddied[NCHUNKS];
166 /* list N contains pages with N chunks USED and NCHUNKS-N unused */
167 /* element 0 is never used but optimizing that isn't worth it */
168 static unsigned long zbud_cumul_chunk_counts[NCHUNKS];
169
170 struct list_head zbud_buddied_list;
171 static unsigned long zcache_zbud_buddied_count;
172
173 /* protects the buddied list and all unbuddied lists */
174 static DEFINE_SPINLOCK(zbud_budlists_spinlock);
175
176 static LIST_HEAD(zbpg_unused_list);
177 static unsigned long zcache_zbpg_unused_list_count;
178
179 /* protects the unused page list */
180 static DEFINE_SPINLOCK(zbpg_unused_list_spinlock);
181
182 static atomic_t zcache_zbud_curr_raw_pages;
183 static atomic_t zcache_zbud_curr_zpages;
184 static unsigned long zcache_zbud_curr_zbytes;
185 static unsigned long zcache_zbud_cumul_zpages;
186 static unsigned long zcache_zbud_cumul_zbytes;
187 static unsigned long zcache_compress_poor;
188 static unsigned long zcache_mean_compress_poor;
189
190 /* forward references */
191 static void *zcache_get_free_page(void);
192 static void zcache_free_page(void *p);
193
194 /*
195  * zbud helper functions
196  */
197
198 static inline unsigned zbud_max_buddy_size(void)
199 {
200         return MAX_CHUNK << CHUNK_SHIFT;
201 }
202
203 static inline unsigned zbud_size_to_chunks(unsigned size)
204 {
205         BUG_ON(size == 0 || size > zbud_max_buddy_size());
206         return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
207 }
208
209 static inline int zbud_budnum(struct zbud_hdr *zh)
210 {
211         unsigned offset = (unsigned long)zh & (PAGE_SIZE - 1);
212         struct zbud_page *zbpg = NULL;
213         unsigned budnum = -1U;
214         int i;
215
216         for (i = 0; i < ZBUD_MAX_BUDS; i++)
217                 if (offset == offsetof(typeof(*zbpg), buddy[i])) {
218                         budnum = i;
219                         break;
220                 }
221         BUG_ON(budnum == -1U);
222         return budnum;
223 }
224
/*
 * Return where the compressed data of buddy @zh lives (or will live)
 * within its zbud page.  Caller must hold the page's lock.
 *
 * Buddy 0's data starts at the first chunk boundary past the page
 * header; buddy 1's data occupies a chunk-aligned region ending at
 * the end of the page, so the two buddies grow toward each other.
 */
static char *zbud_data(struct zbud_hdr *zh, unsigned size)
{
	struct zbud_page *zbpg;
	char *p;
	unsigned budnum;

	ASSERT_SENTINEL(zh, ZBH);
	budnum = zbud_budnum(zh);
	BUG_ON(size == 0 || size > zbud_max_buddy_size());
	zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
	ASSERT_SPINLOCK(&zbpg->lock);
	p = (char *)zbpg;
	if (budnum == 0)
		/* round the header size up to the next chunk boundary */
		p += ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) &
							CHUNK_MASK);
	else if (budnum == 1)
		/* chunk-rounded allocation placed flush against page end */
		p += PAGE_SIZE - ((size + CHUNK_SIZE - 1) & CHUNK_MASK);
	return p;
}
244
245 /*
246  * zbud raw page management
247  */
248
249 static struct zbud_page *zbud_alloc_raw_page(void)
250 {
251         struct zbud_page *zbpg = NULL;
252         struct zbud_hdr *zh0, *zh1;
253         bool recycled = 0;
254
255         /* if any pages on the zbpg list, use one */
256         spin_lock(&zbpg_unused_list_spinlock);
257         if (!list_empty(&zbpg_unused_list)) {
258                 zbpg = list_first_entry(&zbpg_unused_list,
259                                 struct zbud_page, bud_list);
260                 list_del_init(&zbpg->bud_list);
261                 zcache_zbpg_unused_list_count--;
262                 recycled = 1;
263         }
264         spin_unlock(&zbpg_unused_list_spinlock);
265         if (zbpg == NULL)
266                 /* none on zbpg list, try to get a kernel page */
267                 zbpg = zcache_get_free_page();
268         if (likely(zbpg != NULL)) {
269                 INIT_LIST_HEAD(&zbpg->bud_list);
270                 zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1];
271                 spin_lock_init(&zbpg->lock);
272                 if (recycled) {
273                         ASSERT_INVERTED_SENTINEL(zbpg, ZBPG);
274                         SET_SENTINEL(zbpg, ZBPG);
275                         BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid));
276                         BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid));
277                 } else {
278                         atomic_inc(&zcache_zbud_curr_raw_pages);
279                         INIT_LIST_HEAD(&zbpg->bud_list);
280                         SET_SENTINEL(zbpg, ZBPG);
281                         zh0->size = 0; zh1->size = 0;
282                         tmem_oid_set_invalid(&zh0->oid);
283                         tmem_oid_set_invalid(&zh1->oid);
284                 }
285         }
286         return zbpg;
287 }
288
/*
 * Return an empty zbud page to the unused-page list for later reuse.
 * Called with zbpg->lock held; the lock is RELEASED here.  Both
 * buddies must already be free (size 0, invalid oid).
 */
static void zbud_free_raw_page(struct zbud_page *zbpg)
{
	struct zbud_hdr *zh0 = &zbpg->buddy[0], *zh1 = &zbpg->buddy[1];

	ASSERT_SENTINEL(zbpg, ZBPG);
	BUG_ON(!list_empty(&zbpg->bud_list));
	ASSERT_SPINLOCK(&zbpg->lock);
	BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid));
	BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid));
	/* invert sentinel so zbud_alloc_raw_page() can sanity-check reuse */
	INVERT_SENTINEL(zbpg, ZBPG);
	spin_unlock(&zbpg->lock);
	spin_lock(&zbpg_unused_list_spinlock);
	list_add(&zbpg->bud_list, &zbpg_unused_list);
	zcache_zbpg_unused_list_count++;
	spin_unlock(&zbpg_unused_list_spinlock);
}
305
306 /*
307  * core zbud handling routines
308  */
309
/*
 * Mark one buddy unused and update the global zbud byte/page counters.
 * Caller must hold the containing zbud page's lock.  Returns the
 * compressed size in bytes the buddy was occupying.
 */
static unsigned zbud_free(struct zbud_hdr *zh)
{
	unsigned size;

	ASSERT_SENTINEL(zh, ZBH);
	BUG_ON(!tmem_oid_valid(&zh->oid));
	size = zh->size;
	BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
	zh->size = 0;
	tmem_oid_set_invalid(&zh->oid);
	/* poison the sentinel so a double free is caught */
	INVERT_SENTINEL(zh, ZBH);
	zcache_zbud_curr_zbytes -= size;
	atomic_dec(&zcache_zbud_curr_zpages);
	return size;
}
325
/*
 * Free buddy @zh and fix up its page's list membership: if the other
 * buddy is also free the whole raw page is released; otherwise the
 * page moves from the buddied list to the appropriate unbuddied list.
 * Lock order (budlists lock, then page lock) matches zbud_create().
 */
static void zbud_free_and_delist(struct zbud_hdr *zh)
{
	unsigned chunks;
	struct zbud_hdr *zh_other;
	unsigned budnum = zbud_budnum(zh), size;
	struct zbud_page *zbpg =
		container_of(zh, struct zbud_page, buddy[budnum]);

	spin_lock(&zbud_budlists_spinlock);
	spin_lock(&zbpg->lock);
	if (list_empty(&zbpg->bud_list)) {
		/* ignore zombie page... see zbud_evict_pages() */
		spin_unlock(&zbpg->lock);
		spin_unlock(&zbud_budlists_spinlock);
		return;
	}
	size = zbud_free(zh);
	ASSERT_SPINLOCK(&zbpg->lock);
	zh_other = &zbpg->buddy[(budnum == 0) ? 1 : 0];
	if (zh_other->size == 0) { /* was unbuddied: unlist and free */
		chunks = zbud_size_to_chunks(size) ;
		BUG_ON(list_empty(&zbud_unbuddied[chunks].list));
		list_del_init(&zbpg->bud_list);
		zbud_unbuddied[chunks].count--;
		spin_unlock(&zbud_budlists_spinlock);
		/* zbud_free_raw_page() releases zbpg->lock for us */
		zbud_free_raw_page(zbpg);
	} else { /* was buddied: move remaining buddy to unbuddied list */
		chunks = zbud_size_to_chunks(zh_other->size) ;
		list_del_init(&zbpg->bud_list);
		zcache_zbud_buddied_count--;
		list_add_tail(&zbpg->bud_list, &zbud_unbuddied[chunks].list);
		zbud_unbuddied[chunks].count++;
		spin_unlock(&zbud_budlists_spinlock);
		spin_unlock(&zbpg->lock);
	}
}
362
/*
 * Store @size bytes of already-compressed data (@cdata) as a new zbud
 * owned by (@client_id, @pool_id, @oid, @index).  First try to pair it
 * into an existing unbuddied page, preferring pages with the most
 * chunks already in use (tightest fit); otherwise allocate a new raw
 * page.  Returns the new zbud header, or NULL if no page is available.
 */
static struct zbud_hdr *zbud_create(uint16_t client_id, uint16_t pool_id,
					struct tmem_oid *oid,
					uint32_t index, struct page *page,
					void *cdata, unsigned size)
{
	struct zbud_hdr *zh0, *zh1, *zh = NULL;
	struct zbud_page *zbpg = NULL, *ztmp;
	unsigned nchunks;
	char *to;
	int i, found_good_buddy = 0;

	nchunks = zbud_size_to_chunks(size) ;
	/* walk unbuddied lists from fullest usable down to emptiest;
	 * trylock skips pages busy on another cpu and avoids deadlock */
	for (i = MAX_CHUNK - nchunks + 1; i > 0; i--) {
		spin_lock(&zbud_budlists_spinlock);
		if (!list_empty(&zbud_unbuddied[i].list)) {
			list_for_each_entry_safe(zbpg, ztmp,
				    &zbud_unbuddied[i].list, bud_list) {
				if (spin_trylock(&zbpg->lock)) {
					found_good_buddy = i;
					goto found_unbuddied;
				}
			}
		}
		spin_unlock(&zbud_budlists_spinlock);
	}
	/* didn't find a good buddy, try allocating a new page */
	zbpg = zbud_alloc_raw_page();
	if (unlikely(zbpg == NULL))
		goto out;
	/* have a fresh page: enlist it as unbuddied, using buddy[0] */
	spin_lock(&zbud_budlists_spinlock);
	spin_lock(&zbpg->lock);
	list_add_tail(&zbpg->bud_list, &zbud_unbuddied[nchunks].list);
	zbud_unbuddied[nchunks].count++;
	zh = &zbpg->buddy[0];
	goto init_zh;

found_unbuddied:
	ASSERT_SPINLOCK(&zbpg->lock);
	zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1];
	/* exactly one buddy must be vacant on an unbuddied page */
	BUG_ON(!((zh0->size == 0) ^ (zh1->size == 0)));
	if (zh0->size != 0) { /* buddy0 in use, buddy1 is vacant */
		ASSERT_SENTINEL(zh0, ZBH);
		zh = zh1;
	} else if (zh1->size != 0) { /* buddy1 in use, buddy0 is vacant */
		ASSERT_SENTINEL(zh1, ZBH);
		zh = zh0;
	} else
		BUG();
	/* page becomes fully populated: migrate to the buddied list */
	list_del_init(&zbpg->bud_list);
	zbud_unbuddied[found_good_buddy].count--;
	list_add_tail(&zbpg->bud_list, &zbud_buddied_list);
	zcache_zbud_buddied_count++;

init_zh:
	SET_SENTINEL(zh, ZBH);
	zh->size = size;
	zh->index = index;
	zh->oid = *oid;
	zh->pool_id = pool_id;
	zh->client_id = client_id;
	to = zbud_data(zh, size);
	memcpy(to, cdata, size);
	spin_unlock(&zbpg->lock);
	spin_unlock(&zbud_budlists_spinlock);

	/* update global distribution and space statistics */
	zbud_cumul_chunk_counts[nchunks]++;
	atomic_inc(&zcache_zbud_curr_zpages);
	zcache_zbud_cumul_zpages++;
	zcache_zbud_curr_zbytes += size;
	zcache_zbud_cumul_zbytes += size;
out:
	return zh;
}
437
/*
 * Decompress zbud @zh into @page.  Returns 0 on success, or -EINVAL
 * if the containing zbud page has been delisted for eviction (a
 * "zombie"; see zbud_evict_pages()).  A stored zbud must decompress
 * to exactly one page; anything else is a BUG.
 */
static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
{
	struct zbud_page *zbpg;
	unsigned budnum = zbud_budnum(zh);
	unsigned int out_len = PAGE_SIZE;
	char *to_va, *from_va;
	unsigned size;
	int ret = 0;

	zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
	spin_lock(&zbpg->lock);
	if (list_empty(&zbpg->bud_list)) {
		/* ignore zombie page... see zbud_evict_pages() */
		ret = -EINVAL;
		goto out;
	}
	ASSERT_SENTINEL(zh, ZBH);
	BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
	/* atomic mapping: no sleeping until kunmap_atomic() */
	to_va = kmap_atomic(page);
	size = zh->size;
	from_va = zbud_data(zh, size);
	ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
				to_va, &out_len);
	BUG_ON(ret);
	BUG_ON(out_len != PAGE_SIZE);
	kunmap_atomic(to_va);
out:
	spin_unlock(&zbpg->lock);
	return ret;
}
468
469 /*
470  * The following routines handle shrinking of ephemeral pages by evicting
471  * pages "least valuable" first.
472  */
473
474 static unsigned long zcache_evicted_raw_pages;
475 static unsigned long zcache_evicted_buddied_pages;
476 static unsigned long zcache_evicted_unbuddied_pages;
477
478 static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id,
479                                                 uint16_t poolid);
480 static void zcache_put_pool(struct tmem_pool *pool);
481
482 /*
483  * Flush and free all zbuds in a zbpg, then free the pageframe
484  */
static void zbud_evict_zbpg(struct zbud_page *zbpg)
{
	struct zbud_hdr *zh;
	int i, j;
	uint32_t pool_id[ZBUD_MAX_BUDS], client_id[ZBUD_MAX_BUDS];
	uint32_t index[ZBUD_MAX_BUDS];
	struct tmem_oid oid[ZBUD_MAX_BUDS];
	struct tmem_pool *pool;

	/* entered with zbpg->lock held and the page already delisted */
	ASSERT_SPINLOCK(&zbpg->lock);
	BUG_ON(!list_empty(&zbpg->bud_list));
	/* snapshot each in-use buddy's identity, then free the buddy */
	for (i = 0, j = 0; i < ZBUD_MAX_BUDS; i++) {
		zh = &zbpg->buddy[i];
		if (zh->size) {
			client_id[j] = zh->client_id;
			pool_id[j] = zh->pool_id;
			oid[j] = zh->oid;
			index[j] = zh->index;
			j++;
			zbud_free(zh);
		}
	}
	/* drop the page lock before calling back into tmem */
	spin_unlock(&zbpg->lock);
	for (i = 0; i < j; i++) {
		pool = zcache_get_pool_by_id(client_id[i], pool_id[i]);
		if (pool != NULL) {
			tmem_flush_page(pool, &oid[i], index[i]);
			zcache_put_pool(pool);
		}
	}
	ASSERT_SENTINEL(zbpg, ZBPG);
	/* zbud_free_raw_page() expects the lock held and releases it */
	spin_lock(&zbpg->lock);
	zbud_free_raw_page(zbpg);
}
519
520 /*
521  * Free nr pages.  This code is funky because we want to hold the locks
522  * protecting various lists for as short a time as possible, and in some
523  * circumstances the list may change asynchronously when the list lock is
524  * not held.  In some cases we also trylock not only to avoid waiting on a
525  * page in use by another cpu, but also to avoid potential deadlock due to
526  * lock inversion.
527  */
static void zbud_evict_pages(int nr)
{
	struct zbud_page *zbpg;
	int i;

	/* first try freeing any pages on unused list */
retry_unused_list:
	spin_lock_bh(&zbpg_unused_list_spinlock);
	if (!list_empty(&zbpg_unused_list)) {
		/* can't walk list here, since it may change when unlocked */
		zbpg = list_first_entry(&zbpg_unused_list,
				struct zbud_page, bud_list);
		list_del_init(&zbpg->bud_list);
		zcache_zbpg_unused_list_count--;
		atomic_dec(&zcache_zbud_curr_raw_pages);
		spin_unlock_bh(&zbpg_unused_list_spinlock);
		zcache_free_page(zbpg);
		zcache_evicted_raw_pages++;
		if (--nr <= 0)
			goto out;
		goto retry_unused_list;
	}
	spin_unlock_bh(&zbpg_unused_list_spinlock);

	/* now try freeing unbuddied pages, starting with least space avail */
	for (i = 0; i < MAX_CHUNK; i++) {
retry_unbud_list_i:
		spin_lock_bh(&zbud_budlists_spinlock);
		if (list_empty(&zbud_unbuddied[i].list)) {
			spin_unlock_bh(&zbud_budlists_spinlock);
			continue;
		}
		list_for_each_entry(zbpg, &zbud_unbuddied[i].list, bud_list) {
			/* trylock: skip busy pages, avoid lock inversion */
			if (unlikely(!spin_trylock(&zbpg->lock)))
				continue;
			list_del_init(&zbpg->bud_list);
			zbud_unbuddied[i].count--;
			/*
			 * plain spin_unlock on purpose: bottom halves
			 * remain disabled across zbud_evict_zbpg() and
			 * are re-enabled by local_bh_enable() below
			 */
			spin_unlock(&zbud_budlists_spinlock);
			zcache_evicted_unbuddied_pages++;
			/* want budlists unlocked when doing zbpg eviction */
			zbud_evict_zbpg(zbpg);
			local_bh_enable();
			if (--nr <= 0)
				goto out;
			goto retry_unbud_list_i;
		}
		spin_unlock_bh(&zbud_budlists_spinlock);
	}

	/* as a last resort, free buddied pages */
retry_bud_list:
	spin_lock_bh(&zbud_budlists_spinlock);
	if (list_empty(&zbud_buddied_list)) {
		spin_unlock_bh(&zbud_budlists_spinlock);
		goto out;
	}
	list_for_each_entry(zbpg, &zbud_buddied_list, bud_list) {
		if (unlikely(!spin_trylock(&zbpg->lock)))
			continue;
		list_del_init(&zbpg->bud_list);
		zcache_zbud_buddied_count--;
		/* same deliberate bh asymmetry as the unbuddied loop above */
		spin_unlock(&zbud_budlists_spinlock);
		zcache_evicted_buddied_pages++;
		/* want budlists unlocked when doing zbpg eviction */
		zbud_evict_zbpg(zbpg);
		local_bh_enable();
		if (--nr <= 0)
			goto out;
		goto retry_bud_list;
	}
	spin_unlock_bh(&zbud_budlists_spinlock);
out:
	return;
}
602
603 static void __init zbud_init(void)
604 {
605         int i;
606
607         INIT_LIST_HEAD(&zbud_buddied_list);
608
609         for (i = 0; i < NCHUNKS; i++)
610                 INIT_LIST_HEAD(&zbud_unbuddied[i].list);
611 }
612
613 #ifdef CONFIG_SYSFS
614 /*
615  * These sysfs routines show a nice distribution of how many zbpg's are
616  * currently (and have ever been placed) in each unbuddied list.  It's fun
617  * to watch but can probably go away before final merge.
618  */
619 static int zbud_show_unbuddied_list_counts(char *buf)
620 {
621         int i;
622         char *p = buf;
623
624         for (i = 0; i < NCHUNKS; i++)
625                 p += sprintf(p, "%u ", zbud_unbuddied[i].count);
626         return p - buf;
627 }
628
629 static int zbud_show_cumul_chunk_counts(char *buf)
630 {
631         unsigned long i, chunks = 0, total_chunks = 0, sum_total_chunks = 0;
632         unsigned long total_chunks_lte_21 = 0, total_chunks_lte_32 = 0;
633         unsigned long total_chunks_lte_42 = 0;
634         char *p = buf;
635
636         for (i = 0; i < NCHUNKS; i++) {
637                 p += sprintf(p, "%lu ", zbud_cumul_chunk_counts[i]);
638                 chunks += zbud_cumul_chunk_counts[i];
639                 total_chunks += zbud_cumul_chunk_counts[i];
640                 sum_total_chunks += i * zbud_cumul_chunk_counts[i];
641                 if (i == 21)
642                         total_chunks_lte_21 = total_chunks;
643                 if (i == 32)
644                         total_chunks_lte_32 = total_chunks;
645                 if (i == 42)
646                         total_chunks_lte_42 = total_chunks;
647         }
648         p += sprintf(p, "<=21:%lu <=32:%lu <=42:%lu, mean:%lu\n",
649                 total_chunks_lte_21, total_chunks_lte_32, total_chunks_lte_42,
650                 chunks == 0 ? 0 : sum_total_chunks / chunks);
651         return p - buf;
652 }
653 #endif
654
655 /**********
656  * This "zv" PAM implementation combines the slab-based zsmalloc
657  * with the crypto compression API to maximize the amount of data that can
658  * be packed into a physical page.
659  *
660  * Zv represents a PAM page with the index and object (plus a "size" value
661  * necessary for decompression) immediately preceding the compressed data.
662  */
663
664 #define ZVH_SENTINEL  0x43214321
665
/*
 * Header stored at the front of every zsmalloc ("zv") object, followed
 * immediately by the compressed page data.
 */
struct zv_hdr {
	uint32_t pool_id;	/* tmem pool the stored page belongs to */
	struct tmem_oid oid;	/* tmem object id */
	uint32_t index;		/* page index within the tmem object */
	size_t size;		/* compressed payload length in bytes */
	DECL_SENTINEL
};
673
674 /* rudimentary policy limits */
675 /* total number of persistent pages may not exceed this percentage */
676 static unsigned int zv_page_count_policy_percent = 75;
677 /*
678  * byte count defining poor compression; pages with greater zsize will be
679  * rejected
680  */
681 static unsigned int zv_max_zsize = (PAGE_SIZE / 8) * 7;
682 /*
683  * byte count defining poor *mean* compression; pages with greater zsize
684  * will be rejected until sufficient better-compressed pages are accepted
685  * driving the mean below this threshold
686  */
687 static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;
688
689 static atomic_t zv_curr_dist_counts[NCHUNKS];
690 static atomic_t zv_cumul_dist_counts[NCHUNKS];
691
/*
 * Allocate a zsmalloc object large enough for a zv_hdr plus @clen
 * bytes of pre-compressed data, fill in the header and copy the data.
 * Must be called with interrupts disabled (enforced by BUG_ON).
 * Returns the zsmalloc handle, or 0 on allocation failure.
 */
static unsigned long zv_create(struct zs_pool *pool, uint32_t pool_id,
				struct tmem_oid *oid, uint32_t index,
				void *cdata, unsigned clen)
{
	struct zv_hdr *zv;
	u32 size = clen + sizeof(struct zv_hdr);
	int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
	unsigned long handle = 0;

	BUG_ON(!irqs_disabled());
	BUG_ON(chunks >= NCHUNKS);
	handle = zs_malloc(pool, size);
	if (!handle)
		goto out;
	/* track the compressed-size distribution by chunk count */
	atomic_inc(&zv_curr_dist_counts[chunks]);
	atomic_inc(&zv_cumul_dist_counts[chunks]);
	zv = zs_map_object(pool, handle);
	zv->index = index;
	zv->oid = *oid;
	zv->pool_id = pool_id;
	zv->size = clen;
	SET_SENTINEL(zv, ZVH);
	/* payload lives immediately after the header */
	memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
	zs_unmap_object(pool, handle);
out:
	return handle;
}
719
/*
 * Free the zv object at @handle: read its stored size (under the
 * object mapping) to find the chunk bucket to decrement, then release
 * the zsmalloc object with interrupts disabled.
 */
static void zv_free(struct zs_pool *pool, unsigned long handle)
{
	unsigned long flags;
	struct zv_hdr *zv;
	uint16_t size;
	int chunks;

	zv = zs_map_object(pool, handle);
	ASSERT_SENTINEL(zv, ZVH);
	size = zv->size + sizeof(struct zv_hdr);
	/* poison the sentinel so stale use of this object is caught */
	INVERT_SENTINEL(zv, ZVH);
	zs_unmap_object(pool, handle);

	chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
	BUG_ON(chunks >= NCHUNKS);
	atomic_dec(&zv_curr_dist_counts[chunks]);

	local_irq_save(flags);
	zs_free(pool, handle);
	local_irq_restore(flags);
}
741
/*
 * Decompress the zv object at @handle (from the host client's zsmalloc
 * pool) into @page.  The object must decompress to exactly one full
 * page; decompression failure or a short result is a BUG.
 */
static void zv_decompress(struct page *page, unsigned long handle)
{
	unsigned int clen = PAGE_SIZE;
	char *to_va;
	int ret;
	struct zv_hdr *zv;

	zv = zs_map_object(zcache_host.zspool, handle);
	BUG_ON(zv->size == 0);
	ASSERT_SENTINEL(zv, ZVH);
	to_va = kmap_atomic(page);
	/* compressed payload starts immediately after the header */
	ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, (char *)zv + sizeof(*zv),
				zv->size, to_va, &clen);
	kunmap_atomic(to_va);
	zs_unmap_object(zcache_host.zspool, handle);
	BUG_ON(ret);
	BUG_ON(clen != PAGE_SIZE);
}
760
761 #ifdef CONFIG_SYSFS
762 /*
763  * show a distribution of compression stats for zv pages.
764  */
765
766 static int zv_curr_dist_counts_show(char *buf)
767 {
768         unsigned long i, n, chunks = 0, sum_total_chunks = 0;
769         char *p = buf;
770
771         for (i = 0; i < NCHUNKS; i++) {
772                 n = atomic_read(&zv_curr_dist_counts[i]);
773                 p += sprintf(p, "%lu ", n);
774                 chunks += n;
775                 sum_total_chunks += i * n;
776         }
777         p += sprintf(p, "mean:%lu\n",
778                 chunks == 0 ? 0 : sum_total_chunks / chunks);
779         return p - buf;
780 }
781
782 static int zv_cumul_dist_counts_show(char *buf)
783 {
784         unsigned long i, n, chunks = 0, sum_total_chunks = 0;
785         char *p = buf;
786
787         for (i = 0; i < NCHUNKS; i++) {
788                 n = atomic_read(&zv_cumul_dist_counts[i]);
789                 p += sprintf(p, "%lu ", n);
790                 chunks += n;
791                 sum_total_chunks += i * n;
792         }
793         p += sprintf(p, "mean:%lu\n",
794                 chunks == 0 ? 0 : sum_total_chunks / chunks);
795         return p - buf;
796 }
797
798 /*
799  * setting zv_max_zsize via sysfs causes all persistent (e.g. swap)
800  * pages that don't compress to less than this value (including metadata
801  * overhead) to be rejected.  We don't allow the value to get too close
802  * to PAGE_SIZE.
803  */
804 static ssize_t zv_max_zsize_show(struct kobject *kobj,
805                                     struct kobj_attribute *attr,
806                                     char *buf)
807 {
808         return sprintf(buf, "%u\n", zv_max_zsize);
809 }
810
811 static ssize_t zv_max_zsize_store(struct kobject *kobj,
812                                     struct kobj_attribute *attr,
813                                     const char *buf, size_t count)
814 {
815         unsigned long val;
816         int err;
817
818         if (!capable(CAP_SYS_ADMIN))
819                 return -EPERM;
820
821         err = kstrtoul(buf, 10, &val);
822         if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
823                 return -EINVAL;
824         zv_max_zsize = val;
825         return count;
826 }
827
828 /*
829  * setting zv_max_mean_zsize via sysfs causes all persistent (e.g. swap)
830  * pages that don't compress to less than this value (including metadata
831  * overhead) to be rejected UNLESS the mean compression is also smaller
832  * than this value.  In other words, we are load-balancing-by-zsize the
833  * accepted pages.  Again, we don't allow the value to get too close
834  * to PAGE_SIZE.
835  */
836 static ssize_t zv_max_mean_zsize_show(struct kobject *kobj,
837                                     struct kobj_attribute *attr,
838                                     char *buf)
839 {
840         return sprintf(buf, "%u\n", zv_max_mean_zsize);
841 }
842
843 static ssize_t zv_max_mean_zsize_store(struct kobject *kobj,
844                                     struct kobj_attribute *attr,
845                                     const char *buf, size_t count)
846 {
847         unsigned long val;
848         int err;
849
850         if (!capable(CAP_SYS_ADMIN))
851                 return -EPERM;
852
853         err = kstrtoul(buf, 10, &val);
854         if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
855                 return -EINVAL;
856         zv_max_mean_zsize = val;
857         return count;
858 }
859
860 /*
861  * setting zv_page_count_policy_percent via sysfs sets an upper bound of
862  * persistent (e.g. swap) pages that will be retained according to:
863  *     (zv_page_count_policy_percent * totalram_pages) / 100)
864  * when that limit is reached, further puts will be rejected (until
865  * some pages have been flushed).  Note that, due to compression,
866  * this number may exceed 100; it defaults to 75 and we set an
 * arbitrary limit of 150.  A poor choice will almost certainly result
868  * in OOM's, so this value should only be changed prudently.
869  */
/* sysfs: show the persistent-page retention limit as a percent of RAM */
static ssize_t zv_page_count_policy_percent_show(struct kobject *kobj,
						 struct kobj_attribute *attr,
						 char *buf)
{
	return sprintf(buf, "%u\n", zv_page_count_policy_percent);
}
876
877 static ssize_t zv_page_count_policy_percent_store(struct kobject *kobj,
878                                                   struct kobj_attribute *attr,
879                                                   const char *buf, size_t count)
880 {
881         unsigned long val;
882         int err;
883
884         if (!capable(CAP_SYS_ADMIN))
885                 return -EPERM;
886
887         err = kstrtoul(buf, 10, &val);
888         if (err || (val == 0) || (val > 150))
889                 return -EINVAL;
890         zv_page_count_policy_percent = val;
891         return count;
892 }
893
/* sysfs knobs (mode 0644) controlling the persistent-page admission policy */
static struct kobj_attribute zcache_zv_max_zsize_attr = {
		.attr = { .name = "zv_max_zsize", .mode = 0644 },
		.show = zv_max_zsize_show,
		.store = zv_max_zsize_store,
};

static struct kobj_attribute zcache_zv_max_mean_zsize_attr = {
		.attr = { .name = "zv_max_mean_zsize", .mode = 0644 },
		.show = zv_max_mean_zsize_show,
		.store = zv_max_mean_zsize_store,
};

static struct kobj_attribute zcache_zv_page_count_policy_percent_attr = {
		.attr = { .name = "zv_page_count_policy_percent",
			  .mode = 0644 },
		.show = zv_page_count_policy_percent_show,
		.store = zv_page_count_policy_percent_store,
};
912 #endif
913
914 /*
915  * zcache core code starts here
916  */
917
/* useful stats not collected by cleancache or frontswap */
static unsigned long zcache_flush_total;	/* flush_page attempts */
static unsigned long zcache_flush_found;	/* ... that found a page */
static unsigned long zcache_flobj_total;	/* flush_object attempts */
static unsigned long zcache_flobj_found;	/* ... that found an object */
static unsigned long zcache_failed_eph_puts;	/* failed ephemeral puts */
static unsigned long zcache_failed_pers_puts;	/* failed persistent puts */
925
926 /*
927  * Tmem operations assume the poolid implies the invoking client.
928  * Zcache only has one client (the kernel itself): LOCAL_CLIENT.
929  * RAMster has each client numbered by cluster node, and a KVM version
930  * of zcache would have one client per guest and each client might
931  * have a poolid==N.
932  */
933 static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
934 {
935         struct tmem_pool *pool = NULL;
936         struct zcache_client *cli = NULL;
937
938         if (cli_id == LOCAL_CLIENT)
939                 cli = &zcache_host;
940         else {
941                 if (cli_id >= MAX_CLIENTS)
942                         goto out;
943                 cli = &zcache_clients[cli_id];
944                 if (cli == NULL)
945                         goto out;
946         }
947
948         atomic_inc(&cli->refcount);
949         pool = idr_find(&cli->tmem_pools, poolid);
950         if (pool != NULL)
951                 atomic_inc(&pool->refcount);
952 out:
953         return pool;
954 }
955
956 static void zcache_put_pool(struct tmem_pool *pool)
957 {
958         struct zcache_client *cli = NULL;
959
960         if (pool == NULL)
961                 BUG();
962         cli = pool->client;
963         atomic_dec(&pool->refcount);
964         atomic_dec(&cli->refcount);
965 }
966
967 int zcache_new_client(uint16_t cli_id)
968 {
969         struct zcache_client *cli = NULL;
970         int ret = -1;
971
972         if (cli_id == LOCAL_CLIENT)
973                 cli = &zcache_host;
974         else if ((unsigned int)cli_id < MAX_CLIENTS)
975                 cli = &zcache_clients[cli_id];
976         if (cli == NULL)
977                 goto out;
978         if (cli->allocated)
979                 goto out;
980         cli->allocated = 1;
981 #ifdef CONFIG_FRONTSWAP
982         cli->zspool = zs_create_pool("zcache", ZCACHE_GFP_MASK);
983         if (cli->zspool == NULL)
984                 goto out;
985         idr_init(&cli->tmem_pools);
986 #endif
987         ret = 0;
988 out:
989         return ret;
990 }
991
/* counters for debugging */
static unsigned long zcache_failed_get_free_pages;	/* page preload failures */
static unsigned long zcache_failed_alloc;	/* slab preload failures */
static unsigned long zcache_put_to_flush;	/* puts converted to flushes */

/*
 * for now, used named slabs so can easily track usage; later can
 * either just use kmalloc, or perhaps add a slab-like allocator
 * to more carefully manage total memory utilization
 */
static struct kmem_cache *zcache_objnode_cache;
static struct kmem_cache *zcache_obj_cache;
/* current and high-water counts of live tmem objs / objnodes */
static atomic_t zcache_curr_obj_count = ATOMIC_INIT(0);
static unsigned long zcache_curr_obj_count_max;
static atomic_t zcache_curr_objnode_count = ATOMIC_INIT(0);
static unsigned long zcache_curr_objnode_count_max;
1008
1009 /*
1010  * to avoid memory allocation recursion (e.g. due to direct reclaim), we
1011  * preload all necessary data structures so the hostops callbacks never
1012  * actually do a malloc
1013  */
/* per-cpu stash of pre-allocated memory consumed by the hostops callbacks */
struct zcache_preload {
	void *page;		/* one raw page, or NULL */
	struct tmem_obj *obj;	/* one tmem_obj, or NULL */
	int nr;			/* number of valid entries in objnodes[] */
	struct tmem_objnode *objnodes[OBJNODE_TREE_MAX_PATH];
};
static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, };
1021
/*
 * Fill this cpu's zcache_preload with a full stack of objnodes (one
 * per possible level of the tmem object tree), one tmem_obj, and one
 * raw page, so that the hostops callbacks invoked later never have to
 * allocate.  Returns 0 when the preload is fully stocked, -ENOMEM
 * otherwise.  Caller must have interrupts disabled, which is what
 * makes the per-cpu access safe.
 */
static int zcache_do_preload(struct tmem_pool *pool)
{
	struct zcache_preload *kp;
	struct tmem_objnode *objnode;
	struct tmem_obj *obj;
	void *page;
	int ret = -ENOMEM;

	if (unlikely(zcache_objnode_cache == NULL))
		goto out;
	if (unlikely(zcache_obj_cache == NULL))
		goto out;

	/* IRQ has already been disabled. */
	kp = &__get_cpu_var(zcache_preloads);
	/* top up the objnode stack to its maximum depth */
	while (kp->nr < ARRAY_SIZE(kp->objnodes)) {
		objnode = kmem_cache_alloc(zcache_objnode_cache,
				ZCACHE_GFP_MASK);
		if (unlikely(objnode == NULL)) {
			zcache_failed_alloc++;
			goto out;
		}

		kp->objnodes[kp->nr++] = objnode;
	}

	obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
	if (unlikely(obj == NULL)) {
		zcache_failed_alloc++;
		goto out;
	}

	page = (void *)__get_free_page(ZCACHE_GFP_MASK);
	if (unlikely(page == NULL)) {
		zcache_failed_get_free_pages++;
		kmem_cache_free(zcache_obj_cache, obj);
		goto out;
	}

	/* keep the fresh obj/page only if the slot is still empty */
	if (kp->obj == NULL)
		kp->obj = obj;
	else
		kmem_cache_free(zcache_obj_cache, obj);

	if (kp->page == NULL)
		kp->page = page;
	else
		free_page((unsigned long)page);

	ret = 0;
out:
	return ret;
}
1075
1076 static void *zcache_get_free_page(void)
1077 {
1078         struct zcache_preload *kp;
1079         void *page;
1080
1081         kp = &__get_cpu_var(zcache_preloads);
1082         page = kp->page;
1083         BUG_ON(page == NULL);
1084         kp->page = NULL;
1085         return page;
1086 }
1087
/* Return a raw page to the kernel page allocator. */
static void zcache_free_page(void *p)
{
	free_page((unsigned long)p);
}
1092
1093 /*
1094  * zcache implementation for tmem host ops
1095  */
1096
/*
 * tmem hostop: pop an objnode from this cpu's preload stack; never
 * allocates.  Returns NULL if the stack is empty (preload was not run
 * or has been exhausted).
 */
static struct tmem_objnode *zcache_objnode_alloc(struct tmem_pool *pool)
{
	struct tmem_objnode *objnode = NULL;
	unsigned long count;
	struct zcache_preload *kp;

	kp = &__get_cpu_var(zcache_preloads);
	if (kp->nr <= 0)
		goto out;
	objnode = kp->objnodes[kp->nr - 1];
	BUG_ON(objnode == NULL);
	kp->objnodes[kp->nr - 1] = NULL;
	kp->nr--;
	/* high-water update is racy, but it's only a debug statistic */
	count = atomic_inc_return(&zcache_curr_objnode_count);
	if (count > zcache_curr_objnode_count_max)
		zcache_curr_objnode_count_max = count;
out:
	return objnode;
}
1116
/* tmem hostop: return an objnode directly to its slab. */
static void zcache_objnode_free(struct tmem_objnode *objnode,
					struct tmem_pool *pool)
{
	atomic_dec(&zcache_curr_objnode_count);
	BUG_ON(atomic_read(&zcache_curr_objnode_count) < 0);
	kmem_cache_free(zcache_objnode_cache, objnode);
}
1124
/*
 * tmem hostop: take the single tmem_obj preloaded for this cpu; never
 * allocates.  BUGs if no obj was preloaded.
 */
static struct tmem_obj *zcache_obj_alloc(struct tmem_pool *pool)
{
	struct tmem_obj *obj = NULL;
	unsigned long count;
	struct zcache_preload *kp;

	kp = &__get_cpu_var(zcache_preloads);
	obj = kp->obj;
	BUG_ON(obj == NULL);
	kp->obj = NULL;
	/* high-water update is racy, but it's only a debug statistic */
	count = atomic_inc_return(&zcache_curr_obj_count);
	if (count > zcache_curr_obj_count_max)
		zcache_curr_obj_count_max = count;
	return obj;
}
1140
/* tmem hostop: return a tmem_obj directly to its slab. */
static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool)
{
	atomic_dec(&zcache_curr_obj_count);
	BUG_ON(atomic_read(&zcache_curr_obj_count) < 0);
	kmem_cache_free(zcache_obj_cache, obj);
}
1147
/* hooks tmem uses to get/free its metadata via the per-cpu preloads/slabs */
static struct tmem_hostops zcache_hostops = {
	.obj_alloc = zcache_obj_alloc,
	.obj_free = zcache_obj_free,
	.objnode_alloc = zcache_objnode_alloc,
	.objnode_free = zcache_objnode_free,
};
1154
1155 /*
1156  * zcache implementations for PAM page descriptor ops
1157  */
1158
/* current and high-water counts of ephemeral / persistent pampds */
static atomic_t zcache_curr_eph_pampd_count = ATOMIC_INIT(0);
static unsigned long zcache_curr_eph_pampd_count_max;
static atomic_t zcache_curr_pers_pampd_count = ATOMIC_INIT(0);
static unsigned long zcache_curr_pers_pampd_count_max;

/* forward reference */
static int zcache_compress(struct page *from, void **out_va, unsigned *out_len);
1166
/*
 * Create a pampd (page-accessible-memory page descriptor) holding a
 * compressed copy of the page at @data.  Ephemeral pages are stored
 * in zbud (rejected if larger than zbud_max_buddy_size() once
 * compressed); persistent pages go into the client's zsmalloc pool,
 * subject to the zv_max_zsize / zv_max_mean_zsize /
 * zv_page_count_policy_percent admission policies.  Returns the
 * opaque descriptor, or NULL if the page was rejected or storage
 * failed.
 */
static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
				struct tmem_pool *pool, struct tmem_oid *oid,
				 uint32_t index)
{
	void *pampd = NULL, *cdata;
	unsigned clen;
	int ret;
	unsigned long count;
	struct page *page = (struct page *)(data);
	struct zcache_client *cli = pool->client;
	uint16_t client_id = get_client_id_from_client(cli);
	unsigned long zv_mean_zsize;
	unsigned long curr_pers_pampd_count;
	u64 total_zsize;

	if (eph) {
		/* NB: zcache_compress() returns 1 on success, 0 on failure */
		ret = zcache_compress(page, &cdata, &clen);
		if (ret == 0)
			goto out;
		if (clen == 0 || clen > zbud_max_buddy_size()) {
			zcache_compress_poor++;
			goto out;
		}
		pampd = (void *)zbud_create(client_id, pool->pool_id, oid,
						index, page, cdata, clen);
		if (pampd != NULL) {
			count = atomic_inc_return(&zcache_curr_eph_pampd_count);
			if (count > zcache_curr_eph_pampd_count_max)
				zcache_curr_eph_pampd_count_max = count;
		}
	} else {
		/* enforce the overall persistent page-count cap first */
		curr_pers_pampd_count =
			atomic_read(&zcache_curr_pers_pampd_count);
		if (curr_pers_pampd_count >
		    (zv_page_count_policy_percent * totalram_pages) / 100)
			goto out;
		ret = zcache_compress(page, &cdata, &clen);
		if (ret == 0)
			goto out;
		/* reject if compression is too poor */
		if (clen > zv_max_zsize) {
			zcache_compress_poor++;
			goto out;
		}
		/* reject if mean compression is too poor */
		if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) {
			total_zsize = zs_get_total_size_bytes(cli->zspool);
			zv_mean_zsize = div_u64(total_zsize,
						curr_pers_pampd_count);
			if (zv_mean_zsize > zv_max_mean_zsize) {
				zcache_mean_compress_poor++;
				goto out;
			}
		}
		pampd = (void *)zv_create(cli->zspool, pool->pool_id,
						oid, index, cdata, clen);
		if (pampd == NULL)
			goto out;
		count = atomic_inc_return(&zcache_curr_pers_pampd_count);
		if (count > zcache_curr_pers_pampd_count_max)
			zcache_curr_pers_pampd_count_max = count;
	}
out:
	return pampd;
}
1232
/*
 * fill the pageframe corresponding to the struct page with the data
 * from the passed pampd; persistent (zv) pampds only, and the pampd
 * itself is left intact
 */
static int zcache_pampd_get_data(char *data, size_t *bufsize, bool raw,
					void *pampd, struct tmem_pool *pool,
					struct tmem_oid *oid, uint32_t index)
{
	int ret = 0;

	BUG_ON(is_ephemeral(pool));
	zv_decompress((struct page *)(data), (unsigned long)pampd);
	return ret;
}
1247
/*
 * fill the pageframe corresponding to the struct page with the data
 * from the passed pampd, then free the pampd -- ephemeral (zbud)
 * gets are destructive
 */
static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
					void *pampd, struct tmem_pool *pool,
					struct tmem_oid *oid, uint32_t index)
{
	int ret = 0;

	BUG_ON(!is_ephemeral(pool));
	zbud_decompress((struct page *)(data), pampd);
	zbud_free_and_delist((struct zbud_hdr *)pampd);
	atomic_dec(&zcache_curr_eph_pampd_count);
	return ret;
}
1264
/*
 * free the pampd and remove it from any zcache lists
 * pampd must no longer be pointed to from any tmem data structures!
 */
static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
				struct tmem_oid *oid, uint32_t index)
{
	struct zcache_client *cli = pool->client;

	if (is_ephemeral(pool)) {
		/* ephemeral pampds live in zbud */
		zbud_free_and_delist((struct zbud_hdr *)pampd);
		atomic_dec(&zcache_curr_eph_pampd_count);
		BUG_ON(atomic_read(&zcache_curr_eph_pampd_count) < 0);
	} else {
		/* persistent pampds live in the client's zsmalloc pool */
		zv_free(cli->zspool, (unsigned long)pampd);
		atomic_dec(&zcache_curr_pers_pampd_count);
		BUG_ON(atomic_read(&zcache_curr_pers_pampd_count) < 0);
	}
}
1284
/* no per-object teardown needed for zcache */
static void zcache_pampd_free_obj(struct tmem_pool *pool, struct tmem_obj *obj)
{
}
1288
/* no per-object setup needed for zcache */
static void zcache_pampd_new_obj(struct tmem_obj *obj)
{
}
1292
/* in-place pampd replacement is not supported by zcache */
static int zcache_pampd_replace_in_obj(void *pampd, struct tmem_obj *obj)
{
	return -1;
}
1297
1298 static bool zcache_pampd_is_remote(void *pampd)
1299 {
1300         return 0;
1301 }
1302
/* hooks tmem uses to create/read/free compressed page descriptors */
static struct tmem_pamops zcache_pamops = {
	.create = zcache_pampd_create,
	.get_data = zcache_pampd_get_data,
	.get_data_and_free = zcache_pampd_get_data_and_free,
	.free = zcache_pampd_free,
	.free_obj = zcache_pampd_free_obj,
	.new_obj = zcache_pampd_new_obj,
	.replace_in_obj = zcache_pampd_replace_in_obj,
	.is_remote = zcache_pampd_is_remote,
};
1313
1314 /*
1315  * zcache compression/decompression and related per-cpu stuff
1316  */
1317
1318 static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
1319 #define ZCACHE_DSTMEM_ORDER 1
1320
/*
 * Compress PAGE_SIZE bytes from @from into this cpu's dstmem buffer.
 * Returns 1 on success with *out_va/*out_len set, 0 if no per-cpu
 * buffer is available -- NOTE the inverted convention (1 == success).
 * Must be called with irqs disabled so the per-cpu buffer and the
 * atomic kmap are safe; a compression error is fatal (BUG).
 */
static int zcache_compress(struct page *from, void **out_va, unsigned *out_len)
{
	int ret = 0;
	unsigned char *dmem = __get_cpu_var(zcache_dstmem);
	char *from_va;

	BUG_ON(!irqs_disabled());
	if (unlikely(dmem == NULL))
		goto out;  /* no buffer or no compressor so can't compress */
	*out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
	from_va = kmap_atomic(from);
	mb();	/* NOTE(review): barrier purpose undocumented -- confirm */
	ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
				out_len);
	BUG_ON(ret);
	*out_va = dmem;
	kunmap_atomic(from_va);
	ret = 1;
out:
	return ret;
}
1342
1343 static int zcache_comp_cpu_up(int cpu)
1344 {
1345         struct crypto_comp *tfm;
1346
1347         tfm = crypto_alloc_comp(zcache_comp_name, 0, 0);
1348         if (IS_ERR(tfm))
1349                 return NOTIFY_BAD;
1350         *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = tfm;
1351         return NOTIFY_OK;
1352 }
1353
1354 static void zcache_comp_cpu_down(int cpu)
1355 {
1356         struct crypto_comp *tfm;
1357
1358         tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu);
1359         crypto_free_comp(tfm);
1360         *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = NULL;
1361 }
1362
/*
 * CPU hotplug callback: on CPU_UP_PREPARE allocate the per-cpu
 * compression transform and destination buffer; on CPU_DEAD /
 * CPU_UP_CANCELED free them along with anything still sitting in the
 * cpu's preload cache.
 */
static int zcache_cpu_notifier(struct notifier_block *nb,
				unsigned long action, void *pcpu)
{
	int ret, cpu = (long)pcpu;
	struct zcache_preload *kp;

	switch (action) {
	case CPU_UP_PREPARE:
		ret = zcache_comp_cpu_up(cpu);
		if (ret != NOTIFY_OK) {
			pr_err("zcache: can't allocate compressor transform\n");
			return ret;
		}
		/*
		 * A failed buffer allocation is tolerated here:
		 * zcache_compress() checks for a NULL dstmem and
		 * simply fails the compression.
		 */
		per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_REPEAT, ZCACHE_DSTMEM_ORDER);
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		zcache_comp_cpu_down(cpu);
		free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
			ZCACHE_DSTMEM_ORDER);
		per_cpu(zcache_dstmem, cpu) = NULL;
		/* drain the dead cpu's preload cache */
		kp = &per_cpu(zcache_preloads, cpu);
		while (kp->nr) {
			kmem_cache_free(zcache_objnode_cache,
					kp->objnodes[kp->nr - 1]);
			kp->objnodes[kp->nr - 1] = NULL;
			kp->nr--;
		}
		if (kp->obj) {
			kmem_cache_free(zcache_obj_cache, kp->obj);
			kp->obj = NULL;
		}
		if (kp->page) {
			free_page((unsigned long)kp->page);
			kp->page = NULL;
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
1406
/* CPU hotplug notifier; dispatches to zcache_cpu_notifier() */
static struct notifier_block zcache_cpu_notifier_block = {
	.notifier_call = zcache_cpu_notifier
};
1410
1411 #ifdef CONFIG_SYSFS
/* generate a read-only sysfs attribute for an unsigned long counter */
#define ZCACHE_SYSFS_RO(_name) \
	static ssize_t zcache_##_name##_show(struct kobject *kobj, \
				struct kobj_attribute *attr, char *buf) \
	{ \
		return sprintf(buf, "%lu\n", zcache_##_name); \
	} \
	static struct kobj_attribute zcache_##_name##_attr = { \
		.attr = { .name = __stringify(_name), .mode = 0444 }, \
		.show = zcache_##_name##_show, \
	}

/* same, but for an atomic_t counter */
#define ZCACHE_SYSFS_RO_ATOMIC(_name) \
	static ssize_t zcache_##_name##_show(struct kobject *kobj, \
				struct kobj_attribute *attr, char *buf) \
	{ \
	    return sprintf(buf, "%d\n", atomic_read(&zcache_##_name)); \
	} \
	static struct kobj_attribute zcache_##_name##_attr = { \
		.attr = { .name = __stringify(_name), .mode = 0444 }, \
		.show = zcache_##_name##_show, \
	}

/* same, but delegating formatting to a custom helper _func(buf) */
#define ZCACHE_SYSFS_RO_CUSTOM(_name, _func) \
	static ssize_t zcache_##_name##_show(struct kobject *kobj, \
				struct kobj_attribute *attr, char *buf) \
	{ \
	    return _func(buf); \
	} \
	static struct kobj_attribute zcache_##_name##_attr = { \
		.attr = { .name = __stringify(_name), .mode = 0444 }, \
		.show = zcache_##_name##_show, \
	}
1444
/* instantiate one read-only show routine + attribute per exported stat */
ZCACHE_SYSFS_RO(curr_obj_count_max);
ZCACHE_SYSFS_RO(curr_objnode_count_max);
ZCACHE_SYSFS_RO(flush_total);
ZCACHE_SYSFS_RO(flush_found);
ZCACHE_SYSFS_RO(flobj_total);
ZCACHE_SYSFS_RO(flobj_found);
ZCACHE_SYSFS_RO(failed_eph_puts);
ZCACHE_SYSFS_RO(failed_pers_puts);
ZCACHE_SYSFS_RO(zbud_curr_zbytes);
ZCACHE_SYSFS_RO(zbud_cumul_zpages);
ZCACHE_SYSFS_RO(zbud_cumul_zbytes);
ZCACHE_SYSFS_RO(zbud_buddied_count);
ZCACHE_SYSFS_RO(zbpg_unused_list_count);
ZCACHE_SYSFS_RO(evicted_raw_pages);
ZCACHE_SYSFS_RO(evicted_unbuddied_pages);
ZCACHE_SYSFS_RO(evicted_buddied_pages);
ZCACHE_SYSFS_RO(failed_get_free_pages);
ZCACHE_SYSFS_RO(failed_alloc);
ZCACHE_SYSFS_RO(put_to_flush);
ZCACHE_SYSFS_RO(compress_poor);
ZCACHE_SYSFS_RO(mean_compress_poor);
ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_raw_pages);
ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_zpages);
ZCACHE_SYSFS_RO_ATOMIC(curr_obj_count);
ZCACHE_SYSFS_RO_ATOMIC(curr_objnode_count);
ZCACHE_SYSFS_RO_CUSTOM(zbud_unbuddied_list_counts,
			zbud_show_unbuddied_list_counts);
ZCACHE_SYSFS_RO_CUSTOM(zbud_cumul_chunk_counts,
			zbud_show_cumul_chunk_counts);
ZCACHE_SYSFS_RO_CUSTOM(zv_curr_dist_counts,
			zv_curr_dist_counts_show);
ZCACHE_SYSFS_RO_CUSTOM(zv_cumul_dist_counts,
			zv_cumul_dist_counts_show);
1478
/* every zcache sysfs attribute, collected for group registration */
static struct attribute *zcache_attrs[] = {
	&zcache_curr_obj_count_attr.attr,
	&zcache_curr_obj_count_max_attr.attr,
	&zcache_curr_objnode_count_attr.attr,
	&zcache_curr_objnode_count_max_attr.attr,
	&zcache_flush_total_attr.attr,
	&zcache_flobj_total_attr.attr,
	&zcache_flush_found_attr.attr,
	&zcache_flobj_found_attr.attr,
	&zcache_failed_eph_puts_attr.attr,
	&zcache_failed_pers_puts_attr.attr,
	&zcache_compress_poor_attr.attr,
	&zcache_mean_compress_poor_attr.attr,
	&zcache_zbud_curr_raw_pages_attr.attr,
	&zcache_zbud_curr_zpages_attr.attr,
	&zcache_zbud_curr_zbytes_attr.attr,
	&zcache_zbud_cumul_zpages_attr.attr,
	&zcache_zbud_cumul_zbytes_attr.attr,
	&zcache_zbud_buddied_count_attr.attr,
	&zcache_zbpg_unused_list_count_attr.attr,
	&zcache_evicted_raw_pages_attr.attr,
	&zcache_evicted_unbuddied_pages_attr.attr,
	&zcache_evicted_buddied_pages_attr.attr,
	&zcache_failed_get_free_pages_attr.attr,
	&zcache_failed_alloc_attr.attr,
	&zcache_put_to_flush_attr.attr,
	&zcache_zbud_unbuddied_list_counts_attr.attr,
	&zcache_zbud_cumul_chunk_counts_attr.attr,
	&zcache_zv_curr_dist_counts_attr.attr,
	&zcache_zv_cumul_dist_counts_attr.attr,
	&zcache_zv_max_zsize_attr.attr,
	&zcache_zv_max_mean_zsize_attr.attr,
	&zcache_zv_page_count_policy_percent_attr.attr,
	NULL,		/* sentinel */
};

/* exposed to userspace as a sysfs attribute group named "zcache" */
static struct attribute_group zcache_attr_group = {
	.attrs = zcache_attrs,
	.name = "zcache",
};
1519
1520 #endif /* CONFIG_SYSFS */
1521 /*
1522  * When zcache is disabled ("frozen"), pools can be created and destroyed,
1523  * but all puts (and thus all other operations that require memory allocation)
1524  * must fail.  If zcache is unfrozen, accepts puts, then frozen again,
1525  * data consistency requires all puts while frozen to be converted into
1526  * flushes.
1527  */
1528 static bool zcache_freeze;
1529
/*
 * zcache shrinker interface (only useful for ephemeral pages, so zbud only)
 */
static int shrink_zcache_memory(struct shrinker *shrink,
				struct shrink_control *sc)
{
	int ret = -1;
	int nr = sc->nr_to_scan;
	gfp_t gfp_mask = sc->gfp_mask;

	if (nr >= 0) {
		if (!(gfp_mask & __GFP_FS))
			/* does this case really need to be skipped? */
			goto out;
		zbud_evict_pages(nr);
	}
	/* report remaining raw zbud pages as the reclaimable count */
	ret = (int)atomic_read(&zcache_zbud_curr_raw_pages);
out:
	return ret;
}
1550
/* shrinker registration: lets memory reclaim trigger zbud eviction */
static struct shrinker zcache_shrinker = {
	.shrink = shrink_zcache_memory,
	.seeks = DEFAULT_SEEKS,
};
1555
1556 /*
1557  * zcache shims between cleancache/frontswap ops and tmem
1558  */
1559
/*
 * Compress and store @page at (pool, oid, index).  Must be called
 * with irqs disabled (required by the per-cpu preload machinery).
 * If zcache is frozen or the preload fails, the put is converted
 * into a flush so a stale copy can never be returned later.
 * Returns -1 if the put could not be attempted, otherwise
 * tmem_put()'s result.
 */
static int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,
				uint32_t index, struct page *page)
{
	struct tmem_pool *pool;
	int ret = -1;

	BUG_ON(!irqs_disabled());
	pool = zcache_get_pool_by_id(cli_id, pool_id);
	if (unlikely(pool == NULL))
		goto out;
	if (!zcache_freeze && zcache_do_preload(pool) == 0) {
		/* preload succeeded; irqs stay disabled throughout the put */
		ret = tmem_put(pool, oidp, index, (char *)(page),
				PAGE_SIZE, 0, is_ephemeral(pool));
		if (ret < 0) {
			if (is_ephemeral(pool))
				zcache_failed_eph_puts++;
			else
				zcache_failed_pers_puts++;
		}
		zcache_put_pool(pool);
	} else {
		zcache_put_to_flush++;
		if (atomic_read(&pool->obj_count) > 0)
			/* the put fails whether the flush succeeds or not */
			(void)tmem_flush_page(pool, oidp, index);
		zcache_put_pool(pool);
	}
out:
	return ret;
}
1591
1592 static int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp,
1593                                 uint32_t index, struct page *page)
1594 {
1595         struct tmem_pool *pool;
1596         int ret = -1;
1597         unsigned long flags;
1598         size_t size = PAGE_SIZE;
1599
1600         local_irq_save(flags);
1601         pool = zcache_get_pool_by_id(cli_id, pool_id);
1602         if (likely(pool != NULL)) {
1603                 if (atomic_read(&pool->obj_count) > 0)
1604                         ret = tmem_get(pool, oidp, index, (char *)(page),
1605                                         &size, 0, is_ephemeral(pool));
1606                 zcache_put_pool(pool);
1607         }
1608         local_irq_restore(flags);
1609         return ret;
1610 }
1611
1612 static int zcache_flush_page(int cli_id, int pool_id,
1613                                 struct tmem_oid *oidp, uint32_t index)
1614 {
1615         struct tmem_pool *pool;
1616         int ret = -1;
1617         unsigned long flags;
1618
1619         local_irq_save(flags);
1620         zcache_flush_total++;
1621         pool = zcache_get_pool_by_id(cli_id, pool_id);
1622         if (likely(pool != NULL)) {
1623                 if (atomic_read(&pool->obj_count) > 0)
1624                         ret = tmem_flush_page(pool, oidp, index);
1625                 zcache_put_pool(pool);
1626         }
1627         if (ret >= 0)
1628                 zcache_flush_found++;
1629         local_irq_restore(flags);
1630         return ret;
1631 }
1632
1633 static int zcache_flush_object(int cli_id, int pool_id,
1634                                 struct tmem_oid *oidp)
1635 {
1636         struct tmem_pool *pool;
1637         int ret = -1;
1638         unsigned long flags;
1639
1640         local_irq_save(flags);
1641         zcache_flobj_total++;
1642         pool = zcache_get_pool_by_id(cli_id, pool_id);
1643         if (likely(pool != NULL)) {
1644                 if (atomic_read(&pool->obj_count) > 0)
1645                         ret = tmem_flush_object(pool, oidp);
1646                 zcache_put_pool(pool);
1647         }
1648         if (ret >= 0)
1649                 zcache_flobj_found++;
1650         local_irq_restore(flags);
1651         return ret;
1652 }
1653
1654 static int zcache_destroy_pool(int cli_id, int pool_id)
1655 {
1656         struct tmem_pool *pool = NULL;
1657         struct zcache_client *cli = NULL;
1658         int ret = -1;
1659
1660         if (pool_id < 0)
1661                 goto out;
1662         if (cli_id == LOCAL_CLIENT)
1663                 cli = &zcache_host;
1664         else if ((unsigned int)cli_id < MAX_CLIENTS)
1665                 cli = &zcache_clients[cli_id];
1666         if (cli == NULL)
1667                 goto out;
1668         atomic_inc(&cli->refcount);
1669         pool = idr_find(&cli->tmem_pools, pool_id);
1670         if (pool == NULL)
1671                 goto out;
1672         idr_remove(&cli->tmem_pools, pool_id);
1673         /* wait for pool activity on other cpus to quiesce */
1674         while (atomic_read(&pool->refcount) != 0)
1675                 ;
1676         atomic_dec(&cli->refcount);
1677         local_bh_disable();
1678         ret = tmem_destroy_pool(pool);
1679         local_bh_enable();
1680         kfree(pool);
1681         pr_info("zcache: destroyed pool id=%d, cli_id=%d\n",
1682                         pool_id, cli_id);
1683 out:
1684         return ret;
1685 }
1686
/*
 * Create a new tmem pool for client @cli_id (LOCAL_CLIENT means the
 * kernel itself) with the given TMEM_POOL_* @flags.  Returns the new
 * pool id on success, or -1 on failure.
 */
static int zcache_new_pool(uint16_t cli_id, uint32_t flags)
{
	int poolid = -1;
	struct tmem_pool *pool;
	struct zcache_client *cli = NULL;
	int r;

	if (cli_id == LOCAL_CLIENT)
		cli = &zcache_host;
	else if ((unsigned int)cli_id < MAX_CLIENTS)
		cli = &zcache_clients[cli_id];
	if (cli == NULL)
		goto out;
	/* pin the client while we touch its pool idr */
	atomic_inc(&cli->refcount);
	/* GFP_ATOMIC: callers may hold locks / run with irqs disabled */
	pool = kmalloc(sizeof(struct tmem_pool), GFP_ATOMIC);
	if (pool == NULL) {
		pr_info("zcache: pool creation failed: out of memory\n");
		goto out;
	}

	/* old-style idr API: preload ids, then allocate; -EAGAIN means
	 * the preloaded ids were consumed by a racer, so retry */
	do {
		r = idr_pre_get(&cli->tmem_pools, GFP_ATOMIC);
		if (r != 1) {
			kfree(pool);
			pr_info("zcache: pool creation failed: out of memory\n");
			goto out;
		}
		r = idr_get_new(&cli->tmem_pools, pool, &poolid);
	} while (r == -EAGAIN);
	if (r) {
		pr_info("zcache: pool creation failed: error %d\n", r);
		kfree(pool);
		goto out;
	}

	/* NOTE(review): the pool is already visible in the idr before
	 * these fields are initialized; presumably no lookup can race
	 * with pool creation — confirm against callers. */
	atomic_set(&pool->refcount, 0);
	pool->client = cli;
	pool->pool_id = poolid;
	tmem_new_pool(pool, flags);
	pr_info("zcache: created %s tmem pool, id=%d, client=%d\n",
		flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
		poolid, cli_id);
out:
	if (cli != NULL)
		atomic_dec(&cli->refcount);
	return poolid;
}
1734
1735 /**********
1736  * Two kernel functionalities currently can be layered on top of tmem.
1737  * These are "cleancache" which is used as a second-chance cache for clean
1738  * page cache pages; and "frontswap" which is used for swap pages
1739  * to avoid writes to disk.  A generic "shim" is provided here for each
1740  * to translate in-kernel semantics to zcache semantics.
1741  */
1742
1743 #ifdef CONFIG_CLEANCACHE
1744 static void zcache_cleancache_put_page(int pool_id,
1745                                         struct cleancache_filekey key,
1746                                         pgoff_t index, struct page *page)
1747 {
1748         u32 ind = (u32) index;
1749         struct tmem_oid oid = *(struct tmem_oid *)&key;
1750
1751         if (likely(ind == index))
1752                 (void)zcache_put_page(LOCAL_CLIENT, pool_id, &oid, index, page);
1753 }
1754
1755 static int zcache_cleancache_get_page(int pool_id,
1756                                         struct cleancache_filekey key,
1757                                         pgoff_t index, struct page *page)
1758 {
1759         u32 ind = (u32) index;
1760         struct tmem_oid oid = *(struct tmem_oid *)&key;
1761         int ret = -1;
1762
1763         if (likely(ind == index))
1764                 ret = zcache_get_page(LOCAL_CLIENT, pool_id, &oid, index, page);
1765         return ret;
1766 }
1767
1768 static void zcache_cleancache_flush_page(int pool_id,
1769                                         struct cleancache_filekey key,
1770                                         pgoff_t index)
1771 {
1772         u32 ind = (u32) index;
1773         struct tmem_oid oid = *(struct tmem_oid *)&key;
1774
1775         if (likely(ind == index))
1776                 (void)zcache_flush_page(LOCAL_CLIENT, pool_id, &oid, ind);
1777 }
1778
1779 static void zcache_cleancache_flush_inode(int pool_id,
1780                                         struct cleancache_filekey key)
1781 {
1782         struct tmem_oid oid = *(struct tmem_oid *)&key;
1783
1784         (void)zcache_flush_object(LOCAL_CLIENT, pool_id, &oid);
1785 }
1786
1787 static void zcache_cleancache_flush_fs(int pool_id)
1788 {
1789         if (pool_id >= 0)
1790                 (void)zcache_destroy_pool(LOCAL_CLIENT, pool_id);
1791 }
1792
1793 static int zcache_cleancache_init_fs(size_t pagesize)
1794 {
1795         BUG_ON(sizeof(struct cleancache_filekey) !=
1796                                 sizeof(struct tmem_oid));
1797         BUG_ON(pagesize != PAGE_SIZE);
1798         return zcache_new_pool(LOCAL_CLIENT, 0);
1799 }
1800
1801 static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize)
1802 {
1803         /* shared pools are unsupported and map to private */
1804         BUG_ON(sizeof(struct cleancache_filekey) !=
1805                                 sizeof(struct tmem_oid));
1806         BUG_ON(pagesize != PAGE_SIZE);
1807         return zcache_new_pool(LOCAL_CLIENT, 0);
1808 }
1809
/*
 * Ops vtable handed to the cleancache core by
 * zcache_cleancache_register_ops(); the local "flush" functions map to
 * the cleancache API's "invalidate" entry points.
 */
static struct cleancache_ops zcache_cleancache_ops = {
	.put_page = zcache_cleancache_put_page,
	.get_page = zcache_cleancache_get_page,
	.invalidate_page = zcache_cleancache_flush_page,
	.invalidate_inode = zcache_cleancache_flush_inode,
	.invalidate_fs = zcache_cleancache_flush_fs,
	.init_shared_fs = zcache_cleancache_init_shared_fs,
	.init_fs = zcache_cleancache_init_fs
};
1819
1820 struct cleancache_ops zcache_cleancache_register_ops(void)
1821 {
1822         struct cleancache_ops old_ops =
1823                 cleancache_register_ops(&zcache_cleancache_ops);
1824
1825         return old_ops;
1826 }
1827 #endif
1828
1829 #ifdef CONFIG_FRONTSWAP
1830 /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
1831 static int zcache_frontswap_poolid = -1;
1832
1833 /*
1834  * Swizzling increases objects per swaptype, increasing tmem concurrency
1835  * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
1836  * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
1837  * frontswap_load(), but has side-effects. Hence using 8.
1838  */
1839 #define SWIZ_BITS               8
1840 #define SWIZ_MASK               ((1 << SWIZ_BITS) - 1)
1841 #define _oswiz(_type, _ind)     ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
1842 #define iswiz(_ind)             (_ind >> SWIZ_BITS)
1843
1844 static inline struct tmem_oid oswiz(unsigned type, u32 ind)
1845 {
1846         struct tmem_oid oid = { .oid = { 0 } };
1847         oid.oid[0] = _oswiz(type, ind);
1848         return oid;
1849 }
1850
1851 static int zcache_frontswap_store(unsigned type, pgoff_t offset,
1852                                    struct page *page)
1853 {
1854         u64 ind64 = (u64)offset;
1855         u32 ind = (u32)offset;
1856         struct tmem_oid oid = oswiz(type, ind);
1857         int ret = -1;
1858         unsigned long flags;
1859
1860         BUG_ON(!PageLocked(page));
1861         if (likely(ind64 == ind)) {
1862                 local_irq_save(flags);
1863                 ret = zcache_put_page(LOCAL_CLIENT, zcache_frontswap_poolid,
1864                                         &oid, iswiz(ind), page);
1865                 local_irq_restore(flags);
1866         }
1867         return ret;
1868 }
1869
1870 /* returns 0 if the page was successfully gotten from frontswap, -1 if
1871  * was not present (should never happen!) */
1872 static int zcache_frontswap_load(unsigned type, pgoff_t offset,
1873                                    struct page *page)
1874 {
1875         u64 ind64 = (u64)offset;
1876         u32 ind = (u32)offset;
1877         struct tmem_oid oid = oswiz(type, ind);
1878         int ret = -1;
1879
1880         BUG_ON(!PageLocked(page));
1881         if (likely(ind64 == ind))
1882                 ret = zcache_get_page(LOCAL_CLIENT, zcache_frontswap_poolid,
1883                                         &oid, iswiz(ind), page);
1884         return ret;
1885 }
1886
1887 /* flush a single page from frontswap */
1888 static void zcache_frontswap_flush_page(unsigned type, pgoff_t offset)
1889 {
1890         u64 ind64 = (u64)offset;
1891         u32 ind = (u32)offset;
1892         struct tmem_oid oid = oswiz(type, ind);
1893
1894         if (likely(ind64 == ind))
1895                 (void)zcache_flush_page(LOCAL_CLIENT, zcache_frontswap_poolid,
1896                                         &oid, iswiz(ind));
1897 }
1898
1899 /* flush all pages from the passed swaptype */
1900 static void zcache_frontswap_flush_area(unsigned type)
1901 {
1902         struct tmem_oid oid;
1903         int ind;
1904
1905         for (ind = SWIZ_MASK; ind >= 0; ind--) {
1906                 oid = oswiz(type, ind);
1907                 (void)zcache_flush_object(LOCAL_CLIENT,
1908                                                 zcache_frontswap_poolid, &oid);
1909         }
1910 }
1911
1912 static void zcache_frontswap_init(unsigned ignored)
1913 {
1914         /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
1915         if (zcache_frontswap_poolid < 0)
1916                 zcache_frontswap_poolid =
1917                         zcache_new_pool(LOCAL_CLIENT, TMEM_POOL_PERSIST);
1918 }
1919
/*
 * Ops vtable handed to the frontswap core by
 * zcache_frontswap_register_ops(); the local "flush" functions map to
 * the frontswap API's "invalidate" entry points.
 */
static struct frontswap_ops zcache_frontswap_ops = {
	.store = zcache_frontswap_store,
	.load = zcache_frontswap_load,
	.invalidate_page = zcache_frontswap_flush_page,
	.invalidate_area = zcache_frontswap_flush_area,
	.init = zcache_frontswap_init
};
1927
1928 struct frontswap_ops zcache_frontswap_register_ops(void)
1929 {
1930         struct frontswap_ops old_ops =
1931                 frontswap_register_ops(&zcache_frontswap_ops);
1932
1933         return old_ops;
1934 }
1935 #endif
1936
1937 /*
1938  * zcache initialization
1939  * NOTE FOR NOW zcache MUST BE PROVIDED AS A KERNEL BOOT PARAMETER OR
1940  * NOTHING HAPPENS!
1941  */
1942
/* set by the "zcache" / "zcache=" kernel boot parameters */
static int zcache_enabled;

/* handler for the bare "zcache" boot parameter: just turn zcache on */
static int __init enable_zcache(char *s)
{
	zcache_enabled = 1;
	return 1;
}
__setup("zcache", enable_zcache);
1951
/* allow independent dynamic disabling of cleancache and frontswap */

/* cleancache layering is used unless "nocleancache" is given at boot */
static int use_cleancache = 1;

static int __init no_cleancache(char *s)
{
	use_cleancache = 0;
	return 1;
}

__setup("nocleancache", no_cleancache);
1963
/* frontswap layering is used unless "nofrontswap" is given at boot */
static int use_frontswap = 1;

static int __init no_frontswap(char *s)
{
	use_frontswap = 0;
	return 1;
}

__setup("nofrontswap", no_frontswap);
1973
1974 static int __init enable_zcache_compressor(char *s)
1975 {
1976         strncpy(zcache_comp_name, s, ZCACHE_COMP_NAME_SZ);
1977         zcache_enabled = 1;
1978         return 1;
1979 }
1980 __setup("zcache=", enable_zcache_compressor);
1981
1982
1983 static int __init zcache_comp_init(void)
1984 {
1985         int ret = 0;
1986
1987         /* check crypto algorithm */
1988         if (*zcache_comp_name != '\0') {
1989                 ret = crypto_has_comp(zcache_comp_name, 0, 0);
1990                 if (!ret)
1991                         pr_info("zcache: %s not supported\n",
1992                                         zcache_comp_name);
1993         }
1994         if (!ret)
1995                 strcpy(zcache_comp_name, "lzo");
1996         ret = crypto_has_comp(zcache_comp_name, 0, 0);
1997         if (!ret) {
1998                 ret = 1;
1999                 goto out;
2000         }
2001         pr_info("zcache: using %s compressor\n", zcache_comp_name);
2002
2003         /* alloc percpu transforms */
2004         ret = 0;
2005         zcache_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
2006         if (!zcache_comp_pcpu_tfms)
2007                 ret = 1;
2008 out:
2009         return ret;
2010 }
2011
2012 static int __init zcache_init(void)
2013 {
2014         int ret = 0;
2015
2016 #ifdef CONFIG_SYSFS
2017         ret = sysfs_create_group(mm_kobj, &zcache_attr_group);
2018         if (ret) {
2019                 pr_err("zcache: can't create sysfs\n");
2020                 goto out;
2021         }
2022 #endif /* CONFIG_SYSFS */
2023
2024         if (zcache_enabled) {
2025                 unsigned int cpu;
2026
2027                 tmem_register_hostops(&zcache_hostops);
2028                 tmem_register_pamops(&zcache_pamops);
2029                 ret = register_cpu_notifier(&zcache_cpu_notifier_block);
2030                 if (ret) {
2031                         pr_err("zcache: can't register cpu notifier\n");
2032                         goto out;
2033                 }
2034                 ret = zcache_comp_init();
2035                 if (ret) {
2036                         pr_err("zcache: compressor initialization failed\n");
2037                         goto out;
2038                 }
2039                 for_each_online_cpu(cpu) {
2040                         void *pcpu = (void *)(long)cpu;
2041                         zcache_cpu_notifier(&zcache_cpu_notifier_block,
2042                                 CPU_UP_PREPARE, pcpu);
2043                 }
2044         }
2045         zcache_objnode_cache = kmem_cache_create("zcache_objnode",
2046                                 sizeof(struct tmem_objnode), 0, 0, NULL);
2047         zcache_obj_cache = kmem_cache_create("zcache_obj",
2048                                 sizeof(struct tmem_obj), 0, 0, NULL);
2049         ret = zcache_new_client(LOCAL_CLIENT);
2050         if (ret) {
2051                 pr_err("zcache: can't create client\n");
2052                 goto out;
2053         }
2054
2055 #ifdef CONFIG_CLEANCACHE
2056         if (zcache_enabled && use_cleancache) {
2057                 struct cleancache_ops old_ops;
2058
2059                 zbud_init();
2060                 register_shrinker(&zcache_shrinker);
2061                 old_ops = zcache_cleancache_register_ops();
2062                 pr_info("zcache: cleancache enabled using kernel "
2063                         "transcendent memory and compression buddies\n");
2064                 if (old_ops.init_fs != NULL)
2065                         pr_warning("zcache: cleancache_ops overridden");
2066         }
2067 #endif
2068 #ifdef CONFIG_FRONTSWAP
2069         if (zcache_enabled && use_frontswap) {
2070                 struct frontswap_ops old_ops;
2071
2072                 old_ops = zcache_frontswap_register_ops();
2073                 pr_info("zcache: frontswap enabled using kernel "
2074                         "transcendent memory and zsmalloc\n");
2075                 if (old_ops.init != NULL)
2076                         pr_warning("zcache: frontswap_ops overridden");
2077         }
2078 #endif
2079 out:
2080         return ret;
2081 }
2082
2083 module_init(zcache_init)