zram: remove `num_migrated' device attr
[cascardo/linux.git] / drivers / block / zram / zram_drv.c
1 /*
2  * Compressed RAM block device
3  *
4  * Copyright (C) 2008, 2009, 2010  Nitin Gupta
5  *               2012, 2013 Minchan Kim
6  *
7  * This code is released using a dual license strategy: BSD/GPL
8  * You can choose the licence that better fits your requirements.
9  *
10  * Released under the terms of 3-clause BSD License
11  * Released under the terms of GNU General Public License Version 2.0
12  *
13  */
14
15 #define KMSG_COMPONENT "zram"
16 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17
18 #ifdef CONFIG_ZRAM_DEBUG
19 #define DEBUG
20 #endif
21
22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/bio.h>
25 #include <linux/bitops.h>
26 #include <linux/blkdev.h>
27 #include <linux/buffer_head.h>
28 #include <linux/device.h>
29 #include <linux/genhd.h>
30 #include <linux/highmem.h>
31 #include <linux/slab.h>
32 #include <linux/string.h>
33 #include <linux/vmalloc.h>
34 #include <linux/err.h>
35
36 #include "zram_drv.h"
37
38 /* Globals */
39 static int zram_major;
40 static struct zram *zram_devices;
41 static const char *default_compressor = "lzo";
42
43 /* Module params (documentation at end) */
44 static unsigned int num_devices = 1;
45
46 #define ZRAM_ATTR_RO(name)                                              \
47 static ssize_t name##_show(struct device *d,            \
48                                 struct device_attribute *attr, char *b) \
49 {                                                                       \
50         struct zram *zram = dev_to_zram(d);                             \
51         return scnprintf(b, PAGE_SIZE, "%llu\n",                        \
52                 (u64)atomic64_read(&zram->stats.name));                 \
53 }                                                                       \
54 static DEVICE_ATTR_RO(name);
55
56 static inline bool init_done(struct zram *zram)
57 {
58         return zram->disksize;
59 }
60
61 static inline struct zram *dev_to_zram(struct device *dev)
62 {
63         return (struct zram *)dev_to_disk(dev)->private_data;
64 }
65
66 static ssize_t compact_store(struct device *dev,
67                 struct device_attribute *attr, const char *buf, size_t len)
68 {
69         unsigned long nr_migrated;
70         struct zram *zram = dev_to_zram(dev);
71         struct zram_meta *meta;
72
73         down_read(&zram->init_lock);
74         if (!init_done(zram)) {
75                 up_read(&zram->init_lock);
76                 return -EINVAL;
77         }
78
79         meta = zram->meta;
80         nr_migrated = zs_compact(meta->mem_pool);
81         atomic64_add(nr_migrated, &zram->stats.num_migrated);
82         up_read(&zram->init_lock);
83
84         return len;
85 }
86
87 static ssize_t disksize_show(struct device *dev,
88                 struct device_attribute *attr, char *buf)
89 {
90         struct zram *zram = dev_to_zram(dev);
91
92         return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
93 }
94
95 static ssize_t initstate_show(struct device *dev,
96                 struct device_attribute *attr, char *buf)
97 {
98         u32 val;
99         struct zram *zram = dev_to_zram(dev);
100
101         down_read(&zram->init_lock);
102         val = init_done(zram);
103         up_read(&zram->init_lock);
104
105         return scnprintf(buf, PAGE_SIZE, "%u\n", val);
106 }
107
108 static ssize_t orig_data_size_show(struct device *dev,
109                 struct device_attribute *attr, char *buf)
110 {
111         struct zram *zram = dev_to_zram(dev);
112
113         return scnprintf(buf, PAGE_SIZE, "%llu\n",
114                 (u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
115 }
116
117 static ssize_t mem_used_total_show(struct device *dev,
118                 struct device_attribute *attr, char *buf)
119 {
120         u64 val = 0;
121         struct zram *zram = dev_to_zram(dev);
122
123         down_read(&zram->init_lock);
124         if (init_done(zram)) {
125                 struct zram_meta *meta = zram->meta;
126                 val = zs_get_total_pages(meta->mem_pool);
127         }
128         up_read(&zram->init_lock);
129
130         return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
131 }
132
133 static ssize_t max_comp_streams_show(struct device *dev,
134                 struct device_attribute *attr, char *buf)
135 {
136         int val;
137         struct zram *zram = dev_to_zram(dev);
138
139         down_read(&zram->init_lock);
140         val = zram->max_comp_streams;
141         up_read(&zram->init_lock);
142
143         return scnprintf(buf, PAGE_SIZE, "%d\n", val);
144 }
145
146 static ssize_t mem_limit_show(struct device *dev,
147                 struct device_attribute *attr, char *buf)
148 {
149         u64 val;
150         struct zram *zram = dev_to_zram(dev);
151
152         down_read(&zram->init_lock);
153         val = zram->limit_pages;
154         up_read(&zram->init_lock);
155
156         return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
157 }
158
159 static ssize_t mem_limit_store(struct device *dev,
160                 struct device_attribute *attr, const char *buf, size_t len)
161 {
162         u64 limit;
163         char *tmp;
164         struct zram *zram = dev_to_zram(dev);
165
166         limit = memparse(buf, &tmp);
167         if (buf == tmp) /* no chars parsed, invalid input */
168                 return -EINVAL;
169
170         down_write(&zram->init_lock);
171         zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
172         up_write(&zram->init_lock);
173
174         return len;
175 }
176
177 static ssize_t mem_used_max_show(struct device *dev,
178                 struct device_attribute *attr, char *buf)
179 {
180         u64 val = 0;
181         struct zram *zram = dev_to_zram(dev);
182
183         down_read(&zram->init_lock);
184         if (init_done(zram))
185                 val = atomic_long_read(&zram->stats.max_used_pages);
186         up_read(&zram->init_lock);
187
188         return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
189 }
190
191 static ssize_t mem_used_max_store(struct device *dev,
192                 struct device_attribute *attr, const char *buf, size_t len)
193 {
194         int err;
195         unsigned long val;
196         struct zram *zram = dev_to_zram(dev);
197
198         err = kstrtoul(buf, 10, &val);
199         if (err || val != 0)
200                 return -EINVAL;
201
202         down_read(&zram->init_lock);
203         if (init_done(zram)) {
204                 struct zram_meta *meta = zram->meta;
205                 atomic_long_set(&zram->stats.max_used_pages,
206                                 zs_get_total_pages(meta->mem_pool));
207         }
208         up_read(&zram->init_lock);
209
210         return len;
211 }
212
213 static ssize_t max_comp_streams_store(struct device *dev,
214                 struct device_attribute *attr, const char *buf, size_t len)
215 {
216         int num;
217         struct zram *zram = dev_to_zram(dev);
218         int ret;
219
220         ret = kstrtoint(buf, 0, &num);
221         if (ret < 0)
222                 return ret;
223         if (num < 1)
224                 return -EINVAL;
225
226         down_write(&zram->init_lock);
227         if (init_done(zram)) {
228                 if (!zcomp_set_max_streams(zram->comp, num)) {
229                         pr_info("Cannot change max compression streams\n");
230                         ret = -EINVAL;
231                         goto out;
232                 }
233         }
234
235         zram->max_comp_streams = num;
236         ret = len;
237 out:
238         up_write(&zram->init_lock);
239         return ret;
240 }
241
242 static ssize_t comp_algorithm_show(struct device *dev,
243                 struct device_attribute *attr, char *buf)
244 {
245         size_t sz;
246         struct zram *zram = dev_to_zram(dev);
247
248         down_read(&zram->init_lock);
249         sz = zcomp_available_show(zram->compressor, buf);
250         up_read(&zram->init_lock);
251
252         return sz;
253 }
254
255 static ssize_t comp_algorithm_store(struct device *dev,
256                 struct device_attribute *attr, const char *buf, size_t len)
257 {
258         struct zram *zram = dev_to_zram(dev);
259         down_write(&zram->init_lock);
260         if (init_done(zram)) {
261                 up_write(&zram->init_lock);
262                 pr_info("Can't change algorithm for initialized device\n");
263                 return -EBUSY;
264         }
265         strlcpy(zram->compressor, buf, sizeof(zram->compressor));
266         up_write(&zram->init_lock);
267         return len;
268 }
269
270 /* flag operations needs meta->tb_lock */
271 static int zram_test_flag(struct zram_meta *meta, u32 index,
272                         enum zram_pageflags flag)
273 {
274         return meta->table[index].value & BIT(flag);
275 }
276
277 static void zram_set_flag(struct zram_meta *meta, u32 index,
278                         enum zram_pageflags flag)
279 {
280         meta->table[index].value |= BIT(flag);
281 }
282
283 static void zram_clear_flag(struct zram_meta *meta, u32 index,
284                         enum zram_pageflags flag)
285 {
286         meta->table[index].value &= ~BIT(flag);
287 }
288
289 static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
290 {
291         return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
292 }
293
294 static void zram_set_obj_size(struct zram_meta *meta,
295                                         u32 index, size_t size)
296 {
297         unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;
298
299         meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
300 }
301
302 static inline int is_partial_io(struct bio_vec *bvec)
303 {
304         return bvec->bv_len != PAGE_SIZE;
305 }
306
307 /*
308  * Check if request is within bounds and aligned on zram logical blocks.
309  */
310 static inline int valid_io_request(struct zram *zram,
311                 sector_t start, unsigned int size)
312 {
313         u64 end, bound;
314
315         /* unaligned request */
316         if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
317                 return 0;
318         if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
319                 return 0;
320
321         end = start + (size >> SECTOR_SHIFT);
322         bound = zram->disksize >> SECTOR_SHIFT;
323         /* out of range range */
324         if (unlikely(start >= bound || end > bound || start > end))
325                 return 0;
326
327         /* I/O request is valid */
328         return 1;
329 }
330
331 static void zram_meta_free(struct zram_meta *meta, u64 disksize)
332 {
333         size_t num_pages = disksize >> PAGE_SHIFT;
334         size_t index;
335
336         /* Free all pages that are still in this zram device */
337         for (index = 0; index < num_pages; index++) {
338                 unsigned long handle = meta->table[index].handle;
339
340                 if (!handle)
341                         continue;
342
343                 zs_free(meta->mem_pool, handle);
344         }
345
346         zs_destroy_pool(meta->mem_pool);
347         vfree(meta->table);
348         kfree(meta);
349 }
350
351 static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
352 {
353         size_t num_pages;
354         char pool_name[8];
355         struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
356
357         if (!meta)
358                 return NULL;
359
360         num_pages = disksize >> PAGE_SHIFT;
361         meta->table = vzalloc(num_pages * sizeof(*meta->table));
362         if (!meta->table) {
363                 pr_err("Error allocating zram address table\n");
364                 goto out_error;
365         }
366
367         snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
368         meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
369         if (!meta->mem_pool) {
370                 pr_err("Error creating memory pool\n");
371                 goto out_error;
372         }
373
374         return meta;
375
376 out_error:
377         vfree(meta->table);
378         kfree(meta);
379         return NULL;
380 }
381
382 static inline bool zram_meta_get(struct zram *zram)
383 {
384         if (atomic_inc_not_zero(&zram->refcount))
385                 return true;
386         return false;
387 }
388
389 static inline void zram_meta_put(struct zram *zram)
390 {
391         atomic_dec(&zram->refcount);
392 }
393
394 static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
395 {
396         if (*offset + bvec->bv_len >= PAGE_SIZE)
397                 (*index)++;
398         *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
399 }
400
401 static int page_zero_filled(void *ptr)
402 {
403         unsigned int pos;
404         unsigned long *page;
405
406         page = (unsigned long *)ptr;
407
408         for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
409                 if (page[pos])
410                         return 0;
411         }
412
413         return 1;
414 }
415
416 static void handle_zero_page(struct bio_vec *bvec)
417 {
418         struct page *page = bvec->bv_page;
419         void *user_mem;
420
421         user_mem = kmap_atomic(page);
422         if (is_partial_io(bvec))
423                 memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
424         else
425                 clear_page(user_mem);
426         kunmap_atomic(user_mem);
427
428         flush_dcache_page(page);
429 }
430
431
432 /*
433  * To protect concurrent access to the same index entry,
434  * caller should hold this table index entry's bit_spinlock to
435  * indicate this index entry is accessing.
436  */
437 static void zram_free_page(struct zram *zram, size_t index)
438 {
439         struct zram_meta *meta = zram->meta;
440         unsigned long handle = meta->table[index].handle;
441
442         if (unlikely(!handle)) {
443                 /*
444                  * No memory is allocated for zero filled pages.
445                  * Simply clear zero page flag.
446                  */
447                 if (zram_test_flag(meta, index, ZRAM_ZERO)) {
448                         zram_clear_flag(meta, index, ZRAM_ZERO);
449                         atomic64_dec(&zram->stats.zero_pages);
450                 }
451                 return;
452         }
453
454         zs_free(meta->mem_pool, handle);
455
456         atomic64_sub(zram_get_obj_size(meta, index),
457                         &zram->stats.compr_data_size);
458         atomic64_dec(&zram->stats.pages_stored);
459
460         meta->table[index].handle = 0;
461         zram_set_obj_size(meta, index, 0);
462 }
463
464 static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
465 {
466         int ret = 0;
467         unsigned char *cmem;
468         struct zram_meta *meta = zram->meta;
469         unsigned long handle;
470         size_t size;
471
472         bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
473         handle = meta->table[index].handle;
474         size = zram_get_obj_size(meta, index);
475
476         if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
477                 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
478                 clear_page(mem);
479                 return 0;
480         }
481
482         cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
483         if (size == PAGE_SIZE)
484                 copy_page(mem, cmem);
485         else
486                 ret = zcomp_decompress(zram->comp, cmem, size, mem);
487         zs_unmap_object(meta->mem_pool, handle);
488         bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
489
490         /* Should NEVER happen. Return bio error if it does. */
491         if (unlikely(ret)) {
492                 pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
493                 return ret;
494         }
495
496         return 0;
497 }
498
499 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
500                           u32 index, int offset)
501 {
502         int ret;
503         struct page *page;
504         unsigned char *user_mem, *uncmem = NULL;
505         struct zram_meta *meta = zram->meta;
506         page = bvec->bv_page;
507
508         bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
509         if (unlikely(!meta->table[index].handle) ||
510                         zram_test_flag(meta, index, ZRAM_ZERO)) {
511                 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
512                 handle_zero_page(bvec);
513                 return 0;
514         }
515         bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
516
517         if (is_partial_io(bvec))
518                 /* Use  a temporary buffer to decompress the page */
519                 uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
520
521         user_mem = kmap_atomic(page);
522         if (!is_partial_io(bvec))
523                 uncmem = user_mem;
524
525         if (!uncmem) {
526                 pr_info("Unable to allocate temp memory\n");
527                 ret = -ENOMEM;
528                 goto out_cleanup;
529         }
530
531         ret = zram_decompress_page(zram, uncmem, index);
532         /* Should NEVER happen. Return bio error if it does. */
533         if (unlikely(ret))
534                 goto out_cleanup;
535
536         if (is_partial_io(bvec))
537                 memcpy(user_mem + bvec->bv_offset, uncmem + offset,
538                                 bvec->bv_len);
539
540         flush_dcache_page(page);
541         ret = 0;
542 out_cleanup:
543         kunmap_atomic(user_mem);
544         if (is_partial_io(bvec))
545                 kfree(uncmem);
546         return ret;
547 }
548
549 static inline void update_used_max(struct zram *zram,
550                                         const unsigned long pages)
551 {
552         unsigned long old_max, cur_max;
553
554         old_max = atomic_long_read(&zram->stats.max_used_pages);
555
556         do {
557                 cur_max = old_max;
558                 if (pages > cur_max)
559                         old_max = atomic_long_cmpxchg(
560                                 &zram->stats.max_used_pages, cur_max, pages);
561         } while (old_max != cur_max);
562 }
563
564 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
565                            int offset)
566 {
567         int ret = 0;
568         size_t clen;
569         unsigned long handle;
570         struct page *page;
571         unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
572         struct zram_meta *meta = zram->meta;
573         struct zcomp_strm *zstrm;
574         bool locked = false;
575         unsigned long alloced_pages;
576
577         page = bvec->bv_page;
578         if (is_partial_io(bvec)) {
579                 /*
580                  * This is a partial IO. We need to read the full page
581                  * before to write the changes.
582                  */
583                 uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
584                 if (!uncmem) {
585                         ret = -ENOMEM;
586                         goto out;
587                 }
588                 ret = zram_decompress_page(zram, uncmem, index);
589                 if (ret)
590                         goto out;
591         }
592
593         zstrm = zcomp_strm_find(zram->comp);
594         locked = true;
595         user_mem = kmap_atomic(page);
596
597         if (is_partial_io(bvec)) {
598                 memcpy(uncmem + offset, user_mem + bvec->bv_offset,
599                        bvec->bv_len);
600                 kunmap_atomic(user_mem);
601                 user_mem = NULL;
602         } else {
603                 uncmem = user_mem;
604         }
605
606         if (page_zero_filled(uncmem)) {
607                 if (user_mem)
608                         kunmap_atomic(user_mem);
609                 /* Free memory associated with this sector now. */
610                 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
611                 zram_free_page(zram, index);
612                 zram_set_flag(meta, index, ZRAM_ZERO);
613                 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
614
615                 atomic64_inc(&zram->stats.zero_pages);
616                 ret = 0;
617                 goto out;
618         }
619
620         ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
621         if (!is_partial_io(bvec)) {
622                 kunmap_atomic(user_mem);
623                 user_mem = NULL;
624                 uncmem = NULL;
625         }
626
627         if (unlikely(ret)) {
628                 pr_err("Compression failed! err=%d\n", ret);
629                 goto out;
630         }
631         src = zstrm->buffer;
632         if (unlikely(clen > max_zpage_size)) {
633                 clen = PAGE_SIZE;
634                 if (is_partial_io(bvec))
635                         src = uncmem;
636         }
637
638         handle = zs_malloc(meta->mem_pool, clen);
639         if (!handle) {
640                 pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
641                         index, clen);
642                 ret = -ENOMEM;
643                 goto out;
644         }
645
646         alloced_pages = zs_get_total_pages(meta->mem_pool);
647         if (zram->limit_pages && alloced_pages > zram->limit_pages) {
648                 zs_free(meta->mem_pool, handle);
649                 ret = -ENOMEM;
650                 goto out;
651         }
652
653         update_used_max(zram, alloced_pages);
654
655         cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
656
657         if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
658                 src = kmap_atomic(page);
659                 copy_page(cmem, src);
660                 kunmap_atomic(src);
661         } else {
662                 memcpy(cmem, src, clen);
663         }
664
665         zcomp_strm_release(zram->comp, zstrm);
666         locked = false;
667         zs_unmap_object(meta->mem_pool, handle);
668
669         /*
670          * Free memory associated with this sector
671          * before overwriting unused sectors.
672          */
673         bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
674         zram_free_page(zram, index);
675
676         meta->table[index].handle = handle;
677         zram_set_obj_size(meta, index, clen);
678         bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
679
680         /* Update stats */
681         atomic64_add(clen, &zram->stats.compr_data_size);
682         atomic64_inc(&zram->stats.pages_stored);
683 out:
684         if (locked)
685                 zcomp_strm_release(zram->comp, zstrm);
686         if (is_partial_io(bvec))
687                 kfree(uncmem);
688         return ret;
689 }
690
691 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
692                         int offset, int rw)
693 {
694         int ret;
695
696         if (rw == READ) {
697                 atomic64_inc(&zram->stats.num_reads);
698                 ret = zram_bvec_read(zram, bvec, index, offset);
699         } else {
700                 atomic64_inc(&zram->stats.num_writes);
701                 ret = zram_bvec_write(zram, bvec, index, offset);
702         }
703
704         if (unlikely(ret)) {
705                 if (rw == READ)
706                         atomic64_inc(&zram->stats.failed_reads);
707                 else
708                         atomic64_inc(&zram->stats.failed_writes);
709         }
710
711         return ret;
712 }
713
714 /*
715  * zram_bio_discard - handler on discard request
716  * @index: physical block index in PAGE_SIZE units
717  * @offset: byte offset within physical block
718  */
719 static void zram_bio_discard(struct zram *zram, u32 index,
720                              int offset, struct bio *bio)
721 {
722         size_t n = bio->bi_iter.bi_size;
723         struct zram_meta *meta = zram->meta;
724
725         /*
726          * zram manages data in physical block size units. Because logical block
727          * size isn't identical with physical block size on some arch, we
728          * could get a discard request pointing to a specific offset within a
729          * certain physical block.  Although we can handle this request by
730          * reading that physiclal block and decompressing and partially zeroing
731          * and re-compressing and then re-storing it, this isn't reasonable
732          * because our intent with a discard request is to save memory.  So
733          * skipping this logical block is appropriate here.
734          */
735         if (offset) {
736                 if (n <= (PAGE_SIZE - offset))
737                         return;
738
739                 n -= (PAGE_SIZE - offset);
740                 index++;
741         }
742
743         while (n >= PAGE_SIZE) {
744                 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
745                 zram_free_page(zram, index);
746                 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
747                 atomic64_inc(&zram->stats.notify_free);
748                 index++;
749                 n -= PAGE_SIZE;
750         }
751 }
752
753 static void zram_reset_device(struct zram *zram)
754 {
755         struct zram_meta *meta;
756         struct zcomp *comp;
757         u64 disksize;
758
759         down_write(&zram->init_lock);
760
761         zram->limit_pages = 0;
762
763         if (!init_done(zram)) {
764                 up_write(&zram->init_lock);
765                 return;
766         }
767
768         meta = zram->meta;
769         comp = zram->comp;
770         disksize = zram->disksize;
771         /*
772          * Refcount will go down to 0 eventually and r/w handler
773          * cannot handle further I/O so it will bail out by
774          * check zram_meta_get.
775          */
776         zram_meta_put(zram);
777         /*
778          * We want to free zram_meta in process context to avoid
779          * deadlock between reclaim path and any other locks.
780          */
781         wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);
782
783         /* Reset stats */
784         memset(&zram->stats, 0, sizeof(zram->stats));
785         zram->disksize = 0;
786         zram->max_comp_streams = 1;
787         set_capacity(zram->disk, 0);
788
789         up_write(&zram->init_lock);
790         /* I/O operation under all of CPU are done so let's free */
791         zram_meta_free(meta, disksize);
792         zcomp_destroy(comp);
793 }
794
795 static ssize_t disksize_store(struct device *dev,
796                 struct device_attribute *attr, const char *buf, size_t len)
797 {
798         u64 disksize;
799         struct zcomp *comp;
800         struct zram_meta *meta;
801         struct zram *zram = dev_to_zram(dev);
802         int err;
803
804         disksize = memparse(buf, NULL);
805         if (!disksize)
806                 return -EINVAL;
807
808         disksize = PAGE_ALIGN(disksize);
809         meta = zram_meta_alloc(zram->disk->first_minor, disksize);
810         if (!meta)
811                 return -ENOMEM;
812
813         comp = zcomp_create(zram->compressor, zram->max_comp_streams);
814         if (IS_ERR(comp)) {
815                 pr_info("Cannot initialise %s compressing backend\n",
816                                 zram->compressor);
817                 err = PTR_ERR(comp);
818                 goto out_free_meta;
819         }
820
821         down_write(&zram->init_lock);
822         if (init_done(zram)) {
823                 pr_info("Cannot change disksize for initialized device\n");
824                 err = -EBUSY;
825                 goto out_destroy_comp;
826         }
827
828         init_waitqueue_head(&zram->io_done);
829         atomic_set(&zram->refcount, 1);
830         zram->meta = meta;
831         zram->comp = comp;
832         zram->disksize = disksize;
833         set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
834         up_write(&zram->init_lock);
835
836         /*
837          * Revalidate disk out of the init_lock to avoid lockdep splat.
838          * It's okay because disk's capacity is protected by init_lock
839          * so that revalidate_disk always sees up-to-date capacity.
840          */
841         revalidate_disk(zram->disk);
842
843         return len;
844
845 out_destroy_comp:
846         up_write(&zram->init_lock);
847         zcomp_destroy(comp);
848 out_free_meta:
849         zram_meta_free(meta, disksize);
850         return err;
851 }
852
853 static ssize_t reset_store(struct device *dev,
854                 struct device_attribute *attr, const char *buf, size_t len)
855 {
856         int ret;
857         unsigned short do_reset;
858         struct zram *zram;
859         struct block_device *bdev;
860
861         zram = dev_to_zram(dev);
862         bdev = bdget_disk(zram->disk, 0);
863
864         if (!bdev)
865                 return -ENOMEM;
866
867         mutex_lock(&bdev->bd_mutex);
868         /* Do not reset an active device! */
869         if (bdev->bd_openers) {
870                 ret = -EBUSY;
871                 goto out;
872         }
873
874         ret = kstrtou16(buf, 10, &do_reset);
875         if (ret)
876                 goto out;
877
878         if (!do_reset) {
879                 ret = -EINVAL;
880                 goto out;
881         }
882
883         /* Make sure all pending I/O is finished */
884         fsync_bdev(bdev);
885         zram_reset_device(zram);
886
887         mutex_unlock(&bdev->bd_mutex);
888         revalidate_disk(zram->disk);
889         bdput(bdev);
890
891         return len;
892
893 out:
894         mutex_unlock(&bdev->bd_mutex);
895         bdput(bdev);
896         return ret;
897 }
898
899 static void __zram_make_request(struct zram *zram, struct bio *bio)
900 {
901         int offset, rw;
902         u32 index;
903         struct bio_vec bvec;
904         struct bvec_iter iter;
905
906         index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
907         offset = (bio->bi_iter.bi_sector &
908                   (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
909
910         if (unlikely(bio->bi_rw & REQ_DISCARD)) {
911                 zram_bio_discard(zram, index, offset, bio);
912                 bio_endio(bio, 0);
913                 return;
914         }
915
916         rw = bio_data_dir(bio);
917         bio_for_each_segment(bvec, bio, iter) {
918                 int max_transfer_size = PAGE_SIZE - offset;
919
920                 if (bvec.bv_len > max_transfer_size) {
921                         /*
922                          * zram_bvec_rw() can only make operation on a single
923                          * zram page. Split the bio vector.
924                          */
925                         struct bio_vec bv;
926
927                         bv.bv_page = bvec.bv_page;
928                         bv.bv_len = max_transfer_size;
929                         bv.bv_offset = bvec.bv_offset;
930
931                         if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
932                                 goto out;
933
934                         bv.bv_len = bvec.bv_len - max_transfer_size;
935                         bv.bv_offset += max_transfer_size;
936                         if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
937                                 goto out;
938                 } else
939                         if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
940                                 goto out;
941
942                 update_position(&index, &offset, &bvec);
943         }
944
945         set_bit(BIO_UPTODATE, &bio->bi_flags);
946         bio_endio(bio, 0);
947         return;
948
949 out:
950         bio_io_error(bio);
951 }
952
953 /*
954  * Handler function for all zram I/O requests.
955  */
956 static void zram_make_request(struct request_queue *queue, struct bio *bio)
957 {
958         struct zram *zram = queue->queuedata;
959
960         if (unlikely(!zram_meta_get(zram)))
961                 goto error;
962
963         if (!valid_io_request(zram, bio->bi_iter.bi_sector,
964                                         bio->bi_iter.bi_size)) {
965                 atomic64_inc(&zram->stats.invalid_io);
966                 goto put_zram;
967         }
968
969         __zram_make_request(zram, bio);
970         zram_meta_put(zram);
971         return;
972 put_zram:
973         zram_meta_put(zram);
974 error:
975         bio_io_error(bio);
976 }
977
978 static void zram_slot_free_notify(struct block_device *bdev,
979                                 unsigned long index)
980 {
981         struct zram *zram;
982         struct zram_meta *meta;
983
984         zram = bdev->bd_disk->private_data;
985         meta = zram->meta;
986
987         bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
988         zram_free_page(zram, index);
989         bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
990         atomic64_inc(&zram->stats.notify_free);
991 }
992
993 static int zram_rw_page(struct block_device *bdev, sector_t sector,
994                        struct page *page, int rw)
995 {
996         int offset, err = -EIO;
997         u32 index;
998         struct zram *zram;
999         struct bio_vec bv;
1000
1001         zram = bdev->bd_disk->private_data;
1002         if (unlikely(!zram_meta_get(zram)))
1003                 goto out;
1004
1005         if (!valid_io_request(zram, sector, PAGE_SIZE)) {
1006                 atomic64_inc(&zram->stats.invalid_io);
1007                 err = -EINVAL;
1008                 goto put_zram;
1009         }
1010
1011         index = sector >> SECTORS_PER_PAGE_SHIFT;
1012         offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;
1013
1014         bv.bv_page = page;
1015         bv.bv_len = PAGE_SIZE;
1016         bv.bv_offset = 0;
1017
1018         err = zram_bvec_rw(zram, &bv, index, offset, rw);
1019 put_zram:
1020         zram_meta_put(zram);
1021 out:
1022         /*
1023          * If I/O fails, just return error(ie, non-zero) without
1024          * calling page_endio.
1025          * It causes resubmit the I/O with bio request by upper functions
1026          * of rw_page(e.g., swap_readpage, __swap_writepage) and
1027          * bio->bi_end_io does things to handle the error
1028          * (e.g., SetPageError, set_page_dirty and extra works).
1029          */
1030         if (err == 0)
1031                 page_endio(page, rw, 0);
1032         return err;
1033 }
1034
1035 static const struct block_device_operations zram_devops = {
1036         .swap_slot_free_notify = zram_slot_free_notify,
1037         .rw_page = zram_rw_page,
1038         .owner = THIS_MODULE
1039 };
1040
1041 static DEVICE_ATTR_WO(compact);
1042 static DEVICE_ATTR_RW(disksize);
1043 static DEVICE_ATTR_RO(initstate);
1044 static DEVICE_ATTR_WO(reset);
1045 static DEVICE_ATTR_RO(orig_data_size);
1046 static DEVICE_ATTR_RO(mem_used_total);
1047 static DEVICE_ATTR_RW(mem_limit);
1048 static DEVICE_ATTR_RW(mem_used_max);
1049 static DEVICE_ATTR_RW(max_comp_streams);
1050 static DEVICE_ATTR_RW(comp_algorithm);
1051
1052 ZRAM_ATTR_RO(num_reads);
1053 ZRAM_ATTR_RO(num_writes);
1054 ZRAM_ATTR_RO(failed_reads);
1055 ZRAM_ATTR_RO(failed_writes);
1056 ZRAM_ATTR_RO(invalid_io);
1057 ZRAM_ATTR_RO(notify_free);
1058 ZRAM_ATTR_RO(zero_pages);
1059 ZRAM_ATTR_RO(compr_data_size);
1060
1061 static struct attribute *zram_disk_attrs[] = {
1062         &dev_attr_disksize.attr,
1063         &dev_attr_initstate.attr,
1064         &dev_attr_reset.attr,
1065         &dev_attr_num_reads.attr,
1066         &dev_attr_num_writes.attr,
1067         &dev_attr_failed_reads.attr,
1068         &dev_attr_failed_writes.attr,
1069         &dev_attr_compact.attr,
1070         &dev_attr_invalid_io.attr,
1071         &dev_attr_notify_free.attr,
1072         &dev_attr_zero_pages.attr,
1073         &dev_attr_orig_data_size.attr,
1074         &dev_attr_compr_data_size.attr,
1075         &dev_attr_mem_used_total.attr,
1076         &dev_attr_mem_limit.attr,
1077         &dev_attr_mem_used_max.attr,
1078         &dev_attr_max_comp_streams.attr,
1079         &dev_attr_comp_algorithm.attr,
1080         NULL,
1081 };
1082
1083 static struct attribute_group zram_disk_attr_group = {
1084         .attrs = zram_disk_attrs,
1085 };
1086
1087 static int create_device(struct zram *zram, int device_id)
1088 {
1089         struct request_queue *queue;
1090         int ret = -ENOMEM;
1091
1092         init_rwsem(&zram->init_lock);
1093
1094         queue = blk_alloc_queue(GFP_KERNEL);
1095         if (!queue) {
1096                 pr_err("Error allocating disk queue for device %d\n",
1097                         device_id);
1098                 goto out;
1099         }
1100
1101         blk_queue_make_request(queue, zram_make_request);
1102
1103          /* gendisk structure */
1104         zram->disk = alloc_disk(1);
1105         if (!zram->disk) {
1106                 pr_warn("Error allocating disk structure for device %d\n",
1107                         device_id);
1108                 goto out_free_queue;
1109         }
1110
1111         zram->disk->major = zram_major;
1112         zram->disk->first_minor = device_id;
1113         zram->disk->fops = &zram_devops;
1114         zram->disk->queue = queue;
1115         zram->disk->queue->queuedata = zram;
1116         zram->disk->private_data = zram;
1117         snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
1118
1119         /* Actual capacity set using syfs (/sys/block/zram<id>/disksize */
1120         set_capacity(zram->disk, 0);
1121         /* zram devices sort of resembles non-rotational disks */
1122         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
1123         queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
1124         /*
1125          * To ensure that we always get PAGE_SIZE aligned
1126          * and n*PAGE_SIZED sized I/O requests.
1127          */
1128         blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
1129         blk_queue_logical_block_size(zram->disk->queue,
1130                                         ZRAM_LOGICAL_BLOCK_SIZE);
1131         blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
1132         blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
1133         zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
1134         zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
1135         /*
1136          * zram_bio_discard() will clear all logical blocks if logical block
1137          * size is identical with physical block size(PAGE_SIZE). But if it is
1138          * different, we will skip discarding some parts of logical blocks in
1139          * the part of the request range which isn't aligned to physical block
1140          * size.  So we can't ensure that all discarded logical blocks are
1141          * zeroed.
1142          */
1143         if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
1144                 zram->disk->queue->limits.discard_zeroes_data = 1;
1145         else
1146                 zram->disk->queue->limits.discard_zeroes_data = 0;
1147         queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
1148
1149         add_disk(zram->disk);
1150
1151         ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
1152                                 &zram_disk_attr_group);
1153         if (ret < 0) {
1154                 pr_warn("Error creating sysfs group");
1155                 goto out_free_disk;
1156         }
1157         strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
1158         zram->meta = NULL;
1159         zram->max_comp_streams = 1;
1160         return 0;
1161
1162 out_free_disk:
1163         del_gendisk(zram->disk);
1164         put_disk(zram->disk);
1165 out_free_queue:
1166         blk_cleanup_queue(queue);
1167 out:
1168         return ret;
1169 }
1170
1171 static void destroy_devices(unsigned int nr)
1172 {
1173         struct zram *zram;
1174         unsigned int i;
1175
1176         for (i = 0; i < nr; i++) {
1177                 zram = &zram_devices[i];
1178                 /*
1179                  * Remove sysfs first, so no one will perform a disksize
1180                  * store while we destroy the devices
1181                  */
1182                 sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
1183                                 &zram_disk_attr_group);
1184
1185                 zram_reset_device(zram);
1186
1187                 blk_cleanup_queue(zram->disk->queue);
1188                 del_gendisk(zram->disk);
1189                 put_disk(zram->disk);
1190         }
1191
1192         kfree(zram_devices);
1193         unregister_blkdev(zram_major, "zram");
1194         pr_info("Destroyed %u device(s)\n", nr);
1195 }
1196
1197 static int __init zram_init(void)
1198 {
1199         int ret, dev_id;
1200
1201         if (num_devices > max_num_devices) {
1202                 pr_warn("Invalid value for num_devices: %u\n",
1203                                 num_devices);
1204                 return -EINVAL;
1205         }
1206
1207         zram_major = register_blkdev(0, "zram");
1208         if (zram_major <= 0) {
1209                 pr_warn("Unable to get major number\n");
1210                 return -EBUSY;
1211         }
1212
1213         /* Allocate the device array and initialize each one */
1214         zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
1215         if (!zram_devices) {
1216                 unregister_blkdev(zram_major, "zram");
1217                 return -ENOMEM;
1218         }
1219
1220         for (dev_id = 0; dev_id < num_devices; dev_id++) {
1221                 ret = create_device(&zram_devices[dev_id], dev_id);
1222                 if (ret)
1223                         goto out_error;
1224         }
1225
1226         pr_info("Created %u device(s)\n", num_devices);
1227         return 0;
1228
1229 out_error:
1230         destroy_devices(dev_id);
1231         return ret;
1232 }
1233
1234 static void __exit zram_exit(void)
1235 {
1236         destroy_devices(num_devices);
1237 }
1238
1239 module_init(zram_init);
1240 module_exit(zram_exit);
1241
1242 module_param(num_devices, uint, 0);
1243 MODULE_PARM_DESC(num_devices, "Number of zram devices");
1244
1245 MODULE_LICENSE("Dual BSD/GPL");
1246 MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
1247 MODULE_DESCRIPTION("Compressed RAM Block Device");