/*
 * linux/kernel/power/swap.c
 *
 * This file provides functions for reading the suspend image from
 * and writing it to a swap partition.
 *
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
 *
 * This file is released under the GPLv2.
 */
#include <linux/module.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/genhd.h>
#include <linux/device.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/ktime.h>

#include "power.h"
#define HIBERNATE_SIG	"S1SUSPEND"
/*
 * When reading an {un,}compressed image, we may restore pages in place,
 * in which case some architectures need these pages cleaning before they
 * can be executed. We don't know which pages these may be, so clean the lot.
 */
static bool clean_pages_on_read;
static bool clean_pages_on_decompress;
/*
 * The swap map is a data structure used for keeping track of each page
 * written to a swap partition. It consists of many swap_map_page
 * structures that each contain an array of MAP_PAGE_ENTRIES swap entries.
 * These structures are stored on the swap and linked together with the
 * help of the .next_swap member.
 *
 * The swap map is created during suspend. The swap map pages are
 * allocated and populated one at a time, so we only need one memory
 * page to set up the entire structure.
 *
 * During resume we pick up all swap_map_page structures into a list.
 */

#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)
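
/*
 * Illustrative arithmetic (not asserted by this file): with 4 KiB pages
 * and an 8-byte sector_t, MAP_PAGE_ENTRIES is 4096 / 8 - 1 = 511, i.e.
 * each swap_map_page holds 511 image-page sectors plus one link sector
 * (the next_swap member) pointing at the next map page on swap.
 */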
/*
 * Number of free pages that are not high.
 */
static inline unsigned long low_free_pages(void)
{
        return nr_free_pages() - nr_free_highpages();
}

/*
 * Number of pages required to be kept free while writing the image. Always
 * half of all available low pages before the writing starts.
 */
static inline unsigned long reqd_free_pages(void)
{
        return low_free_pages() / 2;
}
struct swap_map_page {
        sector_t entries[MAP_PAGE_ENTRIES];
        sector_t next_swap;
};

struct swap_map_page_list {
        struct swap_map_page *map;
        struct swap_map_page_list *next;
};
/*
 * The swap_map_handle structure is used for handling swap in
 * a file-alike way.
 */

struct swap_map_handle {
        struct swap_map_page *cur;
        struct swap_map_page_list *maps;
        sector_t cur_swap;
        sector_t first_sector;
        unsigned int k;
        unsigned long reqd_free_pages;
        u32 crc32;
};
struct swsusp_header {
        char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
                      sizeof(u32)];
        u32	crc32;
        sector_t image;
        unsigned int flags;	/* Flags to pass to the "boot" kernel */
        char	orig_sig[10];
        char	sig[10];
} __packed;

static struct swsusp_header *swsusp_header;
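
/*
 * Layout note (illustrative): the reserved[] padding sizes this structure
 * to exactly PAGE_SIZE, so sig[] occupies the last 10 bytes of the page,
 * the same spot where mkswap stores "SWAP-SPACE"/"SWAPSPACE2". Replacing
 * it with HIBERNATE_SIG marks the swap as holding an image, while
 * orig_sig[] preserves the original signature for restoration.
 */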
/*
 * The following functions are used for tracking the allocated
 * swap pages, so that they can be freed in case of an error.
 */

struct swsusp_extent {
        struct rb_node node;
        unsigned long start;
        unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;
static int swsusp_extents_insert(unsigned long swap_offset)
{
        struct rb_node **new = &(swsusp_extents.rb_node);
        struct rb_node *parent = NULL;
        struct swsusp_extent *ext;

        /* Figure out where to put the new node */
        while (*new) {
                ext = rb_entry(*new, struct swsusp_extent, node);
                parent = *new;
                if (swap_offset < ext->start) {
                        /* Try to merge */
                        if (swap_offset == ext->start - 1) {
                                ext->start--;
                                return 0;
                        }
                        new = &((*new)->rb_left);
                } else if (swap_offset > ext->end) {
                        /* Try to merge */
                        if (swap_offset == ext->end + 1) {
                                ext->end++;
                                return 0;
                        }
                        new = &((*new)->rb_right);
                } else {
                        /* It already is in the tree */
                        return -EINVAL;
                }
        }
        /* Add the new node and rebalance the tree. */
        ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
        if (!ext)
                return -ENOMEM;
        ext->start = swap_offset;
        ext->end = swap_offset;
        rb_link_node(&ext->node, parent, new);
        rb_insert_color(&ext->node, &swsusp_extents);
        return 0;
}
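
/*
 * Worked example (illustrative): inserting offsets 100, 101 and 99, in
 * that order, first creates the extent [100..100], then grows it to
 * [100..101] (offset == ext->end + 1), then to [99..101] (offset ==
 * ext->start - 1). Inserting 100 again returns -EINVAL since the offset
 * is already covered. Adjacent extents are only grown, never coalesced
 * with each other.
 */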
/**
 * alloc_swapdev_block - allocate a swap page and register that it has
 * been allocated, so that it can be freed in case of an error.
 */

sector_t alloc_swapdev_block(int swap)
{
        unsigned long offset;

        offset = swp_offset(get_swap_page_of_type(swap));
        if (offset) {
                if (swsusp_extents_insert(offset))
                        swap_free(swp_entry(swap, offset));
                else
                        return swapdev_block(swap, offset);
        }
        return 0;
}
/**
 * free_all_swap_pages - free swap pages allocated for saving image data.
 * It also frees the extents used to register which swap entries had been
 * allocated.
 */

void free_all_swap_pages(int swap)
{
        struct rb_node *node;

        while ((node = swsusp_extents.rb_node)) {
                struct swsusp_extent *ext;
                unsigned long offset;

                ext = container_of(node, struct swsusp_extent, node);
                rb_erase(node, &swsusp_extents);
                for (offset = ext->start; offset <= ext->end; offset++)
                        swap_free(swp_entry(swap, offset));

                kfree(ext);
        }
}
int swsusp_swap_in_use(void)
{
        return (swsusp_extents.rb_node != NULL);
}
static unsigned short root_swap = 0xffff;
static struct block_device *hib_resume_bdev;

struct hib_bio_batch {
        atomic_t		count;
        wait_queue_head_t	wait;
        int			error;
};
static void hib_init_batch(struct hib_bio_batch *hb)
{
        atomic_set(&hb->count, 0);
        init_waitqueue_head(&hb->wait);
        hb->error = 0;
}
static void hib_end_io(struct bio *bio)
{
        struct hib_bio_batch *hb = bio->bi_private;
        struct page *page = bio->bi_io_vec[0].bv_page;

        if (bio->bi_error) {
                printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
                       imajor(bio->bi_bdev->bd_inode),
                       iminor(bio->bi_bdev->bd_inode),
                       (unsigned long long)bio->bi_iter.bi_sector);
        }

        if (bio_data_dir(bio) == WRITE)
                put_page(page);
        else if (clean_pages_on_read)
                flush_icache_range((unsigned long)page_address(page),
                                   (unsigned long)page_address(page) + PAGE_SIZE);

        if (bio->bi_error && !hb->error)
                hb->error = bio->bi_error;
        if (atomic_dec_and_test(&hb->count))
                wake_up(&hb->wait);

        bio_put(bio);
}
static int hib_submit_io(int rw, pgoff_t page_off, void *addr,
                struct hib_bio_batch *hb)
{
        struct page *page = virt_to_page(addr);
        struct bio *bio;
        int error = 0;

        bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1);
        bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
        bio->bi_bdev = hib_resume_bdev;

        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
                printk(KERN_ERR "PM: Adding page to bio failed at %llu\n",
                       (unsigned long long)bio->bi_iter.bi_sector);
                bio_put(bio);
                return -EFAULT;
        }

        if (hb) {
                bio->bi_end_io = hib_end_io;
                bio->bi_private = hb;
                atomic_inc(&hb->count);
                submit_bio(rw, bio);
        } else {
                error = submit_bio_wait(rw, bio);
                bio_put(bio);
        }

        return error;
}
static int hib_wait_io(struct hib_bio_batch *hb)
{
        wait_event(hb->wait, atomic_read(&hb->count) == 0);
        return hb->error;
}
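
/*
 * Typical batched-I/O pattern (an illustrative sketch, not code used
 * verbatim in this file):
 *
 *      struct hib_bio_batch hb;
 *
 *      hib_init_batch(&hb);
 *      hib_submit_io(WRITE_SYNC, sector0, buf0, &hb);  // async, counted
 *      hib_submit_io(WRITE_SYNC, sector1, buf1, &hb);  // async, counted
 *      error = hib_wait_io(&hb);       // sleep until count reaches zero
 *
 * Passing hb == NULL makes hib_submit_io() fully synchronous instead.
 */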
static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
        int error;

        hib_submit_io(READ_SYNC, swsusp_resume_block, swsusp_header, NULL);
        if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
            !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
                memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
                memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
                swsusp_header->image = handle->first_sector;
                swsusp_header->flags = flags;
                if (flags & SF_CRC32_MODE)
                        swsusp_header->crc32 = handle->crc32;
                error = hib_submit_io(WRITE_SYNC, swsusp_resume_block,
                                      swsusp_header, NULL);
        } else {
                printk(KERN_ERR "PM: Swap header not found!\n");
                error = -ENODEV;
        }
        return error;
}
/*
 * swsusp_swap_check - check if the resume device is a swap device
 * and get its index (if so)
 *
 * This is called before saving the image.
 */
static int swsusp_swap_check(void)
{
        int res;

        res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
                           &hib_resume_bdev);
        if (res < 0)
                return res;

        root_swap = res;
        res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
        if (res)
                return res;

        res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
        if (res < 0)
                blkdev_put(hib_resume_bdev, FMODE_WRITE);

        return res;
}
/**
 * write_page - Write one page to given swap location.
 * @buf: Address we're writing.
 * @offset: Offset of the swap page we're writing to.
 * @hb: bio completion batch
 */
static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
{
        void *src;
        int ret;

        if (!offset)
                return -ENOSPC;

        if (hb) {
                src = (void *)__get_free_page(__GFP_RECLAIM | __GFP_NOWARN |
                                              __GFP_NORETRY);
                if (src) {
                        copy_page(src, buf);
                } else {
                        ret = hib_wait_io(hb); /* Free pages */
                        if (ret)
                                return ret;
                        src = (void *)__get_free_page(__GFP_RECLAIM |
                                                      __GFP_NOWARN |
                                                      __GFP_NORETRY);
                        if (src) {
                                copy_page(src, buf);
                        } else {
                                WARN_ON_ONCE(1);
                                hb = NULL;	/* Go synchronous */
                                src = buf;
                        }
                }
        } else {
                src = buf;
        }
        return hib_submit_io(WRITE_SYNC, offset, src, hb);
}
static void release_swap_writer(struct swap_map_handle *handle)
{
        if (handle->cur)
                free_page((unsigned long)handle->cur);
        handle->cur = NULL;
}
static int get_swap_writer(struct swap_map_handle *handle)
{
        int ret;

        ret = swsusp_swap_check();
        if (ret) {
                if (ret != -ENOSPC)
                        printk(KERN_ERR "PM: Cannot find swap device, try "
                                        "swapon -a.\n");
                return ret;
        }
        handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
        if (!handle->cur) {
                ret = -ENOMEM;
                goto err_close;
        }
        handle->cur_swap = alloc_swapdev_block(root_swap);
        if (!handle->cur_swap) {
                ret = -ENOSPC;
                goto err_rel;
        }
        handle->k = 0;
        handle->reqd_free_pages = reqd_free_pages();
        handle->first_sector = handle->cur_swap;
        return 0;
err_rel:
        release_swap_writer(handle);
err_close:
        swsusp_close(FMODE_WRITE);
        return ret;
}
static int swap_write_page(struct swap_map_handle *handle, void *buf,
                struct hib_bio_batch *hb)
{
        int error = 0;
        sector_t offset;

        if (!handle->cur)
                return -EINVAL;
        offset = alloc_swapdev_block(root_swap);
        error = write_page(buf, offset, hb);
        if (error)
                return error;
        handle->cur->entries[handle->k++] = offset;
        if (handle->k >= MAP_PAGE_ENTRIES) {
                offset = alloc_swapdev_block(root_swap);
                if (!offset)
                        return -ENOSPC;
                handle->cur->next_swap = offset;
                error = write_page(handle->cur, handle->cur_swap, hb);
                if (error)
                        goto out;
                clear_page(handle->cur);
                handle->cur_swap = offset;
                handle->k = 0;

                if (hb && low_free_pages() <= handle->reqd_free_pages) {
                        error = hib_wait_io(hb);
                        if (error)
                                goto out;
                        /*
                         * Recalculate the number of required free pages, to
                         * make sure we never take more than half.
                         */
                        handle->reqd_free_pages = reqd_free_pages();
                }
        }
out:
        return error;
}
static int flush_swap_writer(struct swap_map_handle *handle)
{
        if (handle->cur && handle->cur_swap)
                return write_page(handle->cur, handle->cur_swap, NULL);
        else
                return -EINVAL;
}
static int swap_writer_finish(struct swap_map_handle *handle,
                unsigned int flags, int error)
{
        if (!error) {
                flush_swap_writer(handle);
                printk(KERN_INFO "PM: S");
                error = mark_swapfiles(handle, flags);
                printk("|\n");
        }

        if (error)
                free_all_swap_pages(root_swap);
        release_swap_writer(handle);
        swsusp_close(FMODE_WRITE);

        return error;
}
/* We need to remember how much compressed data we need to read. */
#define LZO_HEADER	sizeof(size_t)

/* Number of pages/bytes we'll compress at one time. */
#define LZO_UNC_PAGES	32
#define LZO_UNC_SIZE	(LZO_UNC_PAGES * PAGE_SIZE)

/* Number of pages/bytes we need for compressed data (worst case). */
#define LZO_CMP_PAGES	DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
				     LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE	(LZO_CMP_PAGES * PAGE_SIZE)

/* Maximum number of threads for compression/decompression. */
#define LZO_THREADS	3

/* Minimum/maximum number of pages for read buffering. */
#define LZO_MIN_RD_PAGES	1024
#define LZO_MAX_RD_PAGES	8192
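
/*
 * Sizing example (illustrative): with 4 KiB pages, LZO_UNC_SIZE is
 * 32 * 4096 = 128 KiB per chunk. lzo1x_worst_compress(x) expands to
 * x + x/16 + 64 + 3, so a worst-case chunk plus LZO_HEADER is just over
 * 136 KiB, giving LZO_CMP_PAGES = 35.
 */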
/**
 * save_image - save the suspend image data
 */

static int save_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_write)
{
        unsigned int m;
        int ret;
        int nr_pages;
        int err2;
        struct hib_bio_batch hb;
        ktime_t start;
        ktime_t stop;

        hib_init_batch(&hb);

        printk(KERN_INFO "PM: Saving image data pages (%u pages)...\n",
               nr_to_write);
        m = nr_to_write / 10;
        if (!m)
                m = 1;
        nr_pages = 0;
        start = ktime_get();
        while (1) {
                ret = snapshot_read_next(snapshot);
                if (ret <= 0)
                        break;
                ret = swap_write_page(handle, data_of(*snapshot), &hb);
                if (ret)
                        break;
                if (!(nr_pages % m))
                        printk(KERN_INFO "PM: Image saving progress: %3d%%\n",
                               nr_pages / m * 10);
                nr_pages++;
        }
        err2 = hib_wait_io(&hb);
        stop = ktime_get();
        if (!ret)
                ret = err2;
        if (!ret)
                printk(KERN_INFO "PM: Image saving done.\n");
        swsusp_show_speed(start, stop, nr_to_write, "Wrote");
        return ret;
}
/*
 * Structure used for CRC32.
 */
struct crc_data {
        struct task_struct *thr;                  /* thread */
        atomic_t ready;                           /* ready to start flag */
        atomic_t stop;                            /* ready to stop flag */
        unsigned run_threads;                     /* nr current threads */
        wait_queue_head_t go;                     /* start crc update */
        wait_queue_head_t done;                   /* crc update done */
        u32 *crc32;                               /* points to handle's crc32 */
        size_t *unc_len[LZO_THREADS];             /* uncompressed lengths */
        unsigned char *unc[LZO_THREADS];          /* uncompressed data */
};
/*
 * CRC32 update function that runs in its own thread.
 */
static int crc32_threadfn(void *data)
{
        struct crc_data *d = data;
        unsigned i;

        while (1) {
                wait_event(d->go, atomic_read(&d->ready) ||
                                  kthread_should_stop());
                if (kthread_should_stop()) {
                        d->thr = NULL;
                        atomic_set(&d->stop, 1);
                        wake_up(&d->done);
                        break;
                }
                atomic_set(&d->ready, 0);

                for (i = 0; i < d->run_threads; i++)
                        *d->crc32 = crc32_le(*d->crc32,
                                             d->unc[i], *d->unc_len[i]);
                atomic_set(&d->stop, 1);
                wake_up(&d->done);
        }
        return 0;
}
/*
 * Structure used for LZO data compression.
 */
struct cmp_data {
        struct task_struct *thr;                  /* thread */
        atomic_t ready;                           /* ready to start flag */
        atomic_t stop;                            /* ready to stop flag */
        int ret;                                  /* return code */
        wait_queue_head_t go;                     /* start compression */
        wait_queue_head_t done;                   /* compression done */
        size_t unc_len;                           /* uncompressed length */
        size_t cmp_len;                           /* compressed length */
        unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
        unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
        unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
};
/*
 * Compression function that runs in its own thread.
 */
static int lzo_compress_threadfn(void *data)
{
        struct cmp_data *d = data;

        while (1) {
                wait_event(d->go, atomic_read(&d->ready) ||
                                  kthread_should_stop());
                if (kthread_should_stop()) {
                        d->thr = NULL;
                        d->ret = -1;
                        atomic_set(&d->stop, 1);
                        wake_up(&d->done);
                        break;
                }
                atomic_set(&d->ready, 0);

                d->ret = lzo1x_1_compress(d->unc, d->unc_len,
                                          d->cmp + LZO_HEADER, &d->cmp_len,
                                          d->wrk);
                atomic_set(&d->stop, 1);
                wake_up(&d->done);
        }
        return 0;
}
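
/*
 * Handshake sketch (illustrative, mirroring how save_image_lzo() below
 * drives a worker; the names are the fields declared above):
 *
 *      d->unc_len = ...;               // fill d->unc with input pages
 *      atomic_set(&d->ready, 1);
 *      wake_up(&d->go);                // worker wakes and compresses
 *      ...
 *      wait_event(d->done, atomic_read(&d->stop));
 *      atomic_set(&d->stop, 0);        // re-arm for the next round
 *
 * The same ready/go + stop/done pairing is used for the CRC32 thread.
 */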
/**
 * save_image_lzo - Save the suspend image data compressed with LZO.
 * @handle: Swap map handle to use for saving the image.
 * @snapshot: Image to read data from.
 * @nr_to_write: Number of pages to save.
 */
static int save_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_write)
{
        unsigned int m;
        int ret = 0;
        int nr_pages;
        int err2;
        struct hib_bio_batch hb;
        ktime_t start;
        ktime_t stop;
        size_t off;
        unsigned thr, run_threads, nr_threads;
        unsigned char *page = NULL;
        struct cmp_data *data = NULL;
        struct crc_data *crc = NULL;

        hib_init_batch(&hb);
        /*
         * We'll limit the number of threads for compression to limit memory
         * footprint.
         */
        nr_threads = num_online_cpus() - 1;
        nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

        page = (void *)__get_free_page(__GFP_RECLAIM | __GFP_HIGH);
        if (!page) {
                printk(KERN_ERR "PM: Failed to allocate LZO page\n");
                ret = -ENOMEM;
                goto out_clean;
        }

        data = vmalloc(sizeof(*data) * nr_threads);
        if (!data) {
                printk(KERN_ERR "PM: Failed to allocate LZO data\n");
                ret = -ENOMEM;
                goto out_clean;
        }
        for (thr = 0; thr < nr_threads; thr++)
                memset(&data[thr], 0, offsetof(struct cmp_data, go));

        crc = kmalloc(sizeof(*crc), GFP_KERNEL);
        if (!crc) {
                printk(KERN_ERR "PM: Failed to allocate crc\n");
                ret = -ENOMEM;
                goto out_clean;
        }
        memset(crc, 0, offsetof(struct crc_data, go));
        /*
         * Start the compression threads.
         */
        for (thr = 0; thr < nr_threads; thr++) {
                init_waitqueue_head(&data[thr].go);
                init_waitqueue_head(&data[thr].done);

                data[thr].thr = kthread_run(lzo_compress_threadfn,
                                            &data[thr],
                                            "image_compress/%u", thr);
                if (IS_ERR(data[thr].thr)) {
                        data[thr].thr = NULL;
                        printk(KERN_ERR
                               "PM: Cannot start compression threads\n");
                        ret = -ENOMEM;
                        goto out_clean;
                }
        }
        /*
         * Start the CRC32 thread.
         */
        init_waitqueue_head(&crc->go);
        init_waitqueue_head(&crc->done);

        handle->crc32 = 0;
        crc->crc32 = &handle->crc32;
        for (thr = 0; thr < nr_threads; thr++) {
                crc->unc[thr] = data[thr].unc;
                crc->unc_len[thr] = &data[thr].unc_len;
        }

        crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
        if (IS_ERR(crc->thr)) {
                crc->thr = NULL;
                printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
                ret = -ENOMEM;
                goto out_clean;
        }
        /*
         * Adjust the number of required free pages after all allocations have
         * been done. We don't want to run out of pages when writing.
         */
        handle->reqd_free_pages = reqd_free_pages();

        printk(KERN_INFO
                "PM: Using %u thread(s) for compression.\n"
                "PM: Compressing and saving image data (%u pages)...\n",
                nr_threads, nr_to_write);
        m = nr_to_write / 10;
        if (!m)
                m = 1;
        nr_pages = 0;
        start = ktime_get();
        for (;;) {
                for (thr = 0; thr < nr_threads; thr++) {
                        for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
                                ret = snapshot_read_next(snapshot);
                                if (ret < 0)
                                        goto out_finish;

                                if (!ret)
                                        break;

                                memcpy(data[thr].unc + off,
                                       data_of(*snapshot), PAGE_SIZE);

                                if (!(nr_pages % m))
                                        printk(KERN_INFO
                                               "PM: Image saving progress: %3d%%\n",
                                               nr_pages / m * 10);
                                nr_pages++;
                        }
                        if (!off)
                                break;

                        data[thr].unc_len = off;

                        atomic_set(&data[thr].ready, 1);
                        wake_up(&data[thr].go);
                }

                if (!thr)
                        break;

                crc->run_threads = thr;
                atomic_set(&crc->ready, 1);
                wake_up(&crc->go);
                for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
                        wait_event(data[thr].done,
                                   atomic_read(&data[thr].stop));
                        atomic_set(&data[thr].stop, 0);

                        ret = data[thr].ret;

                        if (ret < 0) {
                                printk(KERN_ERR "PM: LZO compression failed\n");
                                goto out_finish;
                        }

                        if (unlikely(!data[thr].cmp_len ||
                                     data[thr].cmp_len >
                                     lzo1x_worst_compress(data[thr].unc_len))) {
                                printk(KERN_ERR
                                       "PM: Invalid LZO compressed length\n");
                                ret = -1;
                                goto out_finish;
                        }

                        *(size_t *)data[thr].cmp = data[thr].cmp_len;

                        /*
                         * Given we are writing one page at a time to disk, we
                         * copy that much from the buffer, although the last
                         * bit will likely be smaller than full page. This is
                         * OK - we saved the length of the compressed data, so
                         * any garbage at the end will be discarded when we
                         * read it.
                         */
                        for (off = 0;
                             off < LZO_HEADER + data[thr].cmp_len;
                             off += PAGE_SIZE) {
                                memcpy(page, data[thr].cmp + off, PAGE_SIZE);

                                ret = swap_write_page(handle, page, &hb);
                                if (ret)
                                        goto out_finish;
                        }
                }
                wait_event(crc->done, atomic_read(&crc->stop));
                atomic_set(&crc->stop, 0);
        }

out_finish:
        err2 = hib_wait_io(&hb);
        stop = ktime_get();
        if (!ret)
                ret = err2;
        if (!ret)
                printk(KERN_INFO "PM: Image saving done.\n");
        swsusp_show_speed(start, stop, nr_to_write, "Wrote");
out_clean:
        if (crc) {
                if (crc->thr)
                        kthread_stop(crc->thr);
                kfree(crc);
        }
        if (data) {
                for (thr = 0; thr < nr_threads; thr++)
                        if (data[thr].thr)
                                kthread_stop(data[thr].thr);
                vfree(data);
        }
        if (page)
                free_page((unsigned long)page);

        return ret;
}
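
/*
 * On-disk chunk format produced above (illustrative summary): each chunk
 * is a size_t length header followed immediately by that many bytes of
 * LZO1X data, padded out to whole swap pages:
 *
 *      [size_t cmp_len][cmp_len bytes of compressed data][pad to page]
 *
 * load_image_lzo() below parses exactly this layout.
 */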
/**
 * enough_swap - Make sure we have enough swap to save the image.
 *
 * Returns TRUE or FALSE after checking the total amount of swap
 * space available from the resume partition.
 */

static int enough_swap(unsigned int nr_pages, unsigned int flags)
{
        unsigned int free_swap = count_swap_pages(root_swap, 1);
        unsigned int required;

        pr_debug("PM: Free swap pages: %u\n", free_swap);

        required = PAGES_FOR_IO + nr_pages;
        return free_swap > required;
}
/**
 * swsusp_write - Write entire image and metadata.
 * @flags: flags to pass to the "boot" kernel in the image header
 *
 * It is important _NOT_ to umount filesystems at this point. We want
 * them synced (in case something goes wrong) but we do NOT want to mark
 * the filesystem clean: it is not. (And it does not matter; if we resume
 * correctly, we'll mark the system clean anyway.)
 */
int swsusp_write(unsigned int flags)
{
        struct swap_map_handle handle;
        struct snapshot_handle snapshot;
        struct swsusp_info *header;
        unsigned long pages;
        int error;

        pages = snapshot_get_image_size();
        error = get_swap_writer(&handle);
        if (error) {
                printk(KERN_ERR "PM: Cannot get swap writer\n");
                return error;
        }
        if (flags & SF_NOCOMPRESS_MODE) {
                if (!enough_swap(pages, flags)) {
                        printk(KERN_ERR "PM: Not enough free swap\n");
                        error = -ENOSPC;
                        goto out_finish;
                }
        }
        memset(&snapshot, 0, sizeof(struct snapshot_handle));
        error = snapshot_read_next(&snapshot);
        if (error < PAGE_SIZE) {
                if (error >= 0)
                        error = -EFAULT;

                goto out_finish;
        }
        header = (struct swsusp_info *)data_of(snapshot);
        error = swap_write_page(&handle, header, NULL);
        if (!error) {
                error = (flags & SF_NOCOMPRESS_MODE) ?
                        save_image(&handle, &snapshot, pages - 1) :
                        save_image_lzo(&handle, &snapshot, pages - 1);
        }
out_finish:
        error = swap_writer_finish(&handle, flags, error);
        return error;
}
/*
 * The following functions allow us to read data using a swap map
 * in a file-alike way.
 */

static void release_swap_reader(struct swap_map_handle *handle)
{
        struct swap_map_page_list *tmp;

        while (handle->maps) {
                if (handle->maps->map)
                        free_page((unsigned long)handle->maps->map);
                tmp = handle->maps;
                handle->maps = handle->maps->next;
                kfree(tmp);
        }
        handle->cur = NULL;
}
static int get_swap_reader(struct swap_map_handle *handle,
                unsigned int *flags_p)
{
        int error;
        struct swap_map_page_list *tmp, *last;
        sector_t offset;

        *flags_p = swsusp_header->flags;

        if (!swsusp_header->image) /* how can this happen? */
                return -EINVAL;

        handle->cur = NULL;
        last = handle->maps = NULL;
        offset = swsusp_header->image;
        while (offset) {
                tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL);
                if (!tmp) {
                        release_swap_reader(handle);
                        return -ENOMEM;
                }
                memset(tmp, 0, sizeof(*tmp));
                if (!handle->maps)
                        handle->maps = tmp;
                if (last)
                        last->next = tmp;
                last = tmp;

                tmp->map = (struct swap_map_page *)
                           __get_free_page(__GFP_RECLAIM | __GFP_HIGH);
                if (!tmp->map) {
                        release_swap_reader(handle);
                        return -ENOMEM;
                }

                error = hib_submit_io(READ_SYNC, offset, tmp->map, NULL);
                if (error) {
                        release_swap_reader(handle);
                        return error;
                }
                offset = tmp->map->next_swap;
        }
        handle->k = 0;
        handle->cur = handle->maps->map;
        return 0;
}
static int swap_read_page(struct swap_map_handle *handle, void *buf,
                struct hib_bio_batch *hb)
{
        sector_t offset;
        int error;
        struct swap_map_page_list *tmp;

        if (!handle->cur)
                return -EINVAL;
        offset = handle->cur->entries[handle->k];
        if (!offset)
                return -EFAULT;
        error = hib_submit_io(READ_SYNC, offset, buf, hb);
        if (error)
                return error;
        if (++handle->k >= MAP_PAGE_ENTRIES) {
                handle->k = 0;
                free_page((unsigned long)handle->maps->map);
                tmp = handle->maps;
                handle->maps = handle->maps->next;
                kfree(tmp);
                if (!handle->maps)
                        release_swap_reader(handle);
                else
                        handle->cur = handle->maps->map;
        }
        return error;
}
static int swap_reader_finish(struct swap_map_handle *handle)
{
        release_swap_reader(handle);

        return 0;
}
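
/*
 * Reader call sequence (an illustrative sketch of how swsusp_read()
 * below drives these helpers):
 *
 *      unsigned int flags;
 *
 *      error = get_swap_reader(&handle, &flags);   // walk the swap map
 *      error = swap_read_page(&handle, buf, NULL); // header, synchronous
 *      ...                                         // image pages, batched
 *      swap_reader_finish(&handle);
 */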
/**
 * load_image - load the image using the swap map handle
 * @handle and the snapshot handle @snapshot
 * (assume there are @nr_to_read pages to load)
 */

static int load_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_read)
{
        unsigned int m;
        int ret = 0;
        ktime_t start;
        ktime_t stop;
        struct hib_bio_batch hb;
        int err2;
        unsigned nr_pages;

        hib_init_batch(&hb);

        clean_pages_on_read = true;
        printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n",
               nr_to_read);
        m = nr_to_read / 10;
        if (!m)
                m = 1;
        nr_pages = 0;
        start = ktime_get();
        for ( ; ; ) {
                ret = snapshot_write_next(snapshot);
                if (ret <= 0)
                        break;
                ret = swap_read_page(handle, data_of(*snapshot), &hb);
                if (ret)
                        break;
                if (snapshot->sync_read)
                        ret = hib_wait_io(&hb);
                if (ret)
                        break;
                if (!(nr_pages % m))
                        printk(KERN_INFO "PM: Image loading progress: %3d%%\n",
                               nr_pages / m * 10);
                nr_pages++;
        }
        err2 = hib_wait_io(&hb);
        stop = ktime_get();
        if (!ret)
                ret = err2;
        if (!ret) {
                printk(KERN_INFO "PM: Image loading done.\n");
                snapshot_write_finalize(snapshot);
                if (!snapshot_image_loaded(snapshot))
                        ret = -ENODATA;
        }
        swsusp_show_speed(start, stop, nr_to_read, "Read");
        return ret;
}
/*
 * Structure used for LZO data decompression.
 */
struct dec_data {
        struct task_struct *thr;                  /* thread */
        atomic_t ready;                           /* ready to start flag */
        atomic_t stop;                            /* ready to stop flag */
        int ret;                                  /* return code */
        wait_queue_head_t go;                     /* start decompression */
        wait_queue_head_t done;                   /* decompression done */
        size_t unc_len;                           /* uncompressed length */
        size_t cmp_len;                           /* compressed length */
        unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
        unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
};
/*
 * Decompression function that runs in its own thread.
 */
static int lzo_decompress_threadfn(void *data)
{
        struct dec_data *d = data;

        while (1) {
                wait_event(d->go, atomic_read(&d->ready) ||
                                  kthread_should_stop());
                if (kthread_should_stop()) {
                        d->thr = NULL;
                        d->ret = -1;
                        atomic_set(&d->stop, 1);
                        wake_up(&d->done);
                        break;
                }
                atomic_set(&d->ready, 0);

                d->unc_len = LZO_UNC_SIZE;
                d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
                                               d->unc, &d->unc_len);
                if (clean_pages_on_decompress)
                        flush_icache_range((unsigned long)d->unc,
                                           (unsigned long)d->unc + d->unc_len);

                atomic_set(&d->stop, 1);
                wake_up(&d->done);
        }
        return 0;
}
/**
 * load_image_lzo - Load compressed image data and decompress it with LZO.
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Image to copy uncompressed data into.
 * @nr_to_read: Number of pages to load.
 */
static int load_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_read)
{
        unsigned int m;
        int ret = 0;
        int eof = 0;
        struct hib_bio_batch hb;
        ktime_t start;
        ktime_t stop;
        unsigned nr_pages;
        size_t off;
        unsigned i, thr, run_threads, nr_threads;
        unsigned ring = 0, pg = 0, ring_size = 0,
                 have = 0, want, need, asked = 0;
        unsigned long read_pages = 0;
        unsigned char **page = NULL;
        struct dec_data *data = NULL;
        struct crc_data *crc = NULL;

        hib_init_batch(&hb);
        /*
         * We'll limit the number of threads for decompression to limit memory
         * footprint.
         */
        nr_threads = num_online_cpus() - 1;
        nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

        page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
        if (!page) {
                printk(KERN_ERR "PM: Failed to allocate LZO page\n");
                ret = -ENOMEM;
                goto out_clean;
        }

        data = vmalloc(sizeof(*data) * nr_threads);
        if (!data) {
                printk(KERN_ERR "PM: Failed to allocate LZO data\n");
                ret = -ENOMEM;
                goto out_clean;
        }
        for (thr = 0; thr < nr_threads; thr++)
                memset(&data[thr], 0, offsetof(struct dec_data, go));

        crc = kmalloc(sizeof(*crc), GFP_KERNEL);
        if (!crc) {
                printk(KERN_ERR "PM: Failed to allocate crc\n");
                ret = -ENOMEM;
                goto out_clean;
        }
        memset(crc, 0, offsetof(struct crc_data, go));
        clean_pages_on_decompress = true;

        /*
         * Start the decompression threads.
         */
        for (thr = 0; thr < nr_threads; thr++) {
                init_waitqueue_head(&data[thr].go);
                init_waitqueue_head(&data[thr].done);

                data[thr].thr = kthread_run(lzo_decompress_threadfn,
                                            &data[thr],
                                            "image_decompress/%u", thr);
                if (IS_ERR(data[thr].thr)) {
                        data[thr].thr = NULL;
                        printk(KERN_ERR
                               "PM: Cannot start decompression threads\n");
                        ret = -ENOMEM;
                        goto out_clean;
                }
        }
        /*
         * Start the CRC32 thread.
         */
        init_waitqueue_head(&crc->go);
        init_waitqueue_head(&crc->done);

        handle->crc32 = 0;
        crc->crc32 = &handle->crc32;
        for (thr = 0; thr < nr_threads; thr++) {
                crc->unc[thr] = data[thr].unc;
                crc->unc_len[thr] = &data[thr].unc_len;
        }

        crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
        if (IS_ERR(crc->thr)) {
                crc->thr = NULL;
                printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
                ret = -ENOMEM;
                goto out_clean;
        }
        /*
         * Set the number of pages for read buffering.
         * This is complete guesswork, because we'll only know the real
         * picture once prepare_image() is called, which is much later on
         * during the image load phase. We'll assume the worst case and
         * say that none of the image pages are from high memory.
         */
        if (low_free_pages() > snapshot_get_image_size())
                read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
        read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);

        for (i = 0; i < read_pages; i++) {
                page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
                                                  __GFP_RECLAIM | __GFP_HIGH :
                                                  __GFP_RECLAIM | __GFP_NOWARN |
                                                  __GFP_NORETRY);

                if (!page[i]) {
                        if (i < LZO_CMP_PAGES) {
                                ring_size = i;
                                printk(KERN_ERR
                                       "PM: Failed to allocate LZO pages\n");
                                ret = -ENOMEM;
                                goto out_clean;
                        } else {
                                break;
                        }
                }
        }
        want = ring_size = i;
1283 "PM: Using %u thread(s) for decompression.\n"
1284 "PM: Loading and decompressing image data (%u pages)...\n",
1285 nr_threads, nr_to_read);
1286 m = nr_to_read / 10;
1290 start = ktime_get();
        ret = snapshot_write_next(snapshot);
        if (ret <= 0)
                goto out_finish;

        for (;;) {
                for (i = 0; !eof && i < want; i++) {
                        ret = swap_read_page(handle, page[ring], &hb);
                        if (ret) {
                                /*
                                 * On real read error, finish. On end of data,
                                 * set EOF flag and just exit the read loop.
                                 */
                                if (handle->cur &&
                                    handle->cur->entries[handle->k]) {
                                        goto out_finish;
                                } else {
                                        eof = 1;
                                        break;
                                }
                        }
                        if (++ring >= ring_size)
                                ring = 0;
                }
                asked += i;
                want -= i;

                /*
                 * We are out of data, wait for some more.
                 */
                if (!have) {
                        if (!asked)
                                break;

                        ret = hib_wait_io(&hb);
                        if (ret)
                                goto out_finish;
                        have += asked;
                        asked = 0;
                        if (eof)
                                eof = 2;
                }
                if (crc->run_threads) {
                        wait_event(crc->done, atomic_read(&crc->stop));
                        atomic_set(&crc->stop, 0);
                        crc->run_threads = 0;
                }
                for (thr = 0; have && thr < nr_threads; thr++) {
                        data[thr].cmp_len = *(size_t *)page[pg];
                        if (unlikely(!data[thr].cmp_len ||
                                     data[thr].cmp_len >
                                     lzo1x_worst_compress(LZO_UNC_SIZE))) {
                                printk(KERN_ERR
                                       "PM: Invalid LZO compressed length\n");
                                ret = -1;
                                goto out_finish;
                        }

                        need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
                                            PAGE_SIZE);
                        if (need > have) {
                                if (eof > 1) {
                                        ret = -1;
                                        goto out_finish;
                                }
                                break;
                        }

                        for (off = 0;
                             off < LZO_HEADER + data[thr].cmp_len;
                             off += PAGE_SIZE) {
                                memcpy(data[thr].cmp + off,
                                       page[pg], PAGE_SIZE);
                                have--;
                                want++;
                                if (++pg >= ring_size)
                                        pg = 0;
                        }

                        atomic_set(&data[thr].ready, 1);
                        wake_up(&data[thr].go);
                }
                /*
                 * Wait for more data while we are decompressing.
                 */
                if (have < LZO_CMP_PAGES && asked) {
                        ret = hib_wait_io(&hb);
                        if (ret)
                                goto out_finish;
                        have += asked;
                        asked = 0;
                        if (eof)
                                eof = 2;
                }
                for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
                        wait_event(data[thr].done,
                                   atomic_read(&data[thr].stop));
                        atomic_set(&data[thr].stop, 0);

                        ret = data[thr].ret;

                        if (ret < 0) {
                                printk(KERN_ERR
                                       "PM: LZO decompression failed\n");
                                goto out_finish;
                        }

                        if (unlikely(!data[thr].unc_len ||
                                     data[thr].unc_len > LZO_UNC_SIZE ||
                                     data[thr].unc_len & (PAGE_SIZE - 1))) {
                                printk(KERN_ERR
                                       "PM: Invalid LZO uncompressed length\n");
                                ret = -1;
                                goto out_finish;
                        }

                        for (off = 0;
                             off < data[thr].unc_len; off += PAGE_SIZE) {
                                memcpy(data_of(*snapshot),
                                       data[thr].unc + off, PAGE_SIZE);

                                if (!(nr_pages % m))
                                        printk(KERN_INFO
                                               "PM: Image loading progress: %3d%%\n",
                                               nr_pages / m * 10);
                                nr_pages++;

                                ret = snapshot_write_next(snapshot);
                                if (ret <= 0) {
                                        crc->run_threads = thr + 1;
                                        atomic_set(&crc->ready, 1);
                                        wake_up(&crc->go);
                                        goto out_finish;
                                }
                        }
                }

                crc->run_threads = thr;
                atomic_set(&crc->ready, 1);
                wake_up(&crc->go);
        }
out_finish:
        if (crc->run_threads) {
                wait_event(crc->done, atomic_read(&crc->stop));
                atomic_set(&crc->stop, 0);
        }
        stop = ktime_get();
        if (!ret) {
                printk(KERN_INFO "PM: Image loading done.\n");
                snapshot_write_finalize(snapshot);
                if (!snapshot_image_loaded(snapshot))
                        ret = -ENODATA;
                if (!ret) {
                        if (swsusp_header->flags & SF_CRC32_MODE) {
                                if (handle->crc32 != swsusp_header->crc32) {
                                        printk(KERN_ERR
                                               "PM: Invalid image CRC32!\n");
                                        ret = -ENODATA;
                                }
                        }
                }
        }
        swsusp_show_speed(start, stop, nr_to_read, "Read");
out_clean:
        for (i = 0; i < ring_size; i++)
                free_page((unsigned long)page[i]);
        if (crc) {
                if (crc->thr)
                        kthread_stop(crc->thr);
                kfree(crc);
        }
        if (data) {
                for (thr = 0; thr < nr_threads; thr++)
                        if (data[thr].thr)
                                kthread_stop(data[thr].thr);
                vfree(data);
        }
        vfree(page);

        return ret;
}
/**
 * swsusp_read - read the hibernation image.
 * @flags_p: flags passed by the "frozen" kernel in the image header should
 *	     be written into this memory location
 */

int swsusp_read(unsigned int *flags_p)
{
        int error;
        struct swap_map_handle handle;
        struct snapshot_handle snapshot;
        struct swsusp_info *header;

        memset(&snapshot, 0, sizeof(struct snapshot_handle));
        error = snapshot_write_next(&snapshot);
        if (error < PAGE_SIZE)
                return error < 0 ? error : -EFAULT;
        header = (struct swsusp_info *)data_of(snapshot);
        error = get_swap_reader(&handle, flags_p);
        if (error)
                goto end;
        if (!error)
                error = swap_read_page(&handle, header, NULL);
        if (!error) {
                error = (*flags_p & SF_NOCOMPRESS_MODE) ?
                        load_image(&handle, &snapshot, header->pages - 1) :
                        load_image_lzo(&handle, &snapshot, header->pages - 1);
        }
        swap_reader_finish(&handle);
end:
        if (!error)
                pr_debug("PM: Image successfully loaded\n");
        else
                pr_debug("PM: Error %d resuming\n", error);
        return error;
}
/**
 * swsusp_check - Check for swsusp signature in the resume device
 */

int swsusp_check(void)
{
        int error;

        hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
                                            FMODE_READ, NULL);
        if (!IS_ERR(hib_resume_bdev)) {
                set_blocksize(hib_resume_bdev, PAGE_SIZE);
                clear_page(swsusp_header);
                error = hib_submit_io(READ_SYNC, swsusp_resume_block,
                                      swsusp_header, NULL);
                if (error)
                        goto put;

                if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
                        memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
                        /* Reset swap signature now */
                        error = hib_submit_io(WRITE_SYNC, swsusp_resume_block,
                                              swsusp_header, NULL);
                } else {
                        error = -EINVAL;
                }

put:
                if (error)
                        blkdev_put(hib_resume_bdev, FMODE_READ);
                else
                        pr_debug("PM: Image signature found, resuming\n");
        } else {
                error = PTR_ERR(hib_resume_bdev);
        }

        if (error)
                pr_debug("PM: Image not found (code %d)\n", error);

        return error;
}
/**
 * swsusp_close - close swap device.
 */

void swsusp_close(fmode_t mode)
{
        if (IS_ERR(hib_resume_bdev)) {
                pr_debug("PM: Image device not initialised\n");
                return;
        }

        blkdev_put(hib_resume_bdev, mode);
}
/**
 * swsusp_unmark - Unmark swsusp signature in the resume device
 */

#ifdef CONFIG_SUSPEND
int swsusp_unmark(void)
{
        int error;

        hib_submit_io(READ_SYNC, swsusp_resume_block, swsusp_header, NULL);
        if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
                memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
                error = hib_submit_io(WRITE_SYNC, swsusp_resume_block,
                                      swsusp_header, NULL);
        } else {
                printk(KERN_ERR "PM: Cannot find swsusp signature!\n");
                error = -ENODEV;
        }

        /*
         * We just returned from suspend, we don't need the image any more.
         */
        free_all_swap_pages(root_swap);

        return error;
}
#endif
static int swsusp_header_init(void)
{
        swsusp_header = (struct swsusp_header *) __get_free_page(GFP_KERNEL);
        if (!swsusp_header)
                panic("Could not allocate memory for swsusp_header\n");

        return 0;
}

core_initcall(swsusp_header_init);