/*
 * linux/kernel/power/swap.c
 *
 * This file provides functions for reading the suspend image from
 * and writing it to a swap partition.
 *
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
 *
 * This file is released under the GPLv2.
 *
 */
#include <linux/module.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/genhd.h>
#include <linux/device.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/ktime.h>

#include "power.h"

#define HIBERNATE_SIG	"S1SUSPEND"
/*
 * When reading an {un,}compressed image, we may restore pages in place,
 * in which case some architectures need these pages cleaned before they
 * can be executed. We don't know which pages these may be, so clean the lot.
 */
static bool clean_pages_on_read;
static bool clean_pages_on_decompress;
/*
 * The swap map is a data structure used for keeping track of each page
 * written to a swap partition. It consists of many swap_map_page
 * structures that each contain an array of MAP_PAGE_ENTRIES swap entries.
 * These structures are stored on the swap and linked together with the
 * help of the .next_swap member.
 *
 * The swap map is created during suspend. The swap map pages are
 * allocated and populated one at a time, so we only need one memory
 * page to set up the entire structure.
 *
 * During resume we read all swap_map_page structures into a list.
 */

#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)
/*
 * Number of free pages that are not high.
 */
static inline unsigned long low_free_pages(void)
{
	return nr_free_pages() - nr_free_highpages();
}

/*
 * Number of pages required to be kept free while writing the image. Always
 * half of all available low pages before the writing starts.
 */
static inline unsigned long reqd_free_pages(void)
{
	return low_free_pages() / 2;
}

struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES];
	sector_t next_swap;
};
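
/*
 * Editor's note, as an example: with 4 KB pages and an 8-byte sector_t,
 * each swap_map_page occupies exactly one page and holds 511 data-page
 * sectors plus the next_swap link. That is one map page per 511 data
 * pages, i.e. roughly one extra page of metadata per 2 MB of image.
 */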
struct swap_map_page_list {
	struct swap_map_page *map;
	struct swap_map_page_list *next;
};

/**
 * The swap_map_handle structure is used for handling swap in
 * a file-like way.
 */
struct swap_map_handle {
	struct swap_map_page *cur;
	struct swap_map_page_list *maps;
	sector_t cur_swap;
	sector_t first_sector;
	unsigned int k;
	unsigned long reqd_free_pages;
	u32 crc32;
};
struct swsusp_header {
	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
		      sizeof(u32)];
	u32	crc32;
	sector_t image;
	unsigned int flags;	/* Flags to pass to the "boot" kernel */
	char	orig_sig[10];
	char	sig[10];
} __packed;
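
/*
 * Editor's note: the reserved[] padding sizes this structure to exactly
 * PAGE_SIZE, so that sig[] overlays the last 10 bytes of the first page
 * of the swap device, where the swap signature ("SWAP-SPACE" or
 * "SWAPSPACE2") normally lives. Writing HIBERNATE_SIG there marks the
 * device as holding an image; restoring orig_sig gives the swap back.
 */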
static struct swsusp_header *swsusp_header;

/**
 * The following functions are used for tracking the allocated
 * swap pages, so that they can be freed in case of an error.
 */
struct swsusp_extent {
	struct rb_node node;
	unsigned long start;
	unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;
static int swsusp_extents_insert(unsigned long swap_offset)
{
	struct rb_node **new = &(swsusp_extents.rb_node);
	struct rb_node *parent = NULL;
	struct swsusp_extent *ext;

	/* Figure out where to put the new node */
	while (*new) {
		ext = rb_entry(*new, struct swsusp_extent, node);
		parent = *new;
		if (swap_offset < ext->start) {
			/* Try to merge */
			if (swap_offset == ext->start - 1) {
				ext->start--;
				return 0;
			}
			new = &((*new)->rb_left);
		} else if (swap_offset > ext->end) {
			/* Try to merge */
			if (swap_offset == ext->end + 1) {
				ext->end++;
				return 0;
			}
			new = &((*new)->rb_right);
		} else {
			/* It already is in the tree */
			return -EINVAL;
		}
	}
	/* Add the new node and rebalance the tree. */
	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
	if (!ext)
		return -ENOMEM;

	ext->start = swap_offset;
	ext->end = swap_offset;
	rb_link_node(&ext->node, parent, new);
	rb_insert_color(&ext->node, &swsusp_extents);
	return 0;
}
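
/*
 * Editor's note, as a worked example: registering offsets 10, 12 and
 * then 11 first creates two single-offset extents, then merges 11 into
 * [10..11] without allocating a new node. Adjacent extents ([10..11]
 * and [12..12] here) are not coalesced with each other, which is fine
 * for the only consumer, free_all_swap_pages().
 */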
/**
 * alloc_swapdev_block - allocate a swap page and register that it has
 * been allocated, so that it can be freed in case of an error.
 */
sector_t alloc_swapdev_block(int swap)
{
	unsigned long offset;

	offset = swp_offset(get_swap_page_of_type(swap));
	if (offset) {
		if (swsusp_extents_insert(offset))
			swap_free(swp_entry(swap, offset));
		else
			return swapdev_block(swap, offset);
	}
	return 0;
}
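
/*
 * Editor's note: a zero return doubles as the error value here, since
 * swap offset 0 holds the swap header and is never handed out. Callers
 * such as get_swap_writer() and swap_write_page() treat 0 as -ENOSPC.
 */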
/**
 * free_all_swap_pages - free swap pages allocated for saving image data.
 * It also frees the extents used to register which swap entries have been
 * allocated.
 */
void free_all_swap_pages(int swap)
{
	struct rb_node *node;

	while ((node = swsusp_extents.rb_node)) {
		struct swsusp_extent *ext;
		unsigned long offset;

		ext = container_of(node, struct swsusp_extent, node);
		rb_erase(node, &swsusp_extents);
		for (offset = ext->start; offset <= ext->end; offset++)
			swap_free(swp_entry(swap, offset));

		kfree(ext);
	}
}

int swsusp_swap_in_use(void)
{
	return (swsusp_extents.rb_node != NULL);
}
/*
 * General things
 */
static unsigned short root_swap = 0xffff;
static struct block_device *hib_resume_bdev;

struct hib_bio_batch {
	atomic_t		count;
	wait_queue_head_t	wait;
	int			error;
};

static void hib_init_batch(struct hib_bio_batch *hb)
{
	atomic_set(&hb->count, 0);
	init_waitqueue_head(&hb->wait);
	hb->error = 0;
}

static void hib_end_io(struct bio *bio)
{
	struct hib_bio_batch *hb = bio->bi_private;
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (bio->bi_error) {
		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_iter.bi_sector);
	}

	if (bio_data_dir(bio) == WRITE)
		put_page(page);
	else if (clean_pages_on_read)
		flush_icache_range((unsigned long)page_address(page),
				   (unsigned long)page_address(page) + PAGE_SIZE);

	if (bio->bi_error && !hb->error)
		hb->error = bio->bi_error;
	if (atomic_dec_and_test(&hb->count))
		wake_up(&hb->wait);

	bio_put(bio);
}

static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
		struct hib_bio_batch *hb)
{
	struct page *page = virt_to_page(addr);
	struct bio *bio;
	int error = 0;

	bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1);
	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
	bio->bi_bdev = hib_resume_bdev;
	bio_set_op_attrs(bio, op, op_flags);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		printk(KERN_ERR "PM: Adding page to bio failed at %llu\n",
			(unsigned long long)bio->bi_iter.bi_sector);
		bio_put(bio);
		return -EFAULT;
	}

	if (hb) {
		bio->bi_end_io = hib_end_io;
		bio->bi_private = hb;
		atomic_inc(&hb->count);
		submit_bio(bio);
	} else {
		error = submit_bio_wait(bio);
		bio_put(bio);
	}

	return error;
}

static int hib_wait_io(struct hib_bio_batch *hb)
{
	wait_event(hb->wait, atomic_read(&hb->count) == 0);
	return hb->error;
}
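
/*
 * Editor's note: a minimal usage sketch of the batch (not a new API):
 *
 *	struct hib_bio_batch hb;
 *
 *	hib_init_batch(&hb);
 *	hib_submit_io(REQ_OP_WRITE, WRITE_SYNC, off0, buf0, &hb);
 *	hib_submit_io(REQ_OP_WRITE, WRITE_SYNC, off1, buf1, &hb);
 *	error = hib_wait_io(&hb);	// sleeps until count drops to 0
 *
 * Passing hb == NULL to hib_submit_io() instead performs the I/O
 * synchronously via submit_bio_wait().
 */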
/*
 * Saving part
 */
static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
	int error;

	hib_submit_io(REQ_OP_READ, READ_SYNC, swsusp_resume_block,
		      swsusp_header, NULL);
	if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
	    !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
		memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
		memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
		swsusp_header->image = handle->first_sector;
		swsusp_header->flags = flags;
		if (flags & SF_CRC32_MODE)
			swsusp_header->crc32 = handle->crc32;
		error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC,
				      swsusp_resume_block, swsusp_header, NULL);
	} else {
		printk(KERN_ERR "PM: Swap header not found!\n");
		error = -ENODEV;
	}
	return error;
}
/**
 * swsusp_swap_check - check if the resume device is a swap device
 * and get its index (if so)
 *
 * This is called before saving the image.
 */
static int swsusp_swap_check(void)
{
	int res;

	res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
			&hib_resume_bdev);
	if (res < 0)
		return res;

	root_swap = res;
	res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
	if (res)
		return res;

	res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
	if (res < 0)
		blkdev_put(hib_resume_bdev, FMODE_WRITE);

	/*
	 * Update the resume device to the one actually used,
	 * so the test_resume mode can use it in case it is
	 * invoked from hibernate() to test the snapshot.
	 */
	swsusp_resume_device = hib_resume_bdev->bd_dev;
	return res;
}
/**
 * write_page - Write one page to given swap location.
 * @buf:	Address we're writing.
 * @offset:	Offset of the swap page we're writing to.
 * @hb:		bio completion batch
 */
static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
{
	void *src;
	int ret;

	if (!offset)
		return -ENOSPC;

	if (hb) {
		src = (void *)__get_free_page(__GFP_RECLAIM | __GFP_NOWARN |
					      __GFP_NORETRY);
		if (src) {
			copy_page(src, buf);
		} else {
			ret = hib_wait_io(hb); /* Free pages */
			if (ret)
				return ret;
			src = (void *)__get_free_page(__GFP_RECLAIM |
						      __GFP_NOWARN |
						      __GFP_NORETRY);
			if (src) {
				copy_page(src, buf);
			} else {
				WARN_ON_ONCE(1);
				hb = NULL;	/* Go synchronous */
				src = buf;
			}
		}
	} else {
		src = buf;
	}
	return hib_submit_io(REQ_OP_WRITE, WRITE_SYNC, offset, src, hb);
}
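
/*
 * Editor's note: the bounce buffer above exists because the caller's
 * page may be reused before an asynchronous bio completes; hib_end_io()
 * frees the copy with put_page() once the write finishes. Only if two
 * allocation attempts fail, even after draining the batch, does the
 * code fall back to writing the caller's page synchronously.
 */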
static void release_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur)
		free_page((unsigned long)handle->cur);
	handle->cur = NULL;
}

static int get_swap_writer(struct swap_map_handle *handle)
{
	int ret;

	ret = swsusp_swap_check();
	if (ret) {
		if (ret != -ENOSPC)
			printk(KERN_ERR "PM: Cannot find swap device, try "
					"swapon -a.\n");
		return ret;
	}
	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
	if (!handle->cur) {
		ret = -ENOMEM;
		goto err_close;
	}
	handle->cur_swap = alloc_swapdev_block(root_swap);
	if (!handle->cur_swap) {
		ret = -ENOSPC;
		goto err_rel;
	}
	handle->k = 0;
	handle->reqd_free_pages = reqd_free_pages();
	handle->first_sector = handle->cur_swap;
	return 0;
err_rel:
	release_swap_writer(handle);
err_close:
	swsusp_close(FMODE_WRITE);
	return ret;
}
static int swap_write_page(struct swap_map_handle *handle, void *buf,
		struct hib_bio_batch *hb)
{
	int error = 0;
	sector_t offset;

	if (!handle->cur)
		return -EINVAL;
	offset = alloc_swapdev_block(root_swap);
	error = write_page(buf, offset, hb);
	if (error)
		return error;
	handle->cur->entries[handle->k++] = offset;
	if (handle->k >= MAP_PAGE_ENTRIES) {
		offset = alloc_swapdev_block(root_swap);
		if (!offset)
			return -ENOSPC;
		handle->cur->next_swap = offset;
		error = write_page(handle->cur, handle->cur_swap, hb);
		if (error)
			goto out;
		clear_page(handle->cur);
		handle->cur_swap = offset;
		handle->k = 0;

		if (hb && low_free_pages() <= handle->reqd_free_pages) {
			error = hib_wait_io(hb);
			if (error)
				goto out;
			/*
			 * Recalculate the number of required free pages, to
			 * make sure we never take more than half.
			 */
			handle->reqd_free_pages = reqd_free_pages();
		}
	}
 out:
	return error;
}
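
/*
 * Editor's note: in other words, every MAP_PAGE_ENTRIES data pages the
 * current map page is itself written out at handle->cur_swap, with
 * next_swap already pointing at the freshly reserved block for the
 * following map page. That forward link is what get_swap_reader()
 * walks on resume.
 */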
static int flush_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur && handle->cur_swap)
		return write_page(handle->cur, handle->cur_swap, NULL);
	else
		return -EINVAL;
}

static int swap_writer_finish(struct swap_map_handle *handle,
		unsigned int flags, int error)
{
	if (!error) {
		flush_swap_writer(handle);
		printk(KERN_INFO "PM: S");
		error = mark_swapfiles(handle, flags);
		printk("|\n");
	}

	if (error)
		free_all_swap_pages(root_swap);
	release_swap_writer(handle);
	swsusp_close(FMODE_WRITE);

	return error;
}
/* We need to remember how much compressed data we need to read. */
#define LZO_HEADER	sizeof(size_t)

/* Number of pages/bytes we'll compress at one time. */
#define LZO_UNC_PAGES	32
#define LZO_UNC_SIZE	(LZO_UNC_PAGES * PAGE_SIZE)

/* Number of pages/bytes we need for compressed data (worst case). */
#define LZO_CMP_PAGES	DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
				     LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE	(LZO_CMP_PAGES * PAGE_SIZE)

/* Maximum number of threads for compression/decompression. */
#define LZO_THREADS	3

/* Minimum/maximum number of pages for read buffering. */
#define LZO_MIN_RD_PAGES	1024
#define LZO_MAX_RD_PAGES	8192
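
/*
 * Editor's note, worked numbers assuming 4 KB pages and an 8-byte
 * size_t: one compression unit is LZO_UNC_SIZE = 128 KB,
 * lzo1x_worst_compress() of that is 131072 + 131072/16 + 64 + 3 =
 * 139331 bytes, so with the 8-byte length header LZO_CMP_PAGES comes
 * to 35 pages (LZO_CMP_SIZE = 140 KB).
 */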
/**
 * save_image - save the suspend image data
 */
static int save_image(struct swap_map_handle *handle,
		      struct snapshot_handle *snapshot,
		      unsigned int nr_to_write)
{
	unsigned int m;
	int ret;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;

	hib_init_batch(&hb);

	printk(KERN_INFO "PM: Saving image data pages (%u pages)...\n",
		nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	while (1) {
		ret = snapshot_read_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_write_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			printk(KERN_INFO "PM: Image saving progress: %3d%%\n",
			       nr_pages / m * 10);
		nr_pages++;
	}
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		printk(KERN_INFO "PM: Image saving done.\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
	return ret;
}
/**
 * Structure used for CRC32.
 */
struct crc_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	unsigned run_threads;                     /* nr current threads */
	wait_queue_head_t go;                     /* start crc update */
	wait_queue_head_t done;                   /* crc update done */
	u32 *crc32;                               /* points to handle's crc32 */
	size_t *unc_len[LZO_THREADS];             /* uncompressed lengths */
	unsigned char *unc[LZO_THREADS];          /* uncompressed data */
};

/**
 * CRC32 update function that runs in its own thread.
 */
static int crc32_threadfn(void *data)
{
	struct crc_data *d = data;
	unsigned i;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		for (i = 0; i < d->run_threads; i++)
			*d->crc32 = crc32_le(*d->crc32,
			                     d->unc[i], *d->unc_len[i]);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}
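
/*
 * Editor's note: the ready/stop flags plus the go/done wait queues form
 * a simple rendezvous. The producer fills d->unc[]/d->unc_len[], sets
 * ready and wakes 'go'; the thread clears ready, does the work, sets
 * stop and wakes 'done'; the consumer then clears stop. The same
 * handshake is used by the compression and decompression threads below.
 */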
/**
 * Structure used for LZO data compression.
 */
struct cmp_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	int ret;                                  /* return code */
	wait_queue_head_t go;                     /* start compression */
	wait_queue_head_t done;                   /* compression done */
	size_t unc_len;                           /* uncompressed length */
	size_t cmp_len;                           /* compressed length */
	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
	unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
};

/**
 * Compression function that runs in its own thread.
 */
static int lzo_compress_threadfn(void *data)
{
	struct cmp_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->ret = lzo1x_1_compress(d->unc, d->unc_len,
		                          d->cmp + LZO_HEADER, &d->cmp_len,
		                          d->wrk);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}
/**
 * save_image_lzo - Save the suspend image data compressed with LZO.
 * @handle: Swap map handle to use for saving the image.
 * @snapshot: Image to read data from.
 * @nr_to_write: Number of pages to save.
 */
static int save_image_lzo(struct swap_map_handle *handle,
			  struct snapshot_handle *snapshot,
			  unsigned int nr_to_write)
{
	unsigned int m;
	int ret = 0;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	size_t off;
	unsigned thr, run_threads, nr_threads;
	unsigned char *page = NULL;
	struct cmp_data *data = NULL;
	struct crc_data *crc = NULL;

	hib_init_batch(&hb);

	/*
	 * We'll limit the number of threads for compression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = (void *)__get_free_page(__GFP_RECLAIM | __GFP_HIGH);
	if (!page) {
		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vmalloc(sizeof(*data) * nr_threads);
	if (!data) {
		printk(KERN_ERR "PM: Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	for (thr = 0; thr < nr_threads; thr++)
		memset(&data[thr], 0, offsetof(struct cmp_data, go));

	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		printk(KERN_ERR "PM: Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	memset(crc, 0, offsetof(struct crc_data, go));

	/*
	 * Start the compression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_compress_threadfn,
		                            &data[thr],
		                            "image_compress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			printk(KERN_ERR
			       "PM: Cannot start compression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Adjust the number of required free pages after all allocations have
	 * been done. We don't want to run out of pages when writing.
	 */
	handle->reqd_free_pages = reqd_free_pages();

	printk(KERN_INFO
		"PM: Using %u thread(s) for compression.\n"
		"PM: Compressing and saving image data (%u pages)...\n",
		nr_threads, nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for (;;) {
		for (thr = 0; thr < nr_threads; thr++) {
			for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
				ret = snapshot_read_next(snapshot);
				if (ret < 0)
					goto out_finish;

				if (!ret)
					break;

				memcpy(data[thr].unc + off,
				       data_of(*snapshot), PAGE_SIZE);

				if (!(nr_pages % m))
					printk(KERN_INFO
					       "PM: Image saving progress: "
					       "%3d%%\n",
					       nr_pages / m * 10);
				nr_pages++;
			}
			if (!off)
				break;

			data[thr].unc_len = off;

			atomic_set(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		if (!thr)
			break;

		crc->run_threads = thr;
		atomic_set(&crc->ready, 1);
		wake_up(&crc->go);

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
			           atomic_read(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				printk(KERN_ERR "PM: LZO compression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].cmp_len ||
			             data[thr].cmp_len >
			             lzo1x_worst_compress(data[thr].unc_len))) {
				printk(KERN_ERR
				       "PM: Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			*(size_t *)data[thr].cmp = data[thr].cmp_len;

			/*
			 * Given we are writing one page at a time to disk, we
			 * copy that much from the buffer, although the last
			 * bit will likely be smaller than full page. This is
			 * OK - we saved the length of the compressed data, so
			 * any garbage at the end will be discarded when we
			 * read it.
			 */
			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(page, data[thr].cmp + off, PAGE_SIZE);

				ret = swap_write_page(handle, page, &hb);
				if (ret)
					goto out_finish;
			}
		}

		wait_event(crc->done, atomic_read(&crc->stop));
		atomic_set(&crc->stop, 0);
	}

out_finish:
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		printk(KERN_INFO "PM: Image saving done.\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
out_clean:
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	if (page)
		free_page((unsigned long)page);

	return ret;
}
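
/*
 * Editor's note: the resulting on-swap stream (behind the swsusp_info
 * header page) is a sequence of records, each starting with a size_t
 * compressed length followed by the compressed data, together padded
 * out to a whole number of pages. load_image_lzo() below parses
 * exactly this format.
 */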
/**
 * enough_swap - Make sure we have enough swap to save the image.
 *
 * Returns true or false after checking the total amount of swap
 * space available from the resume partition.
 */
static int enough_swap(unsigned int nr_pages, unsigned int flags)
{
	unsigned int free_swap = count_swap_pages(root_swap, 1);
	unsigned int required;

	pr_debug("PM: Free swap pages: %u\n", free_swap);

	required = PAGES_FOR_IO + nr_pages;
	return free_swap > required;
}
/**
 * swsusp_write - Write entire image and metadata.
 * @flags: flags to pass to the "boot" kernel in the image header
 *
 * It is important _NOT_ to umount filesystems at this point. We want
 * them synced (in case something goes wrong) but we DO NOT want to mark
 * the filesystems clean: they are not. (And it does not matter: if we
 * resume correctly, we'll mark the system clean anyway.)
 */
int swsusp_write(unsigned int flags)
{
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;
	unsigned long pages;
	int error;

	pages = snapshot_get_image_size();
	error = get_swap_writer(&handle);
	if (error) {
		printk(KERN_ERR "PM: Cannot get swap writer\n");
		return error;
	}
	if (flags & SF_NOCOMPRESS_MODE) {
		if (!enough_swap(pages, flags)) {
			printk(KERN_ERR "PM: Not enough free swap\n");
			error = -ENOSPC;
			goto out_finish;
		}
	}
	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_read_next(&snapshot);
	if (error < PAGE_SIZE) {
		if (error >= 0)
			error = -EFAULT;

		goto out_finish;
	}
	header = (struct swsusp_info *)data_of(snapshot);
	error = swap_write_page(&handle, header, NULL);
	if (!error) {
		error = (flags & SF_NOCOMPRESS_MODE) ?
			save_image(&handle, &snapshot, pages - 1) :
			save_image_lzo(&handle, &snapshot, pages - 1);
	}
out_finish:
	error = swap_writer_finish(&handle, flags, error);
	return error;
}
/**
 * The following functions allow us to read data using a swap map
 * in a file-like way.
 */
static void release_swap_reader(struct swap_map_handle *handle)
{
	struct swap_map_page_list *tmp;

	while (handle->maps) {
		if (handle->maps->map)
			free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
	}
	handle->cur = NULL;
}

static int get_swap_reader(struct swap_map_handle *handle,
		unsigned int *flags_p)
{
	int error;
	struct swap_map_page_list *tmp, *last;
	sector_t offset;

	*flags_p = swsusp_header->flags;

	if (!swsusp_header->image) /* how can this happen? */
		return -EINVAL;

	handle->cur = NULL;
	last = handle->maps = NULL;
	offset = swsusp_header->image;
	while (offset) {
		tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL);
		if (!tmp) {
			release_swap_reader(handle);
			return -ENOMEM;
		}
		memset(tmp, 0, sizeof(*tmp));
		if (!handle->maps)
			handle->maps = tmp;
		if (last)
			last->next = tmp;
		last = tmp;

		tmp->map = (struct swap_map_page *)
			   __get_free_page(__GFP_RECLAIM | __GFP_HIGH);
		if (!tmp->map) {
			release_swap_reader(handle);
			return -ENOMEM;
		}

		error = hib_submit_io(REQ_OP_READ, READ_SYNC, offset,
				      tmp->map, NULL);
		if (error) {
			release_swap_reader(handle);
			return error;
		}
		offset = tmp->map->next_swap;
	}
	handle->k = 0;
	handle->cur = handle->maps->map;
	return 0;
}

static int swap_read_page(struct swap_map_handle *handle, void *buf,
		struct hib_bio_batch *hb)
{
	sector_t offset;
	int error;
	struct swap_map_page_list *tmp;

	if (!handle->cur)
		return -EINVAL;
	offset = handle->cur->entries[handle->k];
	if (!offset)
		return -EFAULT;
	error = hib_submit_io(REQ_OP_READ, READ_SYNC, offset, buf, hb);
	if (error)
		return error;
	if (++handle->k >= MAP_PAGE_ENTRIES) {
		handle->k = 0;
		free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
		if (!handle->maps)
			release_swap_reader(handle);
		else
			handle->cur = handle->maps->map;
	}
	return error;
}

static int swap_reader_finish(struct swap_map_handle *handle)
{
	release_swap_reader(handle);

	return 0;
}
/**
 * load_image - load the image using the swap map handle
 * @handle and the snapshot handle @snapshot
 * (assume there are @nr_to_read pages to load)
 */
static int load_image(struct swap_map_handle *handle,
		      struct snapshot_handle *snapshot,
		      unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	ktime_t start;
	ktime_t stop;
	struct hib_bio_batch hb;
	int err2;
	unsigned nr_pages;

	hib_init_batch(&hb);

	clean_pages_on_read = true;
	printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n",
		nr_to_read);
	m = nr_to_read / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for ( ; ; ) {
		ret = snapshot_write_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_read_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (snapshot->sync_read)
			ret = hib_wait_io(&hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			printk(KERN_INFO "PM: Image loading progress: %3d%%\n",
			       nr_pages / m * 10);
		nr_pages++;
	}
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret) {
		printk(KERN_INFO "PM: Image loading done.\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
	return ret;
}
/**
 * Structure used for LZO data decompression.
 */
struct dec_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	int ret;                                  /* return code */
	wait_queue_head_t go;                     /* start decompression */
	wait_queue_head_t done;                   /* decompression done */
	size_t unc_len;                           /* uncompressed length */
	size_t cmp_len;                           /* compressed length */
	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
};

/**
 * Decompression function that runs in its own thread.
 */
static int lzo_decompress_threadfn(void *data)
{
	struct dec_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
		                  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->unc_len = LZO_UNC_SIZE;
		d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
		                               d->unc, &d->unc_len);
		if (clean_pages_on_decompress)
			flush_icache_range((unsigned long)d->unc,
					   (unsigned long)d->unc + d->unc_len);

		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}
/**
 * load_image_lzo - Load compressed image data and decompress it with LZO.
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Image to copy uncompressed data into.
 * @nr_to_read: Number of pages to load.
 */
static int load_image_lzo(struct swap_map_handle *handle,
			  struct snapshot_handle *snapshot,
			  unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	int eof = 0;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	unsigned nr_pages;
	size_t off;
	unsigned i, thr, run_threads, nr_threads;
	unsigned ring = 0, pg = 0, ring_size = 0,
	         have = 0, want, need, asked = 0;
	unsigned long read_pages = 0;
	unsigned char **page = NULL;
	struct dec_data *data = NULL;
	struct crc_data *crc = NULL;

	hib_init_batch(&hb);

	/*
	 * We'll limit the number of threads for decompression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
	if (!page) {
		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vmalloc(sizeof(*data) * nr_threads);
	if (!data) {
		printk(KERN_ERR "PM: Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	for (thr = 0; thr < nr_threads; thr++)
		memset(&data[thr], 0, offsetof(struct dec_data, go));

	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		printk(KERN_ERR "PM: Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	memset(crc, 0, offsetof(struct crc_data, go));

	clean_pages_on_decompress = true;

	/*
	 * Start the decompression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_decompress_threadfn,
		                            &data[thr],
		                            "image_decompress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			printk(KERN_ERR
			       "PM: Cannot start decompression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Set the number of pages for read buffering.
	 * This is complete guesswork, because we'll only know the real
	 * picture once prepare_image() is called, which is much later on
	 * during the image load phase. We'll assume the worst case and
	 * say that none of the image pages are from high memory.
	 */
	if (low_free_pages() > snapshot_get_image_size())
		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
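
	/*
	 * Editor's note: with 4 KB pages this aims at half of the estimated
	 * spare low memory, clamped between 4 MB (LZO_MIN_RD_PAGES) and
	 * 32 MB (LZO_MAX_RD_PAGES) of read-ahead buffer.
	 */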
	for (i = 0; i < read_pages; i++) {
		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
						  __GFP_RECLAIM | __GFP_HIGH :
						  __GFP_RECLAIM | __GFP_NOWARN |
						  __GFP_NORETRY);

		if (!page[i]) {
			if (i < LZO_CMP_PAGES) {
				ring_size = i;
				printk(KERN_ERR
				       "PM: Failed to allocate LZO pages\n");
				ret = -ENOMEM;
				goto out_clean;
			} else {
				break;
			}
		}
	}
	want = ring_size = i;

	printk(KERN_INFO
		"PM: Using %u thread(s) for decompression.\n"
		"PM: Loading and decompressing image data (%u pages)...\n",
		nr_threads, nr_to_read);
	m = nr_to_read / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();

	ret = snapshot_write_next(snapshot);
	if (ret <= 0)
		goto out_finish;
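
	/*
	 * Editor's note, ring-buffer bookkeeping for the loop below: 'ring'
	 * is the slot the next read lands in, 'pg' is the slot the
	 * decompressors consume from, 'asked' counts reads submitted but not
	 * yet waited for, 'have' counts pages known to contain data, and
	 * 'want' counts free slots left to refill.
	 */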
	for (;;) {
		for (i = 0; !eof && i < want; i++) {
			ret = swap_read_page(handle, page[ring], &hb);
			if (ret) {
				/*
				 * On real read error, finish. On end of data,
				 * set EOF flag and just exit the read loop.
				 */
				if (handle->cur &&
				    handle->cur->entries[handle->k]) {
					goto out_finish;
				} else {
					eof = 1;
					break;
				}
			}
			if (++ring >= ring_size)
				ring = 0;
		}
		asked += i;
		want -= i;

		/*
		 * We are out of data, wait for some more.
		 */
		if (!have) {
			if (!asked)
				break;

			ret = hib_wait_io(&hb);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		if (crc->run_threads) {
			wait_event(crc->done, atomic_read(&crc->stop));
			atomic_set(&crc->stop, 0);
			crc->run_threads = 0;
		}

		for (thr = 0; have && thr < nr_threads; thr++) {
			data[thr].cmp_len = *(size_t *)page[pg];
			if (unlikely(!data[thr].cmp_len ||
			             data[thr].cmp_len >
			             lzo1x_worst_compress(LZO_UNC_SIZE))) {
				printk(KERN_ERR
				       "PM: Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
			                    PAGE_SIZE);
			if (need > have) {
				if (eof > 1) {
					ret = -1;
					goto out_finish;
				}
				break;
			}

			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(data[thr].cmp + off,
				       page[pg], PAGE_SIZE);
				have--;
				want++;
				if (++pg >= ring_size)
					pg = 0;
			}

			atomic_set(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		/*
		 * Wait for more data while we are decompressing.
		 */
		if (have < LZO_CMP_PAGES && asked) {
			ret = hib_wait_io(&hb);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
			           atomic_read(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				printk(KERN_ERR
				       "PM: LZO decompression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].unc_len ||
			             data[thr].unc_len > LZO_UNC_SIZE ||
			             data[thr].unc_len & (PAGE_SIZE - 1))) {
				printk(KERN_ERR
				       "PM: Invalid LZO uncompressed length\n");
				ret = -1;
				goto out_finish;
			}

			for (off = 0;
			     off < data[thr].unc_len; off += PAGE_SIZE) {
				memcpy(data_of(*snapshot),
				       data[thr].unc + off, PAGE_SIZE);

				if (!(nr_pages % m))
					printk(KERN_INFO
					       "PM: Image loading progress: "
					       "%3d%%\n",
					       nr_pages / m * 10);
				nr_pages++;

				ret = snapshot_write_next(snapshot);
				if (ret <= 0) {
					crc->run_threads = thr + 1;
					atomic_set(&crc->ready, 1);
					wake_up(&crc->go);
					goto out_finish;
				}
			}
		}

		crc->run_threads = thr;
		atomic_set(&crc->ready, 1);
		wake_up(&crc->go);
	}

out_finish:
	if (crc->run_threads) {
		wait_event(crc->done, atomic_read(&crc->stop));
		atomic_set(&crc->stop, 0);
	}
	stop = ktime_get();
	if (!ret) {
		printk(KERN_INFO "PM: Image loading done.\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
		if (!ret) {
			if (swsusp_header->flags & SF_CRC32_MODE) {
				if (handle->crc32 != swsusp_header->crc32) {
					printk(KERN_ERR
					       "PM: Invalid image CRC32!\n");
					ret = -ENODATA;
				}
			}
		}
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
out_clean:
	for (i = 0; i < ring_size; i++)
		free_page((unsigned long)page[i]);
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	vfree(page);

	return ret;
}
/**
 * swsusp_read - read the hibernation image.
 * @flags_p: flags passed by the "frozen" kernel in the image header are
 *	written into this memory location
 */
int swsusp_read(unsigned int *flags_p)
{
	int error;
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;

	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_write_next(&snapshot);
	if (error < PAGE_SIZE)
		return error < 0 ? error : -EFAULT;
	header = (struct swsusp_info *)data_of(snapshot);
	error = get_swap_reader(&handle, flags_p);
	if (error)
		goto end;
	error = swap_read_page(&handle, header, NULL);
	if (!error) {
		error = (*flags_p & SF_NOCOMPRESS_MODE) ?
			load_image(&handle, &snapshot, header->pages - 1) :
			load_image_lzo(&handle, &snapshot, header->pages - 1);
	}
	swap_reader_finish(&handle);
end:
	if (!error)
		pr_debug("PM: Image successfully loaded\n");
	else
		pr_debug("PM: Error %d resuming\n", error);
	return error;
}
/**
 * swsusp_check - Check for swsusp signature in the resume device
 */
int swsusp_check(void)
{
	int error;

	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
					    FMODE_READ, NULL);
	if (!IS_ERR(hib_resume_bdev)) {
		set_blocksize(hib_resume_bdev, PAGE_SIZE);
		clear_page(swsusp_header);
		error = hib_submit_io(REQ_OP_READ, READ_SYNC,
					swsusp_resume_block,
					swsusp_header, NULL);
		if (error)
			goto put;

		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
			/* Reset swap signature now */
			error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC,
						swsusp_resume_block,
						swsusp_header, NULL);
		} else {
			error = -EINVAL;
		}

put:
		if (error)
			blkdev_put(hib_resume_bdev, FMODE_READ);
		else
			pr_debug("PM: Image signature found, resuming\n");
	} else {
		error = PTR_ERR(hib_resume_bdev);
	}

	if (error)
		pr_debug("PM: Image not found (code %d)\n", error);

	return error;
}
/**
 * swsusp_close - close swap device.
 */
void swsusp_close(fmode_t mode)
{
	if (IS_ERR(hib_resume_bdev)) {
		pr_debug("PM: Image device not initialised\n");
		return;
	}

	blkdev_put(hib_resume_bdev, mode);
}

/**
 * swsusp_unmark - Unmark swsusp signature in the resume device
 */
#ifdef CONFIG_SUSPEND
int swsusp_unmark(void)
{
	int error;

	hib_submit_io(REQ_OP_READ, READ_SYNC, swsusp_resume_block,
		      swsusp_header, NULL);
	if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
		memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
		error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC,
					swsusp_resume_block,
					swsusp_header, NULL);
	} else {
		printk(KERN_ERR "PM: Cannot find swsusp signature!\n");
		error = -ENODEV;
	}

	/*
	 * We just returned from suspend, we don't need the image any more.
	 */
	free_all_swap_pages(root_swap);

	return error;
}
#endif

static int swsusp_header_init(void)
{
	swsusp_header = (struct swsusp_header *) __get_free_page(GFP_KERNEL);
	if (!swsusp_header)
		panic("Could not allocate memory for swsusp_header\n");

	return 0;
}

core_initcall(swsusp_header_init);