/*
 * linux/kernel/power/swap.c
 *
 * This file provides functions for reading the suspend image from
 * and writing it to a swap partition.
 *
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 *
 */

#include <linux/module.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/genhd.h>
#include <linux/device.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>

#include "power.h"

#define HIBERNATE_SIG	"LINHIB0001"

/*
 * The swap map is a data structure used for keeping track of each page
 * written to a swap partition.  It consists of many swap_map_page
 * structures that each contain an array of MAP_PAGE_ENTRIES swap entries.
 * These structures are stored on the swap and linked together with the
 * help of the .next_swap member.
 *
 * The swap map is created during suspend.  The swap map pages are
 * allocated and populated one at a time, so we only need one memory
 * page to set up the entire structure.
 *
 * During resume we also only need to use one swap_map_page structure
 * at a time.
 */

#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)

struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES];
	sector_t next_swap;
};

/**
 * The swap_map_handle structure is used for handling swap in
 * a file-like way.
 */

struct swap_map_handle {
	struct swap_map_page *cur;
	sector_t cur_swap;
	sector_t first_sector;
	unsigned int k;
};
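/*
 * On-disk header kept in the first page of the resume swap area.  The
 * field sizes below add up to exactly PAGE_SIZE, so sig (and the saved
 * orig_sig) occupy the last bytes of that page, where the swap signature
 * normally lives; mark_swapfiles() replaces it with HIBERNATE_SIG when
 * the image is written and swsusp_check() restores it on resume.
 */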
struct swsusp_header {
	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int)];
	sector_t image;
	unsigned int flags;	/* Flags to pass to the "boot" kernel */
	char orig_sig[10];
	char sig[10];
} __attribute__((packed));

static struct swsusp_header *swsusp_header;

/**
 * The following functions are used for tracking the allocated
 * swap pages, so that they can be freed in case of an error.
 */

struct swsusp_extent {
	struct rb_node node;
	unsigned long start;
	unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;
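/*
 * swsusp_extents_insert() keeps the tree compact by merging a new offset
 * into an existing extent whenever it is adjacent to one (extending
 * start downwards or end upwards); only non-adjacent offsets get a node
 * of their own.
 */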
static int swsusp_extents_insert(unsigned long swap_offset)
{
	struct rb_node **new = &(swsusp_extents.rb_node);
	struct rb_node *parent = NULL;
	struct swsusp_extent *ext;

	/* Figure out where to put the new node */
	while (*new) {
		ext = container_of(*new, struct swsusp_extent, node);
		parent = *new;
		if (swap_offset < ext->start) {
			/* Try to merge */
			if (swap_offset == ext->start - 1) {
				ext->start--;
				return 0;
			}
			new = &((*new)->rb_left);
		} else if (swap_offset > ext->end) {
			/* Try to merge */
			if (swap_offset == ext->end + 1) {
				ext->end++;
				return 0;
			}
			new = &((*new)->rb_right);
		} else {
			/* It already is in the tree */
			return -EINVAL;
		}
	}
	/* Add the new node and rebalance the tree. */
	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
	if (!ext)
		return -ENOMEM;

	ext->start = swap_offset;
	ext->end = swap_offset;
	rb_link_node(&ext->node, parent, new);
	rb_insert_color(&ext->node, &swsusp_extents);
	return 0;
}

/**
 * alloc_swapdev_block - allocate a swap page and register that it has
 * been allocated, so that it can be freed in case of an error.
 */

sector_t alloc_swapdev_block(int swap)
{
	unsigned long offset;

	offset = swp_offset(get_swap_page_of_type(swap));
	if (offset) {
		if (swsusp_extents_insert(offset))
			swap_free(swp_entry(swap, offset));
		else
			return swapdev_block(swap, offset);
	}
	return 0;
}

/**
 * free_all_swap_pages - free swap pages allocated for saving image data.
 * It also frees the extents used to register which swap entries had been
 * allocated.
 */

void free_all_swap_pages(int swap)
{
	struct rb_node *node;

	while ((node = swsusp_extents.rb_node)) {
		struct swsusp_extent *ext;
		unsigned long offset;

		ext = container_of(node, struct swsusp_extent, node);
		rb_erase(node, &swsusp_extents);
		for (offset = ext->start; offset <= ext->end; offset++)
			swap_free(swp_entry(swap, offset));

		kfree(ext);
	}
}

int swsusp_swap_in_use(void)
{
	return (swsusp_extents.rb_node != NULL);
}

/*
 * General things
 */

static unsigned short root_swap = 0xffff;
struct block_device *hib_resume_bdev;

/*
 * Saving part
 */

static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
	int error;

	hib_bio_read_page(swsusp_resume_block, swsusp_header, NULL);
	if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
	    !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
		memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
		memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
		swsusp_header->image = handle->first_sector;
		swsusp_header->flags = flags;
		error = hib_bio_write_page(swsusp_resume_block,
					swsusp_header, NULL);
	} else {
		printk(KERN_ERR "PM: Swap header not found!\n");
		error = -ENODEV;
	}
	return error;
}

/**
 * swsusp_swap_check - check if the resume device is a swap device
 * and get its index (if so)
 *
 * This is called before saving the image.
 */

static int swsusp_swap_check(void)
{
	int res;

	res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
			&hib_resume_bdev);
	if (res < 0)
		return res;

	root_swap = res;
	res = blkdev_get(hib_resume_bdev, FMODE_WRITE);
	if (res)
		return res;

	res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
	if (res < 0)
		blkdev_put(hib_resume_bdev, FMODE_WRITE);

	return res;
}

/**
 * write_page - Write one page to given swap location.
 * @buf: Address we're writing.
 * @offset: Offset of the swap page we're writing to.
 * @bio_chain: Link the next write BIO here
 */

static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
{
	void *src;

	if (!offset)
		return -ENOSPC;

	if (bio_chain) {
		/*
		 * The write will be submitted asynchronously, so work on a
		 * private copy of the data; the caller's page may be reused
		 * before the I/O completes.
		 */
		src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
		if (src) {
			copy_page(src, buf);
		} else {
			WARN_ON_ONCE(1);
			bio_chain = NULL;	/* Go synchronous */
			src = buf;
		}
	} else {
		src = buf;
	}
	return hib_bio_write_page(offset, src, bio_chain);
}

static void release_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur)
		free_page((unsigned long)handle->cur);
	handle->cur = NULL;
}

static int get_swap_writer(struct swap_map_handle *handle)
{
	int ret;

	ret = swsusp_swap_check();
	if (ret) {
		if (ret != -ENOSPC)
			printk(KERN_ERR "PM: Cannot find swap device, try "
					"swapon -a.\n");
		return ret;
	}
	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
	if (!handle->cur) {
		ret = -ENOMEM;
		goto err_close;
	}
	handle->cur_swap = alloc_swapdev_block(root_swap);
	if (!handle->cur_swap) {
		ret = -ENOSPC;
		goto err_rel;
	}
	handle->k = 0;
	handle->first_sector = handle->cur_swap;
	return 0;
err_rel:
	release_swap_writer(handle);
err_close:
	swsusp_close(FMODE_WRITE);
	return ret;
}

static int swap_write_page(struct swap_map_handle *handle, void *buf,
				struct bio **bio_chain)
{
	int error = 0;
	sector_t offset;

	if (!handle->cur)
		return -EINVAL;
	offset = alloc_swapdev_block(root_swap);
	error = write_page(buf, offset, bio_chain);
	if (error)
		return error;
	handle->cur->entries[handle->k++] = offset;
	if (handle->k >= MAP_PAGE_ENTRIES) {
		error = hib_wait_on_bio_chain(bio_chain);
		if (error)
			goto out;
		offset = alloc_swapdev_block(root_swap);
		if (!offset)
			return -ENOSPC;
		handle->cur->next_swap = offset;
		error = write_page(handle->cur, handle->cur_swap, NULL);
		if (error)
			goto out;
		clear_page(handle->cur);
		handle->cur_swap = offset;
		handle->k = 0;
	}
 out:
	return error;
}
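/*
 * flush_swap_writer() writes out the current (possibly only partially
 * filled) swap map page, so that the on-disk chain of map pages built up
 * by swap_write_page() is complete before the header is updated.
 */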
static int flush_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur && handle->cur_swap)
		return write_page(handle->cur, handle->cur_swap, NULL);
	else
		return -EINVAL;
}

static int swap_writer_finish(struct swap_map_handle *handle,
		unsigned int flags, int error)
{
	if (!error) {
		flush_swap_writer(handle);
		printk(KERN_INFO "PM: S");
		error = mark_swapfiles(handle, flags);
		printk("|\n");
	}

	if (error)
		free_all_swap_pages(root_swap);
	release_swap_writer(handle);
	swsusp_close(FMODE_WRITE);

	return error;
}

/* We need to remember how much compressed data we need to read. */
#define LZO_HEADER	sizeof(size_t)

/* Number of pages/bytes we'll compress at one time. */
#define LZO_UNC_PAGES	32
#define LZO_UNC_SIZE	(LZO_UNC_PAGES * PAGE_SIZE)

/* Number of pages/bytes we need for compressed data (worst case). */
#define LZO_CMP_PAGES	DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
				LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE	(LZO_CMP_PAGES * PAGE_SIZE)
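/*
 * For example, with 4 KB pages each compression unit is 128 KB of image
 * data; lzo1x_worst_compress() adds roughly 1/16th plus a small constant
 * on top of that, so LZO_CMP_SIZE ends up a few pages larger than
 * LZO_UNC_SIZE to leave room for data that does not compress at all.
 */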
/**
 * save_image - save the suspend image data
 */

static int save_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_write)
{
	unsigned int m;
	int ret;
	int nr_pages;
	int err2;
	struct bio *bio;
	struct timeval start;
	struct timeval stop;

	printk(KERN_INFO "PM: Saving image data pages (%u pages) ... ",
		nr_to_write);
	m = nr_to_write / 100;
	if (!m)
		m = 1;
	nr_pages = 0;
	bio = NULL;
	do_gettimeofday(&start);
	while (1) {
		ret = snapshot_read_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_write_page(handle, data_of(*snapshot), &bio);
		if (ret)
			break;
		if (!(nr_pages % m))
			printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m);
		nr_pages++;
	}
	err2 = hib_wait_on_bio_chain(&bio);
	do_gettimeofday(&stop);
	if (!ret)
		ret = err2;
	if (!ret)
		printk(KERN_CONT "\b\b\b\bdone\n");
	else
		printk(KERN_CONT "\n");
	swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
	return ret;
}

/**
 * save_image_lzo - Save the suspend image data compressed with LZO.
 * @handle: Swap map handle to use for saving the image.
 * @snapshot: Image to read data from.
 * @nr_to_write: Number of pages to save.
 */
static int save_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_write)
{
	unsigned int m;
	int ret = 0;
	int nr_pages;
	int err2;
	struct bio *bio;
	struct timeval start;
	struct timeval stop;
	size_t off, unc_len, cmp_len;
	unsigned char *unc, *cmp, *wrk, *page;

	page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
	if (!page) {
		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
		return -ENOMEM;
	}

	wrk = vmalloc(LZO1X_1_MEM_COMPRESS);
	if (!wrk) {
		printk(KERN_ERR "PM: Failed to allocate LZO workspace\n");
		free_page((unsigned long)page);
		return -ENOMEM;
	}

	unc = vmalloc(LZO_UNC_SIZE);
	if (!unc) {
		printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n");
		vfree(wrk);
		free_page((unsigned long)page);
		return -ENOMEM;
	}

	cmp = vmalloc(LZO_CMP_SIZE);
	if (!cmp) {
		printk(KERN_ERR "PM: Failed to allocate LZO compressed\n");
		vfree(unc);
		vfree(wrk);
		free_page((unsigned long)page);
		return -ENOMEM;
	}

	printk(KERN_INFO
		"PM: Compressing and saving image data (%u pages) ... ",
		nr_to_write);
	m = nr_to_write / 100;
	if (!m)
		m = 1;
	nr_pages = 0;
	bio = NULL;
	do_gettimeofday(&start);
	for (;;) {
		for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
			ret = snapshot_read_next(snapshot);
			if (ret < 0)
				goto out_finish;

			if (!ret)
				break;

			memcpy(unc + off, data_of(*snapshot), PAGE_SIZE);

			if (!(nr_pages % m))
				printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m);
			nr_pages++;
		}

		if (!off)
			break;

		unc_len = off;
		ret = lzo1x_1_compress(unc, unc_len,
		                       cmp + LZO_HEADER, &cmp_len, wrk);
		if (ret < 0) {
			printk(KERN_ERR "PM: LZO compression failed\n");
			break;
		}

		if (unlikely(!cmp_len ||
		             cmp_len > lzo1x_worst_compress(unc_len))) {
			printk(KERN_ERR "PM: Invalid LZO compressed length\n");
			ret = -1;
			break;
		}

		*(size_t *)cmp = cmp_len;

		/*
		 * Given we are writing one page at a time to disk, we copy
		 * that much from the buffer, although the last bit will
		 * likely be smaller than a full page.  This is OK - we saved
		 * the length of the compressed data, so any garbage at the
		 * end will be discarded when we read it.
		 */
		for (off = 0; off < LZO_HEADER + cmp_len; off += PAGE_SIZE) {
			memcpy(page, cmp + off, PAGE_SIZE);

			ret = swap_write_page(handle, page, &bio);
			if (ret)
				goto out_finish;
		}
	}

out_finish:
	err2 = hib_wait_on_bio_chain(&bio);
	do_gettimeofday(&stop);
	if (!ret)
		ret = err2;
	if (!ret)
		printk(KERN_CONT "\b\b\b\bdone\n");
	else
		printk(KERN_CONT "\n");
	swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");

	vfree(cmp);
	vfree(unc);
	vfree(wrk);
	free_page((unsigned long)page);

	return ret;
}

/**
 * enough_swap - Make sure we have enough swap to save the image.
 *
 * Returns TRUE or FALSE after checking the total amount of swap
 * space available from the resume partition.
 */

static int enough_swap(unsigned int nr_pages, unsigned int flags)
{
	unsigned int free_swap = count_swap_pages(root_swap, 1);
	unsigned int required;

	pr_debug("PM: Free swap pages: %u\n", free_swap);

	required = PAGES_FOR_IO + ((flags & SF_NOCOMPRESS_MODE) ?
		nr_pages : (nr_pages * LZO_CMP_PAGES) / LZO_UNC_PAGES + 1);
	return free_swap > required;
}

/**
 * swsusp_write - Write entire image and metadata.
 * @flags: flags to pass to the "boot" kernel in the image header
 *
 * It is important _NOT_ to umount filesystems at this point. We want
 * them synced (in case something goes wrong) but we DO NOT want to mark
 * the filesystem clean: it is not. (And it does not matter; if we resume
 * correctly, we'll mark the system clean anyway.)
 */

int swsusp_write(unsigned int flags)
{
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;
	unsigned long pages;
	int error;

	pages = snapshot_get_image_size();
	error = get_swap_writer(&handle);
	if (error) {
		printk(KERN_ERR "PM: Cannot get swap writer\n");
		return error;
	}
	if (!enough_swap(pages, flags)) {
		printk(KERN_ERR "PM: Not enough free swap\n");
		error = -ENOSPC;
		goto out_finish;
	}
	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_read_next(&snapshot);
	if (error < PAGE_SIZE) {
		if (error >= 0)
			error = -EFAULT;

		goto out_finish;
	}
	header = (struct swsusp_info *)data_of(snapshot);
	error = swap_write_page(&handle, header, NULL);
	if (!error) {
		error = (flags & SF_NOCOMPRESS_MODE) ?
			save_image(&handle, &snapshot, pages - 1) :
			save_image_lzo(&handle, &snapshot, pages - 1);
	}
out_finish:
	error = swap_writer_finish(&handle, flags, error);
	return error;
}
/**
 * The following functions allow us to read data using a swap map
 * in a file-like way.
 */

static void release_swap_reader(struct swap_map_handle *handle)
{
	if (handle->cur)
		free_page((unsigned long)handle->cur);
	handle->cur = NULL;
}

static int get_swap_reader(struct swap_map_handle *handle,
		unsigned int *flags_p)
{
	int error;

	*flags_p = swsusp_header->flags;

	if (!swsusp_header->image) /* how can this happen? */
		return -EINVAL;

	handle->cur = (struct swap_map_page *)get_zeroed_page(__GFP_WAIT | __GFP_HIGH);
	if (!handle->cur)
		return -ENOMEM;

	error = hib_bio_read_page(swsusp_header->image, handle->cur, NULL);
	if (error) {
		release_swap_reader(handle);
		return error;
	}
	handle->k = 0;
	return 0;
}

static int swap_read_page(struct swap_map_handle *handle, void *buf,
				struct bio **bio_chain)
{
	sector_t offset;
	int error;

	if (!handle->cur)
		return -EINVAL;
	offset = handle->cur->entries[handle->k];
	if (!offset)
		return -EFAULT;
	error = hib_bio_read_page(offset, buf, bio_chain);
	if (error)
		return error;
	if (++handle->k >= MAP_PAGE_ENTRIES) {
		error = hib_wait_on_bio_chain(bio_chain);
		handle->k = 0;
		offset = handle->cur->next_swap;
		if (!offset)
			release_swap_reader(handle);
		else if (!error)
			error = hib_bio_read_page(offset, handle->cur, NULL);
	}
	return error;
}

static int swap_reader_finish(struct swap_map_handle *handle)
{
	release_swap_reader(handle);

	return 0;
}

/**
 * load_image - load the image using the swap map handle
 * @handle and the snapshot handle @snapshot
 * (assume there are @nr_to_read pages to load)
 */

static int load_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_read)
{
	unsigned int m;
	int error = 0;
	struct timeval start;
	struct timeval stop;
	struct bio *bio;
	int err2;
	unsigned nr_pages;

	printk(KERN_INFO "PM: Loading image data pages (%u pages) ... ",
		nr_to_read);
	m = nr_to_read / 100;
	if (!m)
		m = 1;
	nr_pages = 0;
	bio = NULL;
	do_gettimeofday(&start);
	for ( ; ; ) {
		error = snapshot_write_next(snapshot);
		if (error <= 0)
			break;
		error = swap_read_page(handle, data_of(*snapshot), &bio);
		if (error)
			break;
		if (snapshot->sync_read)
			error = hib_wait_on_bio_chain(&bio);
		if (error)
			break;
		if (!(nr_pages % m))
			printk("\b\b\b\b%3d%%", nr_pages / m);
		nr_pages++;
	}
	err2 = hib_wait_on_bio_chain(&bio);
	do_gettimeofday(&stop);
	if (!error)
		error = err2;
	if (!error) {
		printk("\b\b\b\bdone\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			error = -ENODATA;
	} else
		printk("\n");
	swsusp_show_speed(&start, &stop, nr_to_read, "Read");
	return error;
}

/**
 * load_image_lzo - Load compressed image data and decompress it with LZO.
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Image to copy uncompressed data into.
 * @nr_to_read: Number of pages to load.
 */
static int load_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_read)
{
	unsigned int m;
	int error = 0;
	struct timeval start;
	struct timeval stop;
	unsigned nr_pages;
	size_t off, unc_len, cmp_len;
	unsigned char *unc, *cmp, *page;

	page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
	if (!page) {
		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
		return -ENOMEM;
	}

	unc = vmalloc(LZO_UNC_SIZE);
	if (!unc) {
		printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n");
		free_page((unsigned long)page);
		return -ENOMEM;
	}

	cmp = vmalloc(LZO_CMP_SIZE);
	if (!cmp) {
		printk(KERN_ERR "PM: Failed to allocate LZO compressed\n");
		vfree(unc);
		free_page((unsigned long)page);
		return -ENOMEM;
	}

	printk(KERN_INFO
		"PM: Loading and decompressing image data (%u pages) ... ",
		nr_to_read);
	m = nr_to_read / 100;
	if (!m)
		m = 1;
	nr_pages = 0;
	do_gettimeofday(&start);

	error = snapshot_write_next(snapshot);
	if (error <= 0)
		goto out_finish;

	for (;;) {
		error = swap_read_page(handle, page, NULL); /* sync */
		if (error)
			break;

		cmp_len = *(size_t *)page;
		if (unlikely(!cmp_len ||
		             cmp_len > lzo1x_worst_compress(LZO_UNC_SIZE))) {
			printk(KERN_ERR "PM: Invalid LZO compressed length\n");
			error = -1;
			break;
		}

		memcpy(cmp, page, PAGE_SIZE);
		for (off = PAGE_SIZE; off < LZO_HEADER + cmp_len; off += PAGE_SIZE) {
			error = swap_read_page(handle, page, NULL); /* sync */
			if (error)
				goto out_finish;

			memcpy(cmp + off, page, PAGE_SIZE);
		}

		unc_len = LZO_UNC_SIZE;
		error = lzo1x_decompress_safe(cmp + LZO_HEADER, cmp_len,
		                              unc, &unc_len);
		if (error < 0) {
			printk(KERN_ERR "PM: LZO decompression failed\n");
			break;
		}

		if (unlikely(!unc_len ||
		             unc_len > LZO_UNC_SIZE ||
		             unc_len & (PAGE_SIZE - 1))) {
			printk(KERN_ERR "PM: Invalid LZO uncompressed length\n");
			error = -1;
			break;
		}

		for (off = 0; off < unc_len; off += PAGE_SIZE) {
			memcpy(data_of(*snapshot), unc + off, PAGE_SIZE);

			if (!(nr_pages % m))
				printk("\b\b\b\b%3d%%", nr_pages / m);
			nr_pages++;

			error = snapshot_write_next(snapshot);
			if (error <= 0)
				goto out_finish;
		}
	}

out_finish:
	do_gettimeofday(&stop);
	if (!error) {
		printk("\b\b\b\bdone\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			error = -ENODATA;
	} else
		printk("\n");
	swsusp_show_speed(&start, &stop, nr_to_read, "Read");

	vfree(cmp);
	vfree(unc);
	free_page((unsigned long)page);

	return error;
}

/**
 * swsusp_read - read the hibernation image.
 * @flags_p: flags passed by the "frozen" kernel in the image header should
 *           be written into this memory location
 */

int swsusp_read(unsigned int *flags_p)
{
	int error;
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;

	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_write_next(&snapshot);
	if (error < PAGE_SIZE)
		return error < 0 ? error : -EFAULT;
	header = (struct swsusp_info *)data_of(snapshot);
	error = get_swap_reader(&handle, flags_p);
	if (error)
		goto end;
	if (!error)
		error = swap_read_page(&handle, header, NULL);
	if (!error) {
		error = (*flags_p & SF_NOCOMPRESS_MODE) ?
			load_image(&handle, &snapshot, header->pages - 1) :
			load_image_lzo(&handle, &snapshot, header->pages - 1);
	}
	swap_reader_finish(&handle);
end:
	if (!error)
		pr_debug("PM: Image successfully loaded\n");
	else
		pr_debug("PM: Error %d resuming\n", error);
	return error;
}

/**
 * swsusp_check - Check for swsusp signature in the resume device
 */

int swsusp_check(void)
{
	int error;

	hib_resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ);
	if (!IS_ERR(hib_resume_bdev)) {
		set_blocksize(hib_resume_bdev, PAGE_SIZE);
		clear_page(swsusp_header);
		error = hib_bio_read_page(swsusp_resume_block,
					swsusp_header, NULL);
		if (error)
			goto put;

		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
			/* Reset swap signature now */
			error = hib_bio_write_page(swsusp_resume_block,
						swsusp_header, NULL);
		} else {
			error = -EINVAL;
		}

put:
		if (error)
			blkdev_put(hib_resume_bdev, FMODE_READ);
		else
			pr_debug("PM: Image signature found, resuming\n");
	} else {
		error = PTR_ERR(hib_resume_bdev);
	}

	if (error)
		pr_debug("PM: Image not found (code %d)\n", error);

	return error;
}

/**
 * swsusp_close - close swap device.
 */

void swsusp_close(fmode_t mode)
{
	if (IS_ERR(hib_resume_bdev)) {
		pr_debug("PM: Image device not initialised\n");
		return;
	}

	blkdev_put(hib_resume_bdev, mode);
}

static int swsusp_header_init(void)
{
	swsusp_header = (struct swsusp_header *) __get_free_page(GFP_KERNEL);
	if (!swsusp_header)
		panic("Could not allocate memory for swsusp_header\n");

	return 0;
}

core_initcall(swsusp_header_init);