page_io.c

/*
 * linux/mm/page_io.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 * Swap reorganised 29.12.95,
 * Asynchronous swapping added 30.12.95. Stephen Tweedie
 * Removed race in async swapping. 14.4.1996. Bruno Haible
 * Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 * Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <asm/pgtable.h>
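
/*
 * Allocate and fill in a single-page bio for the swap slot whose
 * swp_entry_t value is passed in @index: the entry's type selects the
 * swap device, and its offset is mapped to the starting sector.
 * Returns NULL if the bio allocation fails.
 */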
static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index,
                                struct page *page, bio_end_io_t end_io)
{
        struct bio *bio;

        bio = bio_alloc(gfp_flags, 1);
        if (bio) {
                struct swap_info_struct *sis;
                swp_entry_t entry = { .val = index, };

                sis = get_swap_info_struct(swp_type(entry));
                bio->bi_sector = map_swap_page(sis, swp_offset(entry)) *
                                 (PAGE_SIZE >> 9);
                bio->bi_bdev = sis->bdev;
                bio->bi_io_vec[0].bv_page = page;
                bio->bi_io_vec[0].bv_len = PAGE_SIZE;
                bio->bi_io_vec[0].bv_offset = 0;
                bio->bi_vcnt = 1;
                bio->bi_idx = 0;
                bio->bi_size = PAGE_SIZE;
                bio->bi_end_io = end_io;
        }
        return bio;
}
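
/*
 * Completion handler for swap-out I/O. On failure the page is marked
 * with an error and re-dirtied so it will not be reclaimed; in all
 * cases writeback is ended and the bio is released.
 */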
static void end_swap_bio_write(struct bio *bio, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct page *page = bio->bi_io_vec[0].bv_page;

        if (!uptodate) {
                SetPageError(page);
                /*
                 * We failed to write the page out to swap-space.
                 * Re-dirty the page in order to avoid it being reclaimed.
                 * Also print a dire warning that things will go BAD (tm)
                 * very quickly.
                 *
                 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
                 */
                set_page_dirty(page);
                printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
                                imajor(bio->bi_bdev->bd_inode),
                                iminor(bio->bi_bdev->bd_inode),
                                (unsigned long long)bio->bi_sector);
                ClearPageReclaim(page);
        }
        end_page_writeback(page);
        bio_put(bio);
}
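
/*
 * Completion handler for swap-in I/O. Marks the page up to date on
 * success (or records an error on failure), then unlocks the page
 * and drops the bio.
 */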
void end_swap_bio_read(struct bio *bio, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct page *page = bio->bi_io_vec[0].bv_page;

        if (!uptodate) {
                SetPageError(page);
                ClearPageUptodate(page);
                printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
                                imajor(bio->bi_bdev->bd_inode),
                                iminor(bio->bi_bdev->bd_inode),
                                (unsigned long long)bio->bi_sector);
        } else {
                SetPageUptodate(page);
        }
        unlock_page(page);
        bio_put(bio);
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
        struct bio *bio;
        int ret = 0, rw = WRITE;

        if (try_to_free_swap(page)) {
                unlock_page(page);
                goto out;
        }
        bio = get_swap_bio(GFP_NOIO, page_private(page), page,
                                end_swap_bio_write);
        if (bio == NULL) {
                set_page_dirty(page);
                unlock_page(page);
                ret = -ENOMEM;
                goto out;
        }
        if (wbc->sync_mode == WB_SYNC_ALL)
                rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
        count_vm_event(PSWPOUT);
        set_page_writeback(page);
        unlock_page(page);
        submit_bio(rw, bio);
out:
        return ret;
}
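
/*
 * Read a page back in from its swap slot. The page must be locked and
 * not yet up to date; the read completes asynchronously in
 * end_swap_bio_read().
 */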
int swap_readpage(struct page *page)
{
        struct bio *bio;
        int ret = 0;

        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(PageUptodate(page));
        bio = get_swap_bio(GFP_KERNEL, page_private(page), page,
                                end_swap_bio_read);
        if (bio == NULL) {
                unlock_page(page);
                ret = -ENOMEM;
                goto out;
        }
        count_vm_event(PSWPIN);
        submit_bio(READ, bio);
out:
        return ret;
}