/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/frontswap.h>
#include <asm/pgtable.h>
  23. static struct bio *get_swap_bio(gfp_t gfp_flags,
  24. struct page *page, bio_end_io_t end_io)
  25. {
  26. struct bio *bio;
  27. bio = bio_alloc(gfp_flags, 1);
  28. if (bio) {
  29. bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
  30. bio->bi_sector <<= PAGE_SHIFT - 9;
  31. bio->bi_io_vec[0].bv_page = page;
  32. bio->bi_io_vec[0].bv_len = PAGE_SIZE;
  33. bio->bi_io_vec[0].bv_offset = 0;
  34. bio->bi_vcnt = 1;
  35. bio->bi_idx = 0;
  36. bio->bi_size = PAGE_SIZE;
  37. bio->bi_end_io = end_io;
  38. }
  39. return bio;
  40. }
  41. void end_swap_bio_write(struct bio *bio, int err)
  42. {
  43. const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
  44. struct page *page = bio->bi_io_vec[0].bv_page;
  45. if (!uptodate) {
  46. SetPageError(page);
  47. /*
  48. * We failed to write the page out to swap-space.
  49. * Re-dirty the page in order to avoid it being reclaimed.
  50. * Also print a dire warning that things will go BAD (tm)
  51. * very quickly.
  52. *
  53. * Also clear PG_reclaim to avoid rotate_reclaimable_page()
  54. */
  55. set_page_dirty(page);
  56. #ifndef CONFIG_VNSWAP
  57. printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
  58. imajor(bio->bi_bdev->bd_inode),
  59. iminor(bio->bi_bdev->bd_inode),
  60. (unsigned long long)bio->bi_sector);
  61. #endif
  62. ClearPageReclaim(page);
  63. }
  64. end_page_writeback(page);
  65. bio_put(bio);
  66. }
  67. void end_swap_bio_read(struct bio *bio, int err)
  68. {
  69. const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
  70. struct page *page = bio->bi_io_vec[0].bv_page;
  71. if (!uptodate) {
  72. SetPageError(page);
  73. ClearPageUptodate(page);
  74. printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
  75. imajor(bio->bi_bdev->bd_inode),
  76. iminor(bio->bi_bdev->bd_inode),
  77. (unsigned long long)bio->bi_sector);
  78. goto out;
  79. }
  80. SetPageUptodate(page);
  81. /*
  82. * There is no guarantee that the page is in swap cache - the software
  83. * suspend code (at least) uses end_swap_bio_read() against a non-
  84. * swapcache page. So we must check PG_swapcache before proceeding with
  85. * this optimization.
  86. */
  87. if (likely(PageSwapCache(page))) {
  88. struct swap_info_struct *sis;
  89. sis = page_swap_info(page);
  90. if (sis->flags & SWP_BLKDEV) {
  91. /*
  92. * The swap subsystem performs lazy swap slot freeing,
  93. * expecting that the page will be swapped out again.
  94. * So we can avoid an unnecessary write if the page
  95. * isn't redirtied.
  96. * This is good for real swap storage because we can
  97. * reduce unnecessary I/O and enhance wear-leveling
  98. * if an SSD is used as the as swap device.
  99. * But if in-memory swap device (eg zram) is used,
  100. * this causes a duplicated copy between uncompressed
  101. * data in VM-owned memory and compressed data in
  102. * zram-owned memory. So let's free zram-owned memory
  103. * and make the VM-owned decompressed page *dirty*,
  104. * so the page should be swapped out somewhere again if
  105. * we again wish to reclaim it.
  106. */
  107. struct gendisk *disk = sis->bdev->bd_disk;
  108. if (disk->fops->swap_slot_free_notify) {
  109. swp_entry_t entry;
  110. unsigned long offset;
  111. entry.val = page_private(page);
  112. offset = swp_offset(entry);
  113. SetPageDirty(page);
  114. disk->fops->swap_slot_free_notify(sis->bdev,
  115. offset);
  116. }
  117. }
  118. }
  119. out:
  120. unlock_page(page);
  121. bio_put(bio);
  122. }
  123. int __swap_writepage(struct page *page, struct writeback_control *wbc,
  124. void (*end_write_func)(struct bio *, int));
  125. /*
  126. * We may have stale swap cache pages in memory: notice
  127. * them here and get rid of the unnecessary final write.
  128. */
  129. int swap_writepage(struct page *page, struct writeback_control *wbc)
  130. {
  131. int ret = 0;
  132. if (try_to_free_swap(page)) {
  133. unlock_page(page);
  134. goto out;
  135. }
  136. if (frontswap_store(page) == 0) {
  137. set_page_writeback(page);
  138. unlock_page(page);
  139. end_page_writeback(page);
  140. goto out;
  141. }
  142. ret = __swap_writepage(page, wbc, end_swap_bio_write);
  143. out:
  144. return ret;
  145. }
  146. int __swap_writepage(struct page *page, struct writeback_control *wbc,
  147. void (*end_write_func)(struct bio *, int))
  148. {
  149. struct bio *bio;
  150. int ret = 0, rw = WRITE;
  151. bio = get_swap_bio(GFP_NOIO, page, end_write_func);
  152. if (bio == NULL) {
  153. set_page_dirty(page);
  154. unlock_page(page);
  155. ret = -ENOMEM;
  156. goto out;
  157. }
  158. if (wbc->sync_mode == WB_SYNC_ALL)
  159. rw |= REQ_SYNC;
  160. count_vm_event(PSWPOUT);
  161. set_page_writeback(page);
  162. unlock_page(page);
  163. submit_bio(rw, bio);
  164. out:
  165. return ret;
  166. }
  167. int swap_readpage(struct page *page)
  168. {
  169. struct bio *bio;
  170. int ret = 0;
  171. VM_BUG_ON(!PageLocked(page));
  172. VM_BUG_ON(PageUptodate(page));
  173. if (frontswap_load(page) == 0) {
  174. SetPageUptodate(page);
  175. unlock_page(page);
  176. goto out;
  177. }
  178. bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
  179. if (bio == NULL) {
  180. unlock_page(page);
  181. ret = -ENOMEM;
  182. goto out;
  183. }
  184. count_vm_event(PSWPIN);
  185. submit_bio(READ, bio);
  186. out:
  187. return ret;
  188. }