page.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "bmap.h"
#include "inode.h"
#include "page.h"
#include "trans.h"
#include "ops_address.h"
#include "util.h"
/**
 * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock
 * @gl: the glock
 *
 */
void gfs2_pte_inval(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode;

	/* Check the inode before touching it: the glock may not be
	   attached to a regular file. */
	if (!ip || !S_ISREG(ip->i_di.di_mode))
		return;
	inode = &ip->i_inode;

	if (!test_bit(GIF_PAGED, &ip->i_flags))
		return;

	unmap_shared_mapping_range(inode->i_mapping, 0, 0);

	/* If pages were faulted in through a shared-writable mapping, the
	   data may be dirty, so mark the glock so it gets flushed. */
	if (test_bit(GIF_SW_PAGED, &ip->i_flags))
		set_bit(GLF_DIRTY, &gl->gl_flags);

	clear_bit(GIF_SW_PAGED, &ip->i_flags);
}
/**
 * gfs2_page_inval - Invalidate all pages associated with a glock
 * @gl: the glock
 *
 */
void gfs2_page_inval(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode;

	if (!ip || !S_ISREG(ip->i_di.di_mode))
		return;
	inode = &ip->i_inode;

	truncate_inode_pages(inode->i_mapping, 0);
	gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !inode->i_mapping->nrpages);

	clear_bit(GIF_PAGED, &ip->i_flags);
}
/**
 * gfs2_page_sync - Sync the data pages (not metadata) associated with a glock
 * @gl: the glock
 * @flags: DIO_START | DIO_WAIT
 *
 * Syncs data (not metadata) for a regular file.
 * No-op for all other types.
 */
void gfs2_page_sync(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode;
	struct address_space *mapping;
	int error = 0;

	if (!ip || !S_ISREG(ip->i_di.di_mode))
		return;
	inode = &ip->i_inode;
	mapping = inode->i_mapping;

	if (flags & DIO_START)
		error = filemap_fdatawrite(mapping);
	if (!error && (flags & DIO_WAIT))
		error = filemap_fdatawait(mapping);

	/* Put back any errors cleared by filemap_fdatawait()
	   so they can be caught by someone who can pass them
	   up to user space. */

	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else if (error)
		set_bit(AS_EIO, &mapping->flags);
}
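
/*
 * Illustrative sketch, not part of the original file: a hypothetical helper
 * showing how the three routines above are typically combined when an inode
 * glock is being given up -- flush dirty data and wait for it, then tear
 * down user mappings and drop the cached pages.  The function name and the
 * exact ordering are assumptions for illustration only, not GFS2's real
 * glock callbacks.
 */
static void __attribute__((unused))
example_drop_inode_pages(struct gfs2_glock *gl)
{
	gfs2_page_sync(gl, DIO_START | DIO_WAIT);	/* write back data and wait */
	gfs2_pte_inval(gl);				/* invalidate user PTEs */
	gfs2_page_inval(gl);				/* toss the cached pages */
}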
/**
 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
 * @ip: the inode
 * @dibh: the dinode buffer
 * @block: the block number that was allocated
 * @private: any locked page held by the caller process
 *
 * Returns: errno
 */
int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
			uint64_t block, void *private)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct inode *inode = &ip->i_inode;
	struct page *page = (struct page *)private;
	struct buffer_head *bh;
	int release = 0;

	if (!page || page->index) {
		page = grab_cache_page(inode->i_mapping, 0);
		if (!page)
			return -ENOMEM;
		release = 1;
	}

	if (!PageUptodate(page)) {
		void *kaddr = kmap(page);

		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
		       ip->i_di.di_size);
		memset(kaddr + ip->i_di.di_size, 0,
		       PAGE_CACHE_SIZE - ip->i_di.di_size);
		kunmap(page);

		SetPageUptodate(page);
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits,
				     (1 << BH_Uptodate));

	bh = page_buffers(page);

	if (!buffer_mapped(bh))
		map_bh(bh, inode->i_sb, block);

	set_buffer_uptodate(bh);
	if ((sdp->sd_args.ar_data == GFS2_DATA_ORDERED) || gfs2_is_jdata(ip))
		gfs2_trans_add_bh(ip->i_gl, bh, 0);
	mark_buffer_dirty(bh);

	if (release) {
		unlock_page(page);
		page_cache_release(page);
	}

	return 0;
}
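
/*
 * Usage sketch (assumption, not taken from this file): gfs2_unstuffer_page()
 * is written as a callback for the unstuffing code in bmap.c, which allocates
 * a data block for a "stuffed" (inline-data) dinode and then asks the
 * callback to copy the inline data out into the page cache.  A caller that
 * already holds a locked page would pass it in roughly like:
 *
 *	error = gfs2_unstuff_dinode(ip, gfs2_unstuffer_page, page);
 *
 * while a caller with no page in hand passes NULL and lets the callback grab
 * page 0 of the mapping itself.  The caller name and signature above are
 * assumptions for illustration.
 */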
/**
 * gfs2_block_truncate_page - Deal with zeroing out data for truncate
 *
 * This is partly borrowed from ext3.
 */
int gfs2_block_truncate_page(struct address_space *mapping)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t from = inode->i_size;
	unsigned long index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, length, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr;
	int err;

	page = grab_cache_page(mapping, index);
	if (!page)
		return 0;

	blocksize = inode->i_sb->s_blocksize;
	length = blocksize - (offset & (blocksize - 1));
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;

	if (!buffer_mapped(bh)) {
		gfs2_get_block(inode, iblock, bh, 0);
		/* Unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip))
		gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, length);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
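
/*
 * Worked example (illustrative only): with 4096-byte blocks and pages,
 * truncating to i_size = 10000 gives index = 10000 >> 12 = 2, offset =
 * 10000 & 4095 = 1808, and length = 4096 - (1808 & 4095) = 2288.  So the
 * 2288 bytes from the new EOF to the end of the block that contains it are
 * zeroed in the page cache (and journaled first for ordered/jdata files)
 * rather than being left holding stale data.
 */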
/**
 * gfs2_page_add_databufs - add the buffers in part of a page to a transaction
 * @ip: the inode
 * @page: the page holding the buffers
 * @from: first byte offset within the page
 * @to: byte offset within the page at which to stop
 *
 */
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;

	/* Walk the page's buffer ring once and add only the buffers that
	   overlap the byte range [from, to). */
	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;
		gfs2_trans_add_bh(ip->i_gl, bh, 0);
	}
}
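
/*
 * Usage sketch (assumption, not taken from this file): a commit_write-style
 * path for journaled-data files would call this after copying user data into
 * the page, so only the buffers actually touched by the write join the
 * current transaction, e.g.:
 *
 *	unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
 *	unsigned int to = from + len;
 *	gfs2_page_add_databufs(ip, page, from, to);
 *
 * The variable names here are for illustration only.
 */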