page.c
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "bmap.h"
#include "inode.h"
#include "page.h"
#include "trans.h"
#include "ops_address.h"
#include "util.h"

/**
 * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock
 * @gl: the glock
 *
 */

void gfs2_pte_inval(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;
	struct inode *inode;

	ip = gl->gl_object;
	if (!ip || !S_ISREG(ip->i_di.di_mode))
		return;

	if (!test_bit(GIF_PAGED, &ip->i_flags))
		return;

	inode = gfs2_ip2v_lookup(ip);
	if (inode) {
		unmap_shared_mapping_range(inode->i_mapping, 0, 0);
		iput(inode);

		if (test_bit(GIF_SW_PAGED, &ip->i_flags))
			set_bit(GLF_DIRTY, &gl->gl_flags);
	}

	clear_bit(GIF_SW_PAGED, &ip->i_flags);
}

/**
 * gfs2_page_inval - Invalidate all pages associated with a glock
 * @gl: the glock
 *
 */

void gfs2_page_inval(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;
	struct inode *inode;

	ip = gl->gl_object;
	if (!ip || !S_ISREG(ip->i_di.di_mode))
		return;

	inode = gfs2_ip2v_lookup(ip);
	if (inode) {
		struct address_space *mapping = inode->i_mapping;

		truncate_inode_pages(mapping, 0);
		gfs2_assert_withdraw(ip->i_sbd, !mapping->nrpages);

		iput(inode);
	}

	clear_bit(GIF_PAGED, &ip->i_flags);
}

/**
 * gfs2_page_sync - Sync the data pages (not metadata) associated with a glock
 * @gl: the glock
 * @flags: DIO_START | DIO_WAIT
 *
 * Syncs data (not metadata) for a regular file.
 * No-op for all other types.
 */

void gfs2_page_sync(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip;
	struct inode *inode;

	ip = gl->gl_object;
	if (!ip || !S_ISREG(ip->i_di.di_mode))
		return;

	inode = gfs2_ip2v_lookup(ip);
	if (inode) {
		struct address_space *mapping = inode->i_mapping;
		int error = 0;

		if (flags & DIO_START)
			error = filemap_fdatawrite(mapping);
		if (!error && (flags & DIO_WAIT))
			error = filemap_fdatawait(mapping);

		/* Put back any errors cleared by filemap_fdatawait()
		   so they can be caught by someone who can pass them
		   up to user space. */

		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else if (error)
			set_bit(AS_EIO, &mapping->flags);

		iput(inode);
	}
}

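/*
 * Note (assumption based on the naming and flag handling, not verified in
 * this file): gfs2_pte_inval(), gfs2_page_inval() and gfs2_page_sync() above
 * are helpers for the inode glock operations. They are expected to run when
 * an inode's glock is being synced or invalidated, so that dirty PTEs and
 * cached pages are flushed or dropped before the lock can move to another
 * node.
 */
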
/**
 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
 * @ip: the inode
 * @dibh: the dinode buffer
 * @block: the block number that was allocated
 * @private: any locked page held by the caller process
 *
 * Returns: errno
 */

int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
			uint64_t block, void *private)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct inode *inode = ip->i_vnode;
	struct page *page = (struct page *)private;
	struct buffer_head *bh;
	int release = 0;

	if (!page || page->index) {
		page = grab_cache_page(inode->i_mapping, 0);
		if (!page)
			return -ENOMEM;
		release = 1;
	}

	if (!PageUptodate(page)) {
		void *kaddr = kmap(page);

		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
		       ip->i_di.di_size);
		memset(kaddr + ip->i_di.di_size, 0,
		       PAGE_CACHE_SIZE - ip->i_di.di_size);
		kunmap(page);

		SetPageUptodate(page);
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits,
				     (1 << BH_Uptodate));

	bh = page_buffers(page);

	if (!buffer_mapped(bh))
		map_bh(bh, inode->i_sb, block);

	set_buffer_uptodate(bh);
	if ((sdp->sd_args.ar_data == GFS2_DATA_ORDERED) || gfs2_is_jdata(ip))
		gfs2_trans_add_bh(ip->i_gl, bh, 0);
	mark_buffer_dirty(bh);

	if (release) {
		unlock_page(page);
		page_cache_release(page);
	}

	return 0;
}

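/*
 * Usage note (assumption, not verified against the rest of this tree):
 * gfs2_unstuffer_page() has a callback-style signature. The caller, expected
 * to be gfs2_unstuff_dinode() in bmap.c, passes any page it already holds
 * locked through @private; when no such page is supplied, the function grabs
 * and releases page 0 of the mapping itself.
 */
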
/**
 * gfs2_block_truncate_page - Deal with zeroing out data for truncate
 * @mapping: the address space of the inode being truncated
 *
 * This is partly borrowed from ext3.
 *
 * Returns: errno
 */

int gfs2_block_truncate_page(struct address_space *mapping)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = inode->u.generic_ip;
	struct gfs2_sbd *sdp = ip->i_sbd;
	loff_t from = inode->i_size;
	unsigned long index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, length, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr;
	int err;

	page = grab_cache_page(mapping, index);
	if (!page)
		return 0;

	blocksize = inode->i_sb->s_blocksize;
	length = blocksize - (offset & (blocksize - 1));
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;

	if (!buffer_mapped(bh)) {
		gfs2_get_block(inode, iblock, bh, 0);
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
		/* The read succeeded, so don't return the -EIO set above. */
		err = 0;
	}

	if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip))
		gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, length);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}

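/**
 * gfs2_page_add_databufs - add a page's buffers in a byte range to the transaction
 * @ip: the inode
 * @page: the page whose buffers are being added
 * @from: first byte offset within the page
 * @to: end byte offset within the page (exclusive)
 *
 * Walks the page's buffer heads and adds each one that overlaps [@from, @to)
 * to the current transaction as a data buffer.
 *
 * (This comment block is descriptive, added here for documentation; it is
 * inferred from the function body below rather than taken from the original
 * source.)
 */
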
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;

	for (bh = head, start = 0;
	     bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;
		gfs2_trans_add_bh(ip->i_gl, bh, 0);
	}
}