/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * mmap.c
 *
 * Code to deal with the mess that is clustered mmap.
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/signal.h>
#include <linux/rbtree.h>

#define MLOG_MASK_PREFIX ML_FILE_IO
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "aops.h"
#include "dlmglue.h"
#include "file.h"
#include "inode.h"
#include "mmap.h"

static inline int ocfs2_vm_op_block_sigs(sigset_t *blocked, sigset_t *oldset)
{
	/* The best way to deal with signals in the vm path is
	 * to block them upfront, rather than allowing the
	 * locking paths to return -ERESTARTSYS. */
	sigfillset(blocked);

	/* We should technically never get a bad return value
	 * from sigprocmask */
	return sigprocmask(SIG_BLOCK, blocked, oldset);
}

static inline int ocfs2_vm_op_unblock_sigs(sigset_t *oldset)
{
	return sigprocmask(SIG_SETMASK, oldset, NULL);
}

static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf)
{
	sigset_t blocked, oldset;
	int error, ret;

	mlog_entry("(area=%p, page offset=%lu)\n", area, vmf->pgoff);

	error = ocfs2_vm_op_block_sigs(&blocked, &oldset);
	if (error < 0) {
		mlog_errno(error);
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	ret = filemap_fault(area, vmf);

	error = ocfs2_vm_op_unblock_sigs(&oldset);
	if (error < 0)
		mlog_errno(error);
out:
	mlog_exit_ptr(vmf->page);
	return ret;
}

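/*
 * Do the actual work of making @page writable: re-validate it against
 * i_size and the page mapping (another node may have truncated while
 * we slept on the cluster locks), then push it through the
 * write_begin/write_end path so the space backing the page is fully
 * allocated. The caller holds the cluster locks and ip_alloc_sem.
 */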
static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh,
				struct page *page)
{
	int ret;
	struct address_space *mapping = inode->i_mapping;
	loff_t pos = page_offset(page);
	unsigned int len = PAGE_CACHE_SIZE;
	pgoff_t last_index;
	struct page *locked_page = NULL;
	void *fsdata;
	loff_t size = i_size_read(inode);

	/*
	 * Another node might have truncated while we were waiting on
	 * cluster locks.
	 */
	last_index = size >> PAGE_CACHE_SHIFT;
	if (page->index > last_index) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The i_size check above doesn't catch the case where nodes
	 * truncated and then re-extended the file. We'll re-check the
	 * page mapping after taking the page lock inside of
	 * ocfs2_write_begin_nolock().
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Call ocfs2_write_begin() and ocfs2_write_end() to take
	 * advantage of the allocation code there. We pass a write
	 * length of the whole page (chopped to i_size) to make sure
	 * the whole thing is allocated.
	 *
	 * Since we know the page is up to date, we don't have to
	 * worry about ocfs2_write_begin() skipping some buffer reads
	 * because the "write" would invalidate their data.
	 */
	if (page->index == last_index)
		len = size & ~PAGE_CACHE_MASK;

	ret = ocfs2_write_begin_nolock(mapping, pos, len, 0, &locked_page,
				       &fsdata, di_bh, page);
	if (ret) {
		if (ret != -ENOSPC)
			mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page,
				     fsdata);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}
	BUG_ON(ret != len);
	ret = 0;
out:
	return ret;
}

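/*
 * ->page_mkwrite() entry point. Lock ordering is the meta cluster
 * lock, then ip_alloc_sem, then the data cluster lock; the cluster
 * locks keep a remote truncate out while we dirty the page.
 */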
static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	struct buffer_head *di_bh = NULL;
	sigset_t blocked, oldset;
	int ret, ret2;

	ret = ocfs2_vm_op_block_sigs(&blocked, &oldset);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	/*
	 * The cluster locks taken will block a truncate from another
	 * node. Taking the data lock will also ensure that we don't
	 * attempt page truncation as part of a downconvert.
	 */
	ret = ocfs2_meta_lock(inode, &di_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * The alloc sem should be enough to serialize with
	 * ocfs2_truncate_file() changing i_size as well as any thread
	 * modifying the inode btree.
	 */
	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	ret = ocfs2_data_lock(inode, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_meta_unlock;
	}

	ret = __ocfs2_page_mkwrite(inode, di_bh, page);

	ocfs2_data_unlock(inode, 1);

out_meta_unlock:
	up_write(&OCFS2_I(inode)->ip_alloc_sem);

	brelse(di_bh);
	ocfs2_meta_unlock(inode, 1);

out:
	ret2 = ocfs2_vm_op_unblock_sigs(&oldset);
	if (ret2 < 0)
		mlog_errno(ret2);

	return ret;
}

static struct vm_operations_struct ocfs2_file_vm_ops = {
	.fault		= ocfs2_fault,
	.page_mkwrite	= ocfs2_page_mkwrite,
};

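/*
 * mmap entry point: take and drop the meta lock so atime can be
 * updated via ocfs2_meta_lock_atime(), then install the fault and
 * page_mkwrite handlers above.
 */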
int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	int ret = 0, lock_level = 0;

	ret = ocfs2_meta_lock_atime(file->f_dentry->d_inode,
				    file->f_vfsmnt, &lock_level);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}
	ocfs2_meta_unlock(file->f_dentry->d_inode, lock_level);
out:
	vma->vm_ops = &ocfs2_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}