ops_vm.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "ops_vm.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
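
/*
 * gfs2_private_fault - fault handler for mappings that need no allocation
 * @vma: the faulting VMA
 * @vmf: fault details
 *
 * Marks the inode as accessed through a page-based mapping (GIF_PAGED)
 * and hands the actual page-in work to the generic filemap_fault().
 */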
static int gfs2_private_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct gfs2_inode *ip = GFS2_I(vma->vm_file->f_mapping->host);

        set_bit(GIF_PAGED, &ip->i_flags);
        return filemap_fault(vma, vmf);
}
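
/*
 * alloc_page_backing - allocate the on-disk blocks backing a faulted page
 * @ip: the inode being written through the mapping
 * @page: the page that needs backing store
 *
 * Takes quota and resource-group reservations, starts a transaction,
 * unstuffs the dinode if the data is still held inline, then walks the
 * filesystem blocks covered by the page with gfs2_extent_map()
 * (new == 1) so that each block gets allocated if it is not already.
 */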
static int alloc_page_backing(struct gfs2_inode *ip, struct page *page)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        unsigned long index = page->index;
        u64 lblock = index << (PAGE_CACHE_SHIFT -
                               sdp->sd_sb.sb_bsize_shift);
        unsigned int blocks = PAGE_CACHE_SIZE >> sdp->sd_sb.sb_bsize_shift;
        struct gfs2_alloc *al;
        unsigned int data_blocks, ind_blocks;
        unsigned int x;
        int error;

        al = gfs2_alloc_get(ip);

        error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
        if (error)
                goto out;

        error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
        if (error)
                goto out_gunlock_q;

        gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);

        al->al_requested = data_blocks + ind_blocks;

        error = gfs2_inplace_reserve(ip);
        if (error)
                goto out_gunlock_q;

        error = gfs2_trans_begin(sdp, al->al_rgd->rd_length +
                                 ind_blocks + RES_DINODE +
                                 RES_STATFS + RES_QUOTA, 0);
        if (error)
                goto out_ipres;

        if (gfs2_is_stuffed(ip)) {
                error = gfs2_unstuff_dinode(ip, NULL);
                if (error)
                        goto out_trans;
        }

        for (x = 0; x < blocks; ) {
                u64 dblock;
                unsigned int extlen;
                int new = 1;

                error = gfs2_extent_map(&ip->i_inode, lblock, &new, &dblock, &extlen);
                if (error)
                        goto out_trans;

                lblock += extlen;
                x += extlen;
        }

        gfs2_assert_warn(sdp, al->al_alloced);

out_trans:
        gfs2_trans_end(sdp);
out_ipres:
        gfs2_inplace_release(ip);
out_gunlock_q:
        gfs2_quota_unlock(ip);
out:
        gfs2_alloc_put(ip);
        return error;
}
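
/*
 * gfs2_sharewrite_fault - fault handler for shared writable mappings
 * @vma: the faulting VMA
 * @vmf: fault details
 *
 * Grabs the inode glock exclusively, checks whether the faulting page
 * still needs on-disk allocation, services the fault through
 * filemap_fault(), and then allocates backing blocks and dirties the
 * page if allocation was required.
 */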
static int gfs2_sharewrite_fault(struct vm_area_struct *vma,
                                 struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct gfs2_file *gf = file->private_data;
        struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
        struct gfs2_holder i_gh;
        int alloc_required;
        int error;
        int ret = 0;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
        if (error)
                goto out;

        set_bit(GIF_PAGED, &ip->i_flags);
        set_bit(GIF_SW_PAGED, &ip->i_flags);

        error = gfs2_write_alloc_required(ip,
                                          (u64)vmf->pgoff << PAGE_CACHE_SHIFT,
                                          PAGE_CACHE_SIZE, &alloc_required);
        if (error) {
                ret = VM_FAULT_OOM; /* XXX: are these right? */
                goto out_unlock;
        }

        set_bit(GFF_EXLOCK, &gf->f_flags);
        ret = filemap_fault(vma, vmf);
        clear_bit(GFF_EXLOCK, &gf->f_flags);
        if (ret & VM_FAULT_ERROR)
                goto out_unlock;

        if (alloc_required) {
                /* XXX: do we need to drop page lock around alloc_page_backing?*/
                error = alloc_page_backing(ip, vmf->page);
                if (error) {
                        /*
                         * VM_FAULT_LOCKED should always be the case for
                         * filemap_fault, but it may not be in a future
                         * implementation.
                         */
                        if (ret & VM_FAULT_LOCKED)
                                unlock_page(vmf->page);
                        page_cache_release(vmf->page);
                        ret = VM_FAULT_OOM;
                        goto out_unlock;
                }
                set_page_dirty(vmf->page);
        }

out_unlock:
        gfs2_glock_dq_uninit(&i_gh);
out:
        return ret;
}
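
/*
 * Operation tables exported through ops_vm.h.  The sharewrite table is
 * presumably installed for shared, writable mappings that may need
 * allocation on fault; the private table for all other mappings.
 */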
struct vm_operations_struct gfs2_vm_ops_private = {
        .fault = gfs2_private_fault,
};

struct vm_operations_struct gfs2_vm_ops_sharewrite = {
        .fault = gfs2_sharewrite_fault,
};