/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/ext2_fs.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <asm/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @origin: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */
static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	if (origin == 2) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek_unlocked(file, offset, origin);
			gfs2_glock_dq_uninit(&i_gh);
		}
	} else
		error = generic_file_llseek_unlocked(file, offset, origin);

	return error;
}

/**
 * gfs2_readdir - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * Returns: errno
 */
static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	u64 offset = file->f_pos;
	int error;

	gfs2_holder_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	error = gfs2_glock_nq(&d_gh);
	if (error) {
		gfs2_holder_uninit(&d_gh);
		return error;
	}

	error = gfs2_dir_read(dir, &offset, dirent, filldir);

	gfs2_glock_dq_uninit(&d_gh);

	file->f_pos = offset;

	return error;
}

/**
 * fsflags_cvt
 * @table: A table of 32 u32 flags
 * @val: a 32 bit value to convert
 *
 * This function can be used to convert between fsflags values and
 * GFS2's own flags values.
 *
 * Returns: the converted flags
 */
static u32 fsflags_cvt(const u32 *table, u32 val)
{
	u32 res = 0;
	while(val) {
		if (val & 1)
			res |= *table;
		table++;
		val >>= 1;
	}
	return res;
}
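
/*
 * Bit N of a user-space FS_*_FL flags word corresponds to entry N of
 * fsflags_to_gfs2, and bit N of the GFS2 on-disk flags word corresponds
 * to entry N of gfs2_to_fsflags; fsflags_cvt() walks the tables bit by bit.
 */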
static const u32 fsflags_to_gfs2[32] = {
	[3] = GFS2_DIF_SYNC,
	[4] = GFS2_DIF_IMMUTABLE,
	[5] = GFS2_DIF_APPENDONLY,
	[7] = GFS2_DIF_NOATIME,
	[12] = GFS2_DIF_EXHASH,
	[14] = GFS2_DIF_INHERIT_JDATA,
};

static const u32 gfs2_to_fsflags[32] = {
	[gfs2fl_Sync] = FS_SYNC_FL,
	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
	[gfs2fl_AppendOnly] = FS_APPEND_FL,
	[gfs2fl_NoAtime] = FS_NOATIME_FL,
	[gfs2fl_ExHash] = FS_INDEX_FL,
	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
};
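
/**
 * gfs2_get_flags - Report the GFS2 inode flags (FS_IOC_GETFLAGS)
 * @filp: The file whose inode flags are wanted
 * @ptr: User-space buffer for the FS_*_FL flags word
 *
 * Takes a shared glock so that the on-disk flags read back are current.
 *
 * Returns: errno
 */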
static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		return error;

	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
		fsflags |= FS_JOURNAL_DATA_FL;
	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
	gfs2_holder_uninit(&gh);
	return error;
}
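
/**
 * gfs2_set_inode_flags - Propagate GFS2 on-disk flags into inode->i_flags
 * @inode: The inode whose VFS flags should be refreshed
 *
 * Derives S_IMMUTABLE, S_APPEND, S_NOATIME and S_SYNC from the GFS2
 * on-disk flags, and sets S_NOSEC when the inode has no extended
 * attributes and no setuid/setgid bits.
 */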
void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}

/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_SYSTEM|			\
			     GFS2_DIF_INHERIT_JDATA)

/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = mnt_want_write(filp->f_path.mnt);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	error = -EACCES;
	if (!inode_owner_or_capable(inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EINVAL;
	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write(filp->f_path.mnt);
	return error;
}
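
/**
 * gfs2_set_flags - Set GFS2 inode flags from user space (FS_IOC_SETFLAGS)
 * @filp: The file whose inode flags are being changed
 * @ptr: User-space FS_*_FL flags word
 *
 * For regular files an inherit-jdata request is turned into a plain
 * jdata request; for directories the jdata bit is excluded from the
 * changeable mask.
 *
 * Returns: errno
 */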
static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	u32 fsflags, gfsflags;

	if (get_user(fsflags, ptr))
		return -EFAULT;

	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
	if (!S_ISDIR(inode->i_mode)) {
		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
		return do_gfs2_set_flags(filp, gfsflags, ~0);
	}
	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
}
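
/**
 * gfs2_ioctl - Dispatch the ioctls GFS2 supports
 * @filp: The file the ioctl was issued against
 * @cmd: FS_IOC_GETFLAGS or FS_IOC_SETFLAGS
 * @arg: User-space pointer argument
 *
 * Returns: errno, or -ENOTTY for unknown commands
 */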
static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	case FS_IOC_GETFLAGS:
		return gfs2_get_flags(filp, (u32 __user *)arg);
	case FS_IOC_SETFLAGS:
		return gfs2_set_flags(filp, (u32 __user *)arg);
	}
	return -ENOTTY;
}

/**
 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 * @page: The (locked) page to allocate backing for
 *
 * We try to allocate all the blocks required for the page in
 * one go. This might fail for various reasons, so we keep
 * trying until all the blocks to back this page are allocated.
 * If some of the blocks are already allocated, that's ok too.
 */
static int gfs2_allocate_page_backing(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head bh;
	unsigned long size = PAGE_CACHE_SIZE;
	u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(inode, lblock, &bh, 1);
		if (!buffer_mapped(&bh))
			return -EIO;
		size -= bh.b_size;
		lblock += (bh.b_size >> inode->i_blkbits);
	} while(size > 0);
	return 0;
}

/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
 * @page: The page which is about to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */
static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned long last_index;
	u64 pos = page->index << PAGE_CACHE_SHIFT;
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	struct gfs2_alloc *al;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out;

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE))
		goto out_unlock;
	ret = -ENOMEM;
	al = gfs2_alloc_get(ip);
	if (al == NULL)
		goto out_unlock;

	ret = gfs2_quota_lock_check(ip);
	if (ret)
		goto out_alloc_put;
	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
	al->al_requested = data_blocks + ind_blocks;
	ret = gfs2_inplace_reserve(ip);
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(al);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	last_index = ip->i_inode.i_size >> PAGE_CACHE_SHIFT;
	if (page->index > last_index)
		goto out_unlock_page;
	ret = 0;
	if (!PageUptodate(page) || page->mapping != ip->i_inode.i_mapping)
		goto out_unlock_page;
	if (gfs2_is_stuffed(ip)) {
		ret = gfs2_unstuff_dinode(ip, page);
		if (ret)
			goto out_unlock_page;
	}
	ret = gfs2_allocate_page_backing(page);

out_unlock_page:
	unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_alloc_put:
	gfs2_alloc_put(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (ret == -ENOMEM)
		ret = VM_FAULT_OOM;
	else if (ret)
		ret = VM_FAULT_SIGBUS;
	return ret;
}

static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = gfs2_page_mkwrite,
};

/**
 * gfs2_mmap -
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */
static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
		error = gfs2_glock_nq(&i_gh);
		if (error == 0) {
			file_accessed(file);
			gfs2_glock_dq(&i_gh);
		}
		gfs2_holder_uninit(&i_gh);
		if (error)
			return error;
	}
	vma->vm_ops = &gfs2_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	return 0;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * Returns: errno
 */
static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	struct gfs2_file *fp;
	int error;

	fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			goto fail;

		if (!(file->f_flags & O_LARGEFILE) &&
		    i_size_read(inode) > MAX_NON_LFS) {
			error = -EOVERFLOW;
			goto fail_gunlock;
		}

		gfs2_glock_dq_uninit(&i_gh);
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	file->private_data = NULL;
	kfree(fp);
	return error;
}

/**
 * gfs2_close - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */
static int gfs2_close(struct inode *inode, struct file *file)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_file *fp;

	fp = file->private_data;
	file->private_data = NULL;

	if (gfs2_assert_warn(sdp, fp))
		return -EIO;

	kfree(fp);

	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */
static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret = 0, ret1 = 0;

	if (mapping->nrpages) {
		ret1 = filemap_fdatawrite_range(mapping, start, end);
		if (ret1 == -EIO)
			return ret1;
	}

	if (datasync)
		sync_state &= ~I_DIRTY_SYNC;

	if (sync_state) {
		mutex_lock(&inode->i_mutex);
		ret = sync_inode_metadata(inode, 1);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
		if (gfs2_is_jdata(ip))
			filemap_write_and_wait(mapping);
		gfs2_ail_flush(ip->i_gl);
		mutex_unlock(&inode->i_mutex);
	}

	if (mapping->nrpages)
		ret = filemap_fdatawait_range(mapping, start, end);

	return ret ? ret : ret1;
}

/**
 * gfs2_file_aio_write - Perform a write to a file
 * @iocb: The io context
 * @iov: The data to write
 * @nr_segs: Number of @iov segments
 * @pos: The file position
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can land up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */
static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;

	if (file->f_flags & O_APPEND) {
		struct dentry *dentry = file->f_dentry;
		struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
		struct gfs2_holder gh;
		int ret;

		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	return generic_file_aio_write(iocb, iov, nr_segs, pos);
}
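
/**
 * empty_write_end - Commit a zeroed range of a page during fallocate
 * @page: The page that has just been zeroed
 * @from: First byte of the zeroed range within the page
 * @to: Byte after the last byte of the zeroed range
 * @mode: fallocate mode flags (FALLOC_FL_KEEP_SIZE)
 *
 * Ranges inside i_size (or that will extend i_size because
 * FALLOC_FL_KEEP_SIZE is not set) go through block_commit_write();
 * ranges wholly beyond EOF have their buffers marked dirty, written
 * out directly and then waited on.
 *
 * Returns: errno
 */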
static int empty_write_end(struct page *page, unsigned from,
			   unsigned to, int mode)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *bh;
	unsigned offset, blksize = 1 << inode->i_blkbits;
	pgoff_t end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;

	zero_user(page, from, to-from);
	mark_page_accessed(page);

	if (page->index < end_index || !(mode & FALLOC_FL_KEEP_SIZE)) {
		if (!gfs2_is_writeback(ip))
			gfs2_page_add_databufs(ip, page, from, to);

		block_commit_write(page, from, to);
		return 0;
	}

	offset = 0;
	bh = page_buffers(page);
	while (offset < to) {
		if (offset >= from) {
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			clear_buffer_new(bh);
			write_dirty_buffer(bh, WRITE);
		}
		offset += blksize;
		bh = bh->b_this_page;
	}

	offset = 0;
	bh = page_buffers(page);
	while (offset < to) {
		if (offset >= from) {
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				return -EIO;
		}
		offset += blksize;
		bh = bh->b_this_page;
	}
	return 0;
}
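
/**
 * needs_empty_write - Check whether a block still needs to be allocated
 * @block: The logical block number within the file
 * @inode: The inode being preallocated
 *
 * Returns: 1 if the block is unmapped, 0 if it is already backed by a
 *          disk block, or a negative errno from gfs2_block_map()
 */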
static int needs_empty_write(sector_t block, struct inode *inode)
{
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	bh_map.b_size = 1 << inode->i_blkbits;
	error = gfs2_block_map(inode, block, &bh_map, 0);
	if (unlikely(error))
		return error;
	return !buffer_mapped(&bh_map);
}
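
/**
 * write_empty_blocks - Allocate and zero the unmapped blocks in a page range
 * @page: The page covering the range being preallocated
 * @from: First byte of interest within the page
 * @to: Byte after the last byte of interest
 * @mode: fallocate mode flags, passed through to empty_write_end()
 *
 * Scans the range a block at a time, batching runs of unmapped blocks
 * into a single __block_write_begin()/empty_write_end() pair.
 *
 * Returns: errno
 */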
static int write_empty_blocks(struct page *page, unsigned from, unsigned to,
			      int mode)
{
	struct inode *inode = page->mapping->host;
	unsigned start, end, next, blksize;
	sector_t block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	int ret;

	blksize = 1 << inode->i_blkbits;
	next = end = 0;
	while (next < from) {
		next += blksize;
		block++;
	}
	start = next;
	do {
		next += blksize;
		ret = needs_empty_write(block, inode);
		if (unlikely(ret < 0))
			return ret;
		if (ret == 0) {
			if (end) {
				ret = __block_write_begin(page, start, end - start,
							  gfs2_block_map);
				if (unlikely(ret))
					return ret;
				ret = empty_write_end(page, start, end, mode);
				if (unlikely(ret))
					return ret;
				end = 0;
			}
			start = next;
		}
		else
			end = next;
		block++;
	} while (next < to);

	if (end) {
		ret = __block_write_begin(page, start, end - start, gfs2_block_map);
		if (unlikely(ret))
			return ret;
		ret = empty_write_end(page, start, end, mode);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}
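
/**
 * fallocate_chunk - Preallocate one chunk of a fallocate request
 * @inode: The inode being preallocated
 * @offset: Starting byte offset of the chunk
 * @len: Length of the chunk in bytes
 * @mode: fallocate mode flags (FALLOC_FL_KEEP_SIZE)
 *
 * Walks the chunk page by page, allocating and zeroing any unmapped
 * blocks, and updates i_size unless FALLOC_FL_KEEP_SIZE was requested.
 * Called by gfs2_fallocate() with the transaction and block reservation
 * already set up.
 *
 * Returns: errno
 */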
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *dibh;
	int error;
	u64 start = offset >> PAGE_CACHE_SHIFT;
	unsigned int start_offset = offset & ~PAGE_CACHE_MASK;
	u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t curr;
	struct page *page;
	unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK;
	unsigned int from, to;

	if (!end_offset)
		end_offset = PAGE_CACHE_SIZE;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		return error;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

	curr = start;
	offset = start << PAGE_CACHE_SHIFT;
	from = start_offset;
	to = PAGE_CACHE_SIZE;
	while (curr <= end) {
		page = grab_cache_page_write_begin(inode->i_mapping, curr,
						   AOP_FLAG_NOFS);
		if (unlikely(!page)) {
			error = -ENOMEM;
			goto out;
		}

		if (curr == end)
			to = end_offset;
		error = write_empty_blocks(page, from, to, mode);
		if (!error && offset + to > inode->i_size &&
		    !(mode & FALLOC_FL_KEEP_SIZE)) {
			i_size_write(inode, offset + to);
		}
		unlock_page(page);
		page_cache_release(page);
		if (error)
			goto out;
		curr++;
		offset += PAGE_CACHE_SIZE;
		from = 0;
	}

	gfs2_dinode_out(ip, dibh->b_data);
	mark_inode_dirty(inode);

out:
	brelse(dibh);
	return error;
}
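
/**
 * calc_max_reserv - Grow a fallocate chunk to fill the reserved rgrp
 * @ip: The inode being preallocated
 * @max: Upper bound on the chunk length, in bytes
 * @len: In/out: the chunk length to allocate
 * @data_blocks: In/out: number of data blocks to reserve
 * @ind_blocks: In/out: number of indirect blocks to reserve
 *
 * Given the free blocks in the reserved resource group, works out how
 * many can be used for data once the indirect blocks needed to map them
 * are accounted for, and enlarges the request accordingly, capped at @max.
 */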
static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks)
{
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone;
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}
	/* This calculation isn't the exact reverse of gfs2_write_calc_reserv,
	   so it might end up with fewer data blocks */
	if (max_data <= *data_blocks)
		return;
	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}
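
/**
 * gfs2_fallocate - Preallocate blocks for a file
 * @file: The file to preallocate space for
 * @mode: Only FALLOC_FL_KEEP_SIZE is supported
 * @offset: Starting byte offset of the region
 * @len: Length of the region in bytes
 *
 * The request is rounded out to filesystem blocks and carved into
 * chunks: for each chunk we take a block reservation, open a
 * transaction and call fallocate_chunk(), retrying with a smaller
 * chunk size if the reservation fails with -ENOSPC.
 *
 * Returns: errno
 */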
static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
			   loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes;
	struct gfs2_alloc *al;
	int error;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;

	if (!gfs2_write_alloc_required(ip, offset, len))
		goto out_unlock;

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		al = gfs2_alloc_get(ip);
		if (!al) {
			error = -ENOMEM;
			goto out_unlock;
		}

		error = gfs2_quota_lock_check(ip);
		if (error)
			goto out_alloc_put;

retry:
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);

		al->al_requested = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error) {
			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
				bytes >>= 1;
				bytes &= bsize_mask;
				if (bytes == 0)
					bytes = sdp->sd_sb.sb_bsize;
				goto retry;
			}
			goto out_qunlock;
		}
		max_bytes = bytes;
		calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks);
		al->al_requested = data_blocks + ind_blocks;

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(al);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}
	goto out_unlock;

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
out_alloc_put:
	gfs2_alloc_put(ip);
out_unlock:
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}

#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_setlease - acquire/release a file lease
 * @file: the file pointer
 * @arg: lease type
 * @fl: file lock
 *
 * We don't currently have a way to enforce a lease across the whole
 * cluster; until we do, disable leases (by just returning -EINVAL),
 * unless the administrator has requested purely local locking.
 *
 * Locking: called under lock_flocks
 *
 * Returns: errno
 */
static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
{
	return -EINVAL;
}

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */
static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;
	if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}
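
/**
 * do_flock - Take or convert a cluster-wide flock lock
 * @file: the file pointer
 * @cmd: lock command, used only to decide whether to wait or just try
 * @fl: type and range of lock
 *
 * Each open file gets a dedicated flock glock holder; taking the glock
 * in shared or exclusive mode gives the flock cluster-wide effect, while
 * flock_lock_file_wait() keeps the local VFS lock state in step.
 *
 * Returns: errno
 */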
static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file->f_path.dentry->d_inode);
	struct gfs2_glock *gl;
	unsigned int state;
	int flags;
	int error = 0;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;

	mutex_lock(&fp->f_fl_mutex);

	gl = fl_gh->gh_gl;
	if (gl) {
		if (fl_gh->gh_state == state)
			goto out;
		flock_lock_file_wait(file,
				     &(struct file_lock){.fl_type = F_UNLCK});
		gfs2_glock_dq_wait(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
	}
	error = gfs2_glock_nq(fl_gh);
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = flock_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}
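
/**
 * do_unflock - Drop a cluster-wide flock lock
 * @file: the file pointer
 * @fl: type and range of lock
 *
 * Releases the local VFS flock state and, if a flock glock is held,
 * drops and uninitialises its holder.
 */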
static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	flock_lock_file_wait(file, fl);
	if (fl_gh->gh_gl) {
		gfs2_glock_dq_wait(fl_gh);
		gfs2_holder_uninit(fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */
static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}

const struct file_operations gfs2_file_fops = {
	.llseek = gfs2_llseek,
	.read = do_sync_read,
	.aio_read = generic_file_aio_read,
	.write = do_sync_write,
	.aio_write = gfs2_file_aio_write,
	.unlocked_ioctl = gfs2_ioctl,
	.mmap = gfs2_mmap,
	.open = gfs2_open,
	.release = gfs2_close,
	.fsync = gfs2_fsync,
	.lock = gfs2_lock,
	.flock = gfs2_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.setlease = gfs2_setlease,
	.fallocate = gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops = {
	.readdir = gfs2_readdir,
	.unlocked_ioctl = gfs2_ioctl,
	.open = gfs2_open,
	.release = gfs2_close,
	.fsync = gfs2_fsync,
	.lock = gfs2_lock,
	.flock = gfs2_flock,
	.llseek = default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */

const struct file_operations gfs2_file_fops_nolock = {
	.llseek = gfs2_llseek,
	.read = do_sync_read,
	.aio_read = generic_file_aio_read,
	.write = do_sync_write,
	.aio_write = gfs2_file_aio_write,
	.unlocked_ioctl = gfs2_ioctl,
	.mmap = gfs2_mmap,
	.open = gfs2_open,
	.release = gfs2_close,
	.fsync = gfs2_fsync,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.setlease = generic_setlease,
	.fallocate = gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops_nolock = {
	.readdir = gfs2_readdir,
	.unlocked_ioctl = gfs2_ioctl,
	.open = gfs2_open,
	.release = gfs2_close,
	.fsync = gfs2_fsync,
	.llseek = default_llseek,
};