file.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/ext2_fs.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <asm/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @origin: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	switch (origin) {
	case SEEK_END: /* These reference inode->i_size */
	case SEEK_DATA:
	case SEEK_HOLE:
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek_unlocked(file, offset, origin);
			gfs2_glock_dq_uninit(&i_gh);
		}
		break;
	case SEEK_CUR:
	case SEEK_SET:
		error = generic_file_llseek_unlocked(file, offset, origin);
		break;
	default:
		error = -EINVAL;
	}

	return error;
}
/**
 * gfs2_readdir - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	u64 offset = file->f_pos;
	int error;

	gfs2_holder_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	error = gfs2_glock_nq(&d_gh);
	if (error) {
		gfs2_holder_uninit(&d_gh);
		return error;
	}

	error = gfs2_dir_read(dir, &offset, dirent, filldir);

	gfs2_glock_dq_uninit(&d_gh);

	file->f_pos = offset;

	return error;
}
/**
 * fsflags_cvt
 * @table: A table of 32 u32 flags
 * @val: a 32 bit value to convert
 *
 * This function can be used to convert between fsflags values and
 * GFS2's own flags values.
 *
 * Returns: the converted flags
 */

static u32 fsflags_cvt(const u32 *table, u32 val)
{
	u32 res = 0;

	while(val) {
		if (val & 1)
			res |= *table;
		table++;
		val >>= 1;
	}
	return res;
}
static const u32 fsflags_to_gfs2[32] = {
	[3] = GFS2_DIF_SYNC,
	[4] = GFS2_DIF_IMMUTABLE,
	[5] = GFS2_DIF_APPENDONLY,
	[7] = GFS2_DIF_NOATIME,
	[12] = GFS2_DIF_EXHASH,
	[14] = GFS2_DIF_INHERIT_JDATA,
};

static const u32 gfs2_to_fsflags[32] = {
	[gfs2fl_Sync] = FS_SYNC_FL,
	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
	[gfs2fl_AppendOnly] = FS_APPEND_FL,
	[gfs2fl_NoAtime] = FS_NOATIME_FL,
	[gfs2fl_ExHash] = FS_INDEX_FL,
	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
};
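
/*
 * Note on the tables above: fsflags_cvt() walks @val bit by bit, so each
 * table is indexed by bit position in the source flag word. In
 * fsflags_to_gfs2[] the indices are the FS_*_FL bit numbers (e.g. bit 3 is
 * FS_SYNC_FL, bit 14 is FS_JOURNAL_DATA_FL); in gfs2_to_fsflags[] they are
 * the gfs2fl_* bit numbers of the on-disk GFS2_DIF_* flags.
 */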
static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		return error;

	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
		fsflags |= FS_JOURNAL_DATA_FL;
	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
	gfs2_holder_uninit(&gh);
	return error;
}
void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		inode->i_flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}
/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_SYSTEM|			\
			     GFS2_DIF_INHERIT_JDATA)

/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: the file the flags apply to
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = mnt_want_write(filp->f_path.mnt);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	error = -EACCES;
	if (!inode_owner_or_capable(inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EINVAL;
	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write(filp->f_path.mnt);
	return error;
}
static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	u32 fsflags, gfsflags;

	if (get_user(fsflags, ptr))
		return -EFAULT;

	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
	if (!S_ISDIR(inode->i_mode)) {
		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
		return do_gfs2_set_flags(filp, gfsflags, ~0);
	}
	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
}

static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	case FS_IOC_GETFLAGS:
		return gfs2_get_flags(filp, (u32 __user *)arg);
	case FS_IOC_SETFLAGS:
		return gfs2_set_flags(filp, (u32 __user *)arg);
	}
	return -ENOTTY;
}
/**
 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 * @page: The (locked) page to allocate backing for
 *
 * We try to allocate all the blocks required for the page in
 * one go. This might fail for various reasons, so we keep
 * trying until all the blocks to back this page are allocated.
 * If some of the blocks are already allocated, that's ok too.
 */

static int gfs2_allocate_page_backing(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head bh;
	unsigned long size = PAGE_CACHE_SIZE;
	u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(inode, lblock, &bh, 1);
		if (!buffer_mapped(&bh))
			return -EIO;
		size -= bh.b_size;
		lblock += (bh.b_size >> inode->i_blkbits);
	} while(size > 0);
	return 0;
}
/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
 * @page: The page which is about to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned long last_index;
	u64 pos = page->index << PAGE_CACHE_SHIFT;
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	struct gfs2_alloc *al;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out;

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE))
		goto out_unlock;
	ret = -ENOMEM;
	al = gfs2_alloc_get(ip);
	if (al == NULL)
		goto out_unlock;

	ret = gfs2_quota_lock_check(ip);
	if (ret)
		goto out_alloc_put;
	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
	al->al_requested = data_blocks + ind_blocks;
	ret = gfs2_inplace_reserve(ip);
	if (ret)
		goto out_quota_unlock;
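
	/*
	 * Size the transaction: the dinode itself, any new indirect blocks,
	 * the data blocks too if they are journalled (jdata), plus statfs,
	 * quota and resource group changes when new blocks are allocated.
	 */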
	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	last_index = ip->i_inode.i_size >> PAGE_CACHE_SHIFT;
	if (page->index > last_index)
		goto out_unlock_page;
	ret = 0;
	if (!PageUptodate(page) || page->mapping != ip->i_inode.i_mapping)
		goto out_unlock_page;
	if (gfs2_is_stuffed(ip)) {
		ret = gfs2_unstuff_dinode(ip, page);
		if (ret)
			goto out_unlock_page;
	}
	ret = gfs2_allocate_page_backing(page);

out_unlock_page:
	unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_alloc_put:
	gfs2_alloc_put(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (ret == -ENOMEM)
		ret = VM_FAULT_OOM;
	else if (ret)
		ret = VM_FAULT_SIGBUS;
	return ret;
}
static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = gfs2_page_mkwrite,
};

/**
 * gfs2_mmap -
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
		error = gfs2_glock_nq(&i_gh);
		if (error == 0) {
			file_accessed(file);
			gfs2_glock_dq(&i_gh);
		}
		gfs2_holder_uninit(&i_gh);
		if (error)
			return error;
	}
	vma->vm_ops = &gfs2_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	return 0;
}
/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	struct gfs2_file *fp;
	int error;

	fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			goto fail;

		if (!(file->f_flags & O_LARGEFILE) &&
		    i_size_read(inode) > MAX_NON_LFS) {
			error = -EOVERFLOW;
			goto fail_gunlock;
		}

		gfs2_glock_dq_uninit(&i_gh);
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	file->private_data = NULL;
	kfree(fp);
	return error;
}
/**
 * gfs2_close - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_close(struct inode *inode, struct file *file)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_file *fp;

	fp = file->private_data;
	file->private_data = NULL;

	if (gfs2_assert_warn(sdp, fp))
		return -EIO;

	kfree(fp);

	return 0;
}
/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret = 0, ret1 = 0;

	if (mapping->nrpages) {
		ret1 = filemap_fdatawrite_range(mapping, start, end);
		if (ret1 == -EIO)
			return ret1;
	}
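
	/*
	 * For fdatasync() a timestamp-only dirty inode (I_DIRTY_SYNC) can be
	 * ignored; only I_DIRTY_DATASYNC forces the metadata sync below.
	 */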
	if (datasync)
		sync_state &= ~I_DIRTY_SYNC;

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret)
			return ret;
		if (gfs2_is_jdata(ip))
			filemap_write_and_wait(mapping);
		gfs2_ail_flush(ip->i_gl, 1);
	}

	if (mapping->nrpages)
		ret = filemap_fdatawait_range(mapping, start, end);

	return ret ? ret : ret1;
}
/**
 * gfs2_file_aio_write - Perform a write to a file
 * @iocb: The io context
 * @iov: The data to write
 * @nr_segs: Number of @iov segments
 * @pos: The file position
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can land up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;

	if (file->f_flags & O_APPEND) {
		struct dentry *dentry = file->f_dentry;
		struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
		struct gfs2_holder gh;
		int ret;

		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	return generic_file_aio_write(iocb, iov, nr_segs, pos);
}
static int empty_write_end(struct page *page, unsigned from,
			   unsigned to, int mode)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *bh;
	unsigned offset, blksize = 1 << inode->i_blkbits;
	pgoff_t end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;

	zero_user(page, from, to-from);
	mark_page_accessed(page);

	if (page->index < end_index || !(mode & FALLOC_FL_KEEP_SIZE)) {
		if (!gfs2_is_writeback(ip))
			gfs2_page_add_databufs(ip, page, from, to);

		block_commit_write(page, from, to);
		return 0;
	}

	offset = 0;
	bh = page_buffers(page);
	while (offset < to) {
		if (offset >= from) {
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			clear_buffer_new(bh);
			write_dirty_buffer(bh, WRITE);
		}
		offset += blksize;
		bh = bh->b_this_page;
	}
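
	/* Second pass: wait for the writes issued above and check the result */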
	offset = 0;
	bh = page_buffers(page);
	while (offset < to) {
		if (offset >= from) {
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				return -EIO;
		}
		offset += blksize;
		bh = bh->b_this_page;
	}
	return 0;
}
static int needs_empty_write(sector_t block, struct inode *inode)
{
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	bh_map.b_size = 1 << inode->i_blkbits;
	error = gfs2_block_map(inode, block, &bh_map, 0);
	if (unlikely(error))
		return error;
	return !buffer_mapped(&bh_map);
}
static int write_empty_blocks(struct page *page, unsigned from, unsigned to,
			      int mode)
{
	struct inode *inode = page->mapping->host;
	unsigned start, end, next, blksize;
	sector_t block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	int ret;

	blksize = 1 << inode->i_blkbits;
	next = end = 0;
	while (next < from) {
		next += blksize;
		block++;
	}
	start = next;
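
	/*
	 * Walk the blocks covering [from, to): gather each run of blocks
	 * that still needs backing into [start, end) and zero-fill it via
	 * __block_write_begin() and empty_write_end(); blocks that are
	 * already mapped simply terminate the current run.
	 */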
	do {
		next += blksize;
		ret = needs_empty_write(block, inode);
		if (unlikely(ret < 0))
			return ret;
		if (ret == 0) {
			if (end) {
				ret = __block_write_begin(page, start, end - start,
							  gfs2_block_map);
				if (unlikely(ret))
					return ret;
				ret = empty_write_end(page, start, end, mode);
				if (unlikely(ret))
					return ret;
				end = 0;
			}
			start = next;
		}
		else
			end = next;
		block++;
	} while (next < to);

	if (end) {
		ret = __block_write_begin(page, start, end - start, gfs2_block_map);
		if (unlikely(ret))
			return ret;
		ret = empty_write_end(page, start, end, mode);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *dibh;
	int error;
	u64 start = offset >> PAGE_CACHE_SHIFT;
	unsigned int start_offset = offset & ~PAGE_CACHE_MASK;
	u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t curr;
	struct page *page;
	unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK;
	unsigned int from, to;

	if (!end_offset)
		end_offset = PAGE_CACHE_SIZE;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		goto out;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

	curr = start;
	offset = start << PAGE_CACHE_SHIFT;
	from = start_offset;
	to = PAGE_CACHE_SIZE;
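
	/*
	 * Allocate the range one page at a time: 'from'/'to' bound the bytes
	 * within the current page that need backing, with the first and last
	 * pages possibly only partially covered.
	 */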
	while (curr <= end) {
		page = grab_cache_page_write_begin(inode->i_mapping, curr,
						   AOP_FLAG_NOFS);
		if (unlikely(!page)) {
			error = -ENOMEM;
			goto out;
		}

		if (curr == end)
			to = end_offset;
		error = write_empty_blocks(page, from, to, mode);
		if (!error && offset + to > inode->i_size &&
		    !(mode & FALLOC_FL_KEEP_SIZE)) {
			i_size_write(inode, offset + to);
		}
		unlock_page(page);
		page_cache_release(page);
		if (error)
			goto out;
		curr++;
		offset += PAGE_CACHE_SIZE;
		from = 0;
	}

	mark_inode_dirty(inode);

	brelse(dibh);

out:
	return error;
}
static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks)
{
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int max_blocks = ip->i_rgd->rd_free_clone;
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}
	/* This calculation isn't the exact reverse of gfs2_write_calc_reserv(),
	   so it might end up with fewer data blocks */
	if (max_data <= *data_blocks)
		return;
	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}
static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
			   loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes;
	struct gfs2_alloc *al;
	int error;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;

	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;

	if (!gfs2_write_alloc_required(ip, offset, len))
		goto out_unlock;

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		al = gfs2_alloc_get(ip);
		if (!al) {
			error = -ENOMEM;
			goto out_unlock;
		}

		error = gfs2_quota_lock_check(ip);
		if (error)
			goto out_alloc_put;

retry:
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);

		al->al_requested = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error) {
			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
				bytes >>= 1;
				bytes &= bsize_mask;
				if (bytes == 0)
					bytes = sdp->sd_sb.sb_bsize;
				goto retry;
			}
			goto out_qunlock;
		}
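
		/*
		 * The reservation succeeded: clamp this chunk to what the
		 * selected resource group can actually supply before
		 * starting the transaction.
		 */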
		max_bytes = bytes;
		calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks);
		al->al_requested = data_blocks + ind_blocks;

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(ip);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;

		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}
	goto out_unlock;

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
out_alloc_put:
	gfs2_alloc_put(ip);
out_unlock:
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}
#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_setlease - acquire/release a file lease
 * @file: the file pointer
 * @arg: lease type
 * @fl: file lock
 *
 * We don't currently have a way to enforce a lease across the whole
 * cluster; until we do, disable leases (by just returning -EINVAL),
 * unless the administrator has requested purely local locking.
 *
 * Locking: called under lock_flocks
 *
 * Returns: errno
 */

static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
{
	return -EINVAL;
}
/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;
	if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}
static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file->f_path.dentry->d_inode);
	struct gfs2_glock *gl;
	unsigned int state;
	int flags;
	int error = 0;
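
	/*
	 * Map the flock request onto a glock: a write lock needs the
	 * exclusive state, a read lock the shared state, and non-blocking
	 * requests use a "try" lock so we can return -EAGAIN instead of
	 * waiting.
	 */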
	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;

	mutex_lock(&fp->f_fl_mutex);

	gl = fl_gh->gh_gl;
	if (gl) {
		if (fl_gh->gh_state == state)
			goto out;
		flock_lock_file_wait(file,
				     &(struct file_lock){.fl_type = F_UNLCK});
		gfs2_glock_dq_wait(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
	}
	error = gfs2_glock_nq(fl_gh);
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = flock_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}
static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	flock_lock_file_wait(file, fl);
	if (fl_gh->gh_gl) {
		gfs2_glock_dq_wait(fl_gh);
		gfs2_holder_uninit(fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}
/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}
const struct file_operations gfs2_file_fops = {
	.llseek		= gfs2_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= gfs2_file_aio_write,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_close,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.setlease	= gfs2_setlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops = {
	.readdir	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_close,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.llseek		= default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */

const struct file_operations gfs2_file_fops_nolock = {
	.llseek		= gfs2_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= gfs2_file_aio_write,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_close,
	.fsync		= gfs2_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.setlease	= generic_setlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops_nolock = {
	.readdir	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_close,
	.fsync		= gfs2_fsync,
	.llseek		= default_llseek,
};