/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/ext2_fs.h>
#include <linux/crc32.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "log.h"
#include "meta_io.h"
#include "ops_file.h"
#include "ops_vm.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "eaops.h"
/* "bad" is for NFS support */
struct filldir_bad_entry {
        char *fbe_name;
        unsigned int fbe_length;
        uint64_t fbe_offset;
        struct gfs2_inum fbe_inum;
        unsigned int fbe_type;
};

struct filldir_bad {
        struct gfs2_sbd *fdb_sbd;
        struct filldir_bad_entry *fdb_entry;
        unsigned int fdb_entry_num;
        unsigned int fdb_entry_off;
        char *fdb_name;
        unsigned int fdb_name_size;
        unsigned int fdb_name_off;
};
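/*
 * readdir_bad() carves all of this out of a single allocation: the
 * struct filldir_bad header is followed by fdb_entry_num entry slots,
 * which are in turn followed by the fdb_name buffer that the entry
 * names are packed into.
 */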
/* For regular, non-NFS */
struct filldir_reg {
        struct gfs2_sbd *fdr_sbd;
        int fdr_prefetch;
        filldir_t fdr_filldir;
        void *fdr_opaque;
};
/*
 * Most fields left uninitialised to catch anybody who tries to
 * use them. f_flags set to prevent file_accessed() from touching
 * any other part of this. Its use is purely as a flag so that we
 * know (in readpage()) whether or not to do locking.
 */
struct file gfs2_internal_file_sentinal = {
        .f_flags = O_NOATIME|O_RDONLY,
};
static int gfs2_read_actor(read_descriptor_t *desc, struct page *page,
                           unsigned long offset, unsigned long size)
{
        char *kaddr;
        unsigned long count = desc->count;

        if (size > count)
                size = count;

        kaddr = kmap(page);
        memcpy(desc->arg.buf, kaddr + offset, size);
        kunmap(page);

        desc->count = count - size;
        desc->written += size;
        desc->arg.buf += size;
        return size;
}

int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
                       char *buf, loff_t *pos, unsigned size)
{
        struct inode *inode = ip->i_vnode;
        read_descriptor_t desc;

        desc.written = 0;
        desc.arg.buf = buf;
        desc.count = size;
        desc.error = 0;
        do_generic_mapping_read(inode->i_mapping, ra_state,
                                &gfs2_internal_file_sentinal, pos, &desc,
                                gfs2_read_actor);
        return desc.written ? desc.written : desc.error;
}
/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @origin: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
{
        struct gfs2_inode *ip = file->f_mapping->host->u.generic_ip;
        struct gfs2_holder i_gh;
        loff_t error;

        if (origin == 2) {
                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
                                           &i_gh);
                if (!error) {
                        error = remote_llseek(file, offset, origin);
                        gfs2_glock_dq_uninit(&i_gh);
                }
        } else
                error = remote_llseek(file, offset, origin);

        return error;
}

static ssize_t gfs2_direct_IO_read(struct kiocb *iocb, const struct iovec *iov,
                                   loff_t offset, unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        ssize_t retval;

        retval = filemap_write_and_wait(mapping);
        if (retval == 0) {
                retval = mapping->a_ops->direct_IO(READ, iocb, iov, offset,
                                                   nr_segs);
        }
        return retval;
}
/**
 * __gfs2_file_aio_read - The main GFS2 read function
 *
 * N.B. This is almost, but not quite, the same as __generic_file_aio_read();
 * the important subtle difference being that inode->i_size isn't valid
 * unless we are holding a lock, and we do this _only_ on the O_DIRECT
 * path since otherwise locking is done entirely at the page cache
 * layer.
 */
static ssize_t __gfs2_file_aio_read(struct kiocb *iocb,
                                    const struct iovec *iov,
                                    unsigned long nr_segs, loff_t *ppos)
{
        struct file *filp = iocb->ki_filp;
        struct gfs2_inode *ip = filp->f_mapping->host->u.generic_ip;
        struct gfs2_holder gh;
        ssize_t retval;
        unsigned long seg;
        size_t count;

        count = 0;
        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *iv = &iov[seg];

                /*
                 * If any segment has a negative length, or the cumulative
                 * length ever wraps negative then return -EINVAL.
                 */
                count += iv->iov_len;
                if (unlikely((ssize_t)(count|iv->iov_len) < 0))
                        return -EINVAL;
                if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
                        continue;
                if (seg == 0)
                        return -EFAULT;
                nr_segs = seg;
                count -= iv->iov_len;   /* This segment is no good */
                break;
        }

        /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
        if (filp->f_flags & O_DIRECT) {
                loff_t pos = *ppos, size;
                struct address_space *mapping;
                struct inode *inode;

                mapping = filp->f_mapping;
                inode = mapping->host;
                retval = 0;
                if (!count)
                        goto out; /* skip atime */

                gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
                retval = gfs2_glock_nq_m_atime(1, &gh);
                if (retval)
                        goto out;
                if (gfs2_is_stuffed(ip)) {
                        gfs2_glock_dq_m(1, &gh);
                        gfs2_holder_uninit(&gh);
                        goto fallback_to_normal;
                }
                size = i_size_read(inode);
                if (pos < size) {
                        retval = gfs2_direct_IO_read(iocb, iov, pos, nr_segs);
                        if (retval > 0 && !is_sync_kiocb(iocb))
                                retval = -EIOCBQUEUED;
                        if (retval > 0)
                                *ppos = pos + retval;
                }
                file_accessed(filp);
                gfs2_glock_dq_m(1, &gh);
                gfs2_holder_uninit(&gh);
                goto out;
        }

fallback_to_normal:
        retval = 0;
        if (count) {
                for (seg = 0; seg < nr_segs; seg++) {
                        read_descriptor_t desc;

                        desc.written = 0;
                        desc.arg.buf = iov[seg].iov_base;
                        desc.count = iov[seg].iov_len;
                        if (desc.count == 0)
                                continue;
                        desc.error = 0;
                        do_generic_file_read(filp, ppos, &desc,
                                             file_read_actor);
                        retval += desc.written;
                        if (desc.error) {
                                retval = retval ?: desc.error;
                                break;
                        }
                }
        }
out:
        return retval;
}
/**
 * gfs2_read - Read bytes from a file
 * @file: The file to read from
 * @buf: The buffer to copy into
 * @size: The amount of data requested
 * @offset: The current file offset
 *
 * Outputs: Offset - updated according to number of bytes read
 *
 * Returns: The number of bytes read, errno on failure
 */

static ssize_t gfs2_read(struct file *filp, char __user *buf, size_t size,
                         loff_t *offset)
{
        struct iovec local_iov = { .iov_base = buf, .iov_len = size };
        struct kiocb kiocb;
        ssize_t ret;

        init_sync_kiocb(&kiocb, filp);
        ret = __gfs2_file_aio_read(&kiocb, &local_iov, 1, offset);
        if (-EIOCBQUEUED == ret)
                ret = wait_on_sync_kiocb(&kiocb);
        return ret;
}

static ssize_t gfs2_file_readv(struct file *filp, const struct iovec *iov,
                               unsigned long nr_segs, loff_t *ppos)
{
        struct kiocb kiocb;
        ssize_t ret;

        init_sync_kiocb(&kiocb, filp);
        ret = __gfs2_file_aio_read(&kiocb, iov, nr_segs, ppos);
        if (-EIOCBQUEUED == ret)
                ret = wait_on_sync_kiocb(&kiocb);
        return ret;
}

static ssize_t gfs2_file_aio_read(struct kiocb *iocb, char __user *buf,
                                  size_t count, loff_t pos)
{
        struct iovec local_iov = { .iov_base = buf, .iov_len = count };

        BUG_ON(iocb->ki_pos != pos);
        return __gfs2_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
}
/**
 * filldir_reg_func - Report a directory entry to the caller of gfs2_dir_read()
 * @opaque: opaque data used by the function
 * @name: the name of the directory entry
 * @length: the length of the name
 * @offset: the entry's offset in the directory
 * @inum: the inode number the entry points to
 * @type: the type of inode the entry points to
 *
 * Returns: 0 on success, 1 if buffer full
 */

static int filldir_reg_func(void *opaque, const char *name, unsigned int length,
                            uint64_t offset, struct gfs2_inum *inum,
                            unsigned int type)
{
        struct filldir_reg *fdr = (struct filldir_reg *)opaque;
        struct gfs2_sbd *sdp = fdr->fdr_sbd;
        int error;

        error = fdr->fdr_filldir(fdr->fdr_opaque, name, length, offset,
                                 inum->no_formal_ino, type);
        if (error)
                return 1;

        if (fdr->fdr_prefetch && !(length == 1 && *name == '.')) {
                gfs2_glock_prefetch_num(sdp,
                                        inum->no_addr, &gfs2_inode_glops,
                                        LM_ST_SHARED, LM_FLAG_TRY | LM_FLAG_ANY);
                gfs2_glock_prefetch_num(sdp,
                                        inum->no_addr, &gfs2_iopen_glops,
                                        LM_ST_SHARED, LM_FLAG_TRY);
        }

        return 0;
}

/**
 * readdir_reg - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * Returns: errno
 */

static int readdir_reg(struct file *file, void *dirent, filldir_t filldir)
{
        struct inode *dir = file->f_mapping->host;
        struct gfs2_inode *dip = dir->u.generic_ip;
        struct filldir_reg fdr;
        struct gfs2_holder d_gh;
        uint64_t offset = file->f_pos;
        int error;

        fdr.fdr_sbd = dip->i_sbd;
        fdr.fdr_prefetch = 1;
        fdr.fdr_filldir = filldir;
        fdr.fdr_opaque = dirent;

        gfs2_holder_init(dip->i_gl, LM_ST_SHARED, GL_ATIME, &d_gh);
        error = gfs2_glock_nq_atime(&d_gh);
        if (error) {
                gfs2_holder_uninit(&d_gh);
                return error;
        }

        error = gfs2_dir_read(dir, &offset, &fdr, filldir_reg_func);

        gfs2_glock_dq_uninit(&d_gh);

        file->f_pos = offset;

        return error;
}
/**
 * filldir_bad_func - Report a directory entry to the caller of gfs2_dir_read()
 * @opaque: opaque data used by the function
 * @name: the name of the directory entry
 * @length: the length of the name
 * @offset: the entry's offset in the directory
 * @inum: the inode number the entry points to
 * @type: the type of inode the entry points to
 *
 * For supporting NFS.
 *
 * Returns: 0 on success, 1 if buffer full
 */

static int filldir_bad_func(void *opaque, const char *name, unsigned int length,
                            uint64_t offset, struct gfs2_inum *inum,
                            unsigned int type)
{
        struct filldir_bad *fdb = (struct filldir_bad *)opaque;
        struct gfs2_sbd *sdp = fdb->fdb_sbd;
        struct filldir_bad_entry *fbe;

        if (fdb->fdb_entry_off == fdb->fdb_entry_num ||
            fdb->fdb_name_off + length > fdb->fdb_name_size)
                return 1;

        fbe = &fdb->fdb_entry[fdb->fdb_entry_off];
        fbe->fbe_name = fdb->fdb_name + fdb->fdb_name_off;
        memcpy(fbe->fbe_name, name, length);
        fbe->fbe_length = length;
        fbe->fbe_offset = offset;
        fbe->fbe_inum = *inum;
        fbe->fbe_type = type;

        fdb->fdb_entry_off++;
        fdb->fdb_name_off += length;

        if (!(length == 1 && *name == '.')) {
                gfs2_glock_prefetch_num(sdp,
                                        inum->no_addr, &gfs2_inode_glops,
                                        LM_ST_SHARED, LM_FLAG_TRY | LM_FLAG_ANY);
                gfs2_glock_prefetch_num(sdp,
                                        inum->no_addr, &gfs2_iopen_glops,
                                        LM_ST_SHARED, LM_FLAG_TRY);
        }

        return 0;
}

/**
 * readdir_bad - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * For supporting NFS.
 *
 * Returns: errno
 */

static int readdir_bad(struct file *file, void *dirent, filldir_t filldir)
{
        struct inode *dir = file->f_mapping->host;
        struct gfs2_inode *dip = dir->u.generic_ip;
        struct gfs2_sbd *sdp = dip->i_sbd;
        struct filldir_reg fdr;
        unsigned int entries, size;
        struct filldir_bad *fdb;
        struct gfs2_holder d_gh;
        uint64_t offset = file->f_pos;
        unsigned int x;
        struct filldir_bad_entry *fbe;
        int error;

        entries = gfs2_tune_get(sdp, gt_entries_per_readdir);
        size = sizeof(struct filldir_bad) +
            entries * (sizeof(struct filldir_bad_entry) + GFS2_FAST_NAME_SIZE);

        fdb = kzalloc(size, GFP_KERNEL);
        if (!fdb)
                return -ENOMEM;

        fdb->fdb_sbd = sdp;
        fdb->fdb_entry = (struct filldir_bad_entry *)(fdb + 1);
        fdb->fdb_entry_num = entries;
        fdb->fdb_name = ((char *)fdb) + sizeof(struct filldir_bad) +
                entries * sizeof(struct filldir_bad_entry);
        fdb->fdb_name_size = entries * GFS2_FAST_NAME_SIZE;

        gfs2_holder_init(dip->i_gl, LM_ST_SHARED, GL_ATIME, &d_gh);
        error = gfs2_glock_nq_atime(&d_gh);
        if (error) {
                gfs2_holder_uninit(&d_gh);
                goto out;
        }

        error = gfs2_dir_read(dir, &offset, fdb, filldir_bad_func);

        gfs2_glock_dq_uninit(&d_gh);

        fdr.fdr_sbd = sdp;
        fdr.fdr_prefetch = 0;
        fdr.fdr_filldir = filldir;
        fdr.fdr_opaque = dirent;

        for (x = 0; x < fdb->fdb_entry_off; x++) {
                fbe = &fdb->fdb_entry[x];

                error = filldir_reg_func(&fdr,
                                         fbe->fbe_name, fbe->fbe_length,
                                         fbe->fbe_offset,
                                         &fbe->fbe_inum, fbe->fbe_type);
                if (error) {
                        file->f_pos = fbe->fbe_offset;
                        error = 0;
                        goto out;
                }
        }

        file->f_pos = offset;

out:
        kfree(fdb);

        return error;
}
/**
 * gfs2_readdir - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
{
        int error;

        if (strcmp(current->comm, "nfsd") != 0)
                error = readdir_reg(file, dirent, filldir);
        else
                error = readdir_bad(file, dirent, filldir);

        return error;
}
const struct gfs2_flag_eattr {
        u32 flag;
        u32 ext2;
} gfs2_flag_eattrs[] = {
        {
                .flag = GFS2_DIF_IMMUTABLE,
                .ext2 = EXT2_IMMUTABLE_FL,
        }, {
                .flag = GFS2_DIF_APPENDONLY,
                .ext2 = EXT2_APPEND_FL,
        }, {
                .flag = GFS2_DIF_JDATA,
                .ext2 = EXT2_JOURNAL_DATA_FL,
        }, {
                .flag = GFS2_DIF_EXHASH,
                .ext2 = EXT2_INDEX_FL,
        }, {
                .flag = GFS2_DIF_EA_INDIRECT,
        }, {
                .flag = GFS2_DIF_DIRECTIO,
        }, {
                .flag = GFS2_DIF_NOATIME,
                .ext2 = EXT2_NOATIME_FL,
        }, {
                .flag = GFS2_DIF_SYNC,
                .ext2 = EXT2_SYNC_FL,
        }, {
                .flag = GFS2_DIF_SYSTEM,
        }, {
                .flag = GFS2_DIF_TRUNC_IN_PROG,
        }, {
                .flag = GFS2_DIF_INHERIT_JDATA,
        }, {
                .flag = GFS2_DIF_INHERIT_DIRECTIO,
        }, {
        },
};
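/*
 * Entries without a .ext2 member have no ext2 counterpart and simply
 * contribute nothing to the translated value; the all-zero entry at the
 * end terminates the table for the lookup loops below.
 */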
static const struct gfs2_flag_eattr *get_by_ext2(u32 ext2)
{
        const struct gfs2_flag_eattr *p = gfs2_flag_eattrs;
        for (; p->flag; p++) {
                if (ext2 == p->ext2)
                        return p;
        }
        return NULL;
}

static const struct gfs2_flag_eattr *get_by_gfs2(u32 gfs2)
{
        const struct gfs2_flag_eattr *p = gfs2_flag_eattrs;
        for (; p->flag; p++) {
                if (gfs2 == p->flag)
                        return p;
        }
        return NULL;
}

static u32 gfs2_flags_to_ext2(u32 gfs2)
{
        const struct gfs2_flag_eattr *ea;
        u32 ext2 = 0;
        u32 mask = 1;

        for (; mask != 0; mask <<= 1) {
                if (mask & gfs2) {
                        ea = get_by_gfs2(mask);
                        if (ea)
                                ext2 |= ea->ext2;
                }
        }
        return ext2;
}

static int gfs2_flags_from_ext2(u32 *gfs2, u32 ext2)
{
        const struct gfs2_flag_eattr *ea;
        u32 mask = 1;

        for (; mask != 0; mask <<= 1) {
                if (mask & ext2) {
                        ea = get_by_ext2(mask);
                        if (ea == NULL)
                                return -EINVAL;
                        *gfs2 |= ea->flag;
                }
        }
        return 0;
}
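/*
 * For example, a dinode with GFS2_DIF_APPENDONLY | GFS2_DIF_NOATIME set
 * is reported by gfs2_flags_to_ext2() as EXT2_APPEND_FL | EXT2_NOATIME_FL,
 * and gfs2_flags_from_ext2() performs the reverse translation, returning
 * -EINVAL for any ext2 flag bit that has no GFS2 counterpart.
 */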
static int get_ext2_flags(struct inode *inode, u32 __user *ptr)
{
        struct gfs2_inode *ip = inode->u.generic_ip;
        struct gfs2_holder gh;
        int error;
        u32 ext2;

        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
        error = gfs2_glock_nq_m_atime(1, &gh);
        if (error)
                return error;

        ext2 = gfs2_flags_to_ext2(ip->i_di.di_flags);
        if (put_user(ext2, ptr))
                error = -EFAULT;

        gfs2_glock_dq_m(1, &gh);
        gfs2_holder_uninit(&gh);
        return error;
}
/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA| \
                             GFS2_DIF_DIRECTIO| \
                             GFS2_DIF_IMMUTABLE| \
                             GFS2_DIF_APPENDONLY| \
                             GFS2_DIF_NOATIME| \
                             GFS2_DIF_SYNC| \
                             GFS2_DIF_SYSTEM| \
                             GFS2_DIF_INHERIT_DIRECTIO| \
                             GFS2_DIF_INHERIT_JDATA)

/**
 * gfs2_set_flags - set flags on an inode
 * @inode: The inode
 * @flags: The flags to set
 * @mask: Indicates which flags are valid
 *
 */
static int gfs2_set_flags(struct inode *inode, u32 flags, u32 mask)
{
        struct gfs2_inode *ip = inode->u.generic_ip;
        struct buffer_head *bh;
        struct gfs2_holder gh;
        int error;
        u32 new_flags;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        if (error)
                return error;

        /* Compare against the current flags to see what would change */
        new_flags = (ip->i_di.di_flags & ~mask) | (flags & mask);
        if (new_flags == ip->i_di.di_flags)
                goto out;

        error = -EINVAL;
        if ((new_flags ^ ip->i_di.di_flags) & ~GFS2_FLAGS_USER_SET)
                goto out;
        if (S_ISDIR(inode->i_mode)) {
                if ((new_flags ^ ip->i_di.di_flags) &
                    (GFS2_DIF_JDATA | GFS2_DIF_DIRECTIO))
                        goto out;
        } else if (S_ISREG(inode->i_mode)) {
                if ((new_flags ^ ip->i_di.di_flags) &
                    (GFS2_DIF_INHERIT_DIRECTIO | GFS2_DIF_INHERIT_JDATA))
                        goto out;
        } else
                goto out;

        error = -EPERM;
        if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
                goto out;
        if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
                goto out;

        error = gfs2_repermission(inode, MAY_WRITE, NULL);
        if (error)
                goto out;

        error = gfs2_meta_inode_buffer(ip, &bh);
        if (error)
                goto out;
        gfs2_trans_add_bh(ip->i_gl, bh, 1);
        ip->i_di.di_flags = new_flags;
        gfs2_dinode_out(&ip->i_di, bh->b_data);
        brelse(bh);
out:
        gfs2_glock_dq_uninit(&gh);
        return error;
}
static int set_ext2_flags(struct inode *inode, u32 __user *ptr)
{
        u32 ext2, gfs2 = 0;

        if (get_user(ext2, ptr))
                return -EFAULT;
        /* gfs2_flags_from_ext2() ORs into its output, so start from zero */
        if (gfs2_flags_from_ext2(&gfs2, ext2))
                return -EINVAL;
        return gfs2_set_flags(inode, gfs2, ~0);
}
int gfs2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
               unsigned long arg)
{
        switch (cmd) {
        case EXT2_IOC_GETFLAGS:
                return get_ext2_flags(inode, (u32 __user *)arg);
        case EXT2_IOC_SETFLAGS:
                return set_ext2_flags(inode, (u32 __user *)arg);
        }
        return -ENOTTY;
}
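/*
 * GFS2 reuses the ext2 flag ioctls rather than defining its own, so an
 * (illustrative, untested) userspace caller would look something like:
 *
 *      unsigned int fl;
 *      ioctl(fd, EXT2_IOC_GETFLAGS, &fl);
 *      fl |= EXT2_NOATIME_FL;
 *      ioctl(fd, EXT2_IOC_SETFLAGS, &fl);
 *
 * Flags outside GFS2_FLAGS_USER_SET cannot be changed this way.
 */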
/**
 * gfs2_mmap -
 * @file: The file to map
 * @vma: The VMA which described the mapping
 *
 * Returns: 0 or error code
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct gfs2_inode *ip = file->f_mapping->host->u.generic_ip;
        struct gfs2_holder i_gh;
        int error;

        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
        error = gfs2_glock_nq_atime(&i_gh);
        if (error) {
                gfs2_holder_uninit(&i_gh);
                return error;
        }

        /* This is VM_MAYWRITE instead of VM_WRITE because a call
           to mprotect() can turn on VM_WRITE later. */

        if ((vma->vm_flags & (VM_MAYSHARE | VM_MAYWRITE)) ==
            (VM_MAYSHARE | VM_MAYWRITE))
                vma->vm_ops = &gfs2_vm_ops_sharewrite;
        else
                vma->vm_ops = &gfs2_vm_ops_private;

        gfs2_glock_dq_uninit(&i_gh);

        return error;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
        struct gfs2_inode *ip = inode->u.generic_ip;
        struct gfs2_holder i_gh;
        struct gfs2_file *fp;
        int error;

        fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
        if (!fp)
                return -ENOMEM;

        mutex_init(&fp->f_fl_mutex);

        fp->f_inode = ip;
        fp->f_vfile = file;

        gfs2_assert_warn(ip->i_sbd, !file->private_data);
        file->private_data = fp;

        if (S_ISREG(ip->i_di.di_mode)) {
                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
                                           &i_gh);
                if (error)
                        goto fail;

                if (!(file->f_flags & O_LARGEFILE) &&
                    ip->i_di.di_size > MAX_NON_LFS) {
                        error = -EFBIG;
                        goto fail_gunlock;
                }

                /* Listen to the Direct I/O flag */
                if (ip->i_di.di_flags & GFS2_DIF_DIRECTIO)
                        file->f_flags |= O_DIRECT;

                gfs2_glock_dq_uninit(&i_gh);
        }

        return 0;

fail_gunlock:
        gfs2_glock_dq_uninit(&i_gh);
fail:
        file->private_data = NULL;
        kfree(fp);
        return error;
}
/**
 * gfs2_close - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_close(struct inode *inode, struct file *file)
{
        struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
        struct gfs2_file *fp;

        fp = file->private_data;
        file->private_data = NULL;

        if (gfs2_assert_warn(sdp, fp))
                return -EIO;

        kfree(fp);

        return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry (we ignore this)
 * @dentry: the dentry that points to the inode to sync
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, struct dentry *dentry, int datasync)
{
        struct gfs2_inode *ip = dentry->d_inode->u.generic_ip;

        gfs2_log_flush_glock(ip->i_gl);

        return 0;
}
/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
        struct gfs2_inode *ip = file->f_mapping->host->u.generic_ip;
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct lm_lockname name =
                { .ln_number = ip->i_num.no_addr,
                  .ln_type = LM_TYPE_PLOCK };

        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;
        if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
                return -ENOLCK;

        if (sdp->sd_args.ar_localflocks) {
                if (IS_GETLK(cmd)) {
                        struct file_lock *tmp;
                        lock_kernel();
                        tmp = posix_test_lock(file, fl);
                        fl->fl_type = F_UNLCK;
                        if (tmp)
                                memcpy(fl, tmp, sizeof(struct file_lock));
                        unlock_kernel();
                        return 0;
                } else {
                        int error;
                        lock_kernel();
                        error = posix_lock_file_wait(file, fl);
                        unlock_kernel();
                        return error;
                }
        }

        if (IS_GETLK(cmd))
                return gfs2_lm_plock_get(sdp, &name, file, fl);
        else if (fl->fl_type == F_UNLCK)
                return gfs2_lm_punlock(sdp, &name, file, fl);
        else
                return gfs2_lm_plock(sdp, &name, file, cmd, fl);
}

/**
 * gfs2_sendfile - Send bytes to a file or socket
 * @in_file: The file to read from
 * @out_file: The file to write to
 * @count: The amount of data
 * @offset: The beginning file offset
 *
 * Outputs: offset - updated according to number of bytes read
 *
 * Returns: The number of bytes sent, errno on failure
 */

static ssize_t gfs2_sendfile(struct file *in_file, loff_t *offset, size_t count,
                             read_actor_t actor, void *target)
{
        return generic_file_sendfile(in_file, offset, count, actor, target);
}
static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
        struct gfs2_file *fp = file->private_data;
        struct gfs2_holder *fl_gh = &fp->f_fl_gh;
        struct gfs2_inode *ip = fp->f_inode;
        struct gfs2_glock *gl;
        unsigned int state;
        int flags;
        int error = 0;

        state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
        flags = ((IS_SETLKW(cmd)) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;

        mutex_lock(&fp->f_fl_mutex);

        gl = fl_gh->gh_gl;
        if (gl) {
                if (fl_gh->gh_state == state)
                        goto out;
                gfs2_glock_hold(gl);
                flock_lock_file_wait(file,
                                     &(struct file_lock){.fl_type = F_UNLCK});
                gfs2_glock_dq_uninit(fl_gh);
        } else {
                error = gfs2_glock_get(ip->i_sbd,
                                       ip->i_num.no_addr, &gfs2_flock_glops,
                                       CREATE, &gl);
                if (error)
                        goto out;
        }

        gfs2_holder_init(gl, state, flags, fl_gh);
        gfs2_glock_put(gl);

        error = gfs2_glock_nq(fl_gh);
        if (error) {
                gfs2_holder_uninit(fl_gh);
                if (error == GLR_TRYFAILED)
                        error = -EAGAIN;
        } else {
                error = flock_lock_file_wait(file, fl);
                gfs2_assert_warn(ip->i_sbd, !error);
        }

out:
        mutex_unlock(&fp->f_fl_mutex);

        return error;
}

static void do_unflock(struct file *file, struct file_lock *fl)
{
        struct gfs2_file *fp = file->private_data;
        struct gfs2_holder *fl_gh = &fp->f_fl_gh;

        mutex_lock(&fp->f_fl_mutex);
        flock_lock_file_wait(file, fl);
        if (fl_gh->gh_gl)
                gfs2_glock_dq_uninit(fl_gh);
        mutex_unlock(&fp->f_fl_mutex);
}
/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
        struct gfs2_inode *ip = file->f_mapping->host->u.generic_ip;
        struct gfs2_sbd *sdp = ip->i_sbd;

        if (!(fl->fl_flags & FL_FLOCK))
                return -ENOLCK;
        if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
                return -ENOLCK;

        if (sdp->sd_args.ar_localflocks)
                return flock_lock_file_wait(file, fl);

        if (fl->fl_type == F_UNLCK) {
                do_unflock(file, fl);
                return 0;
        } else
                return do_flock(file, cmd, fl);
}
struct file_operations gfs2_file_fops = {
        .llseek = gfs2_llseek,
        .read = gfs2_read,
        .readv = gfs2_file_readv,
        .aio_read = gfs2_file_aio_read,
        .write = generic_file_write,
        .writev = generic_file_writev,
        .aio_write = generic_file_aio_write,
        .ioctl = gfs2_ioctl,
        .mmap = gfs2_mmap,
        .open = gfs2_open,
        .release = gfs2_close,
        .fsync = gfs2_fsync,
        .lock = gfs2_lock,
        .sendfile = gfs2_sendfile,
        .flock = gfs2_flock,
};

struct file_operations gfs2_dir_fops = {
        .readdir = gfs2_readdir,
        .ioctl = gfs2_ioctl,
        .open = gfs2_open,
        .release = gfs2_close,
        .fsync = gfs2_fsync,
        .lock = gfs2_lock,
        .flock = gfs2_flock,
};