inode.c

/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/posix_acl.h>
#include <linux/sort.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/lm_interface.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "acl.h"
#include "bmap.h"
#include "dir.h"
#include "eattr.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "ops_inode.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

struct gfs2_inum_range_host {
	u64 ir_start;
	u64 ir_length;
};
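
/*
 * iget_test/iget_set are the callbacks passed to ilookup5() and
 * iget5_locked() below: inodes are hashed and matched on the block
 * address (no_addr) of the on-disk dinode.
 */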
static int iget_test(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	u64 *no_addr = opaque;

	if (ip->i_no_addr == *no_addr &&
	    inode->i_private != NULL)
		return 1;

	return 0;
}

static int iget_set(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	u64 *no_addr = opaque;

	inode->i_ino = (unsigned long)*no_addr;
	ip->i_no_addr = *no_addr;
	return 0;
}

struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
{
	unsigned long hash = (unsigned long)no_addr;
	return ilookup5(sb, hash, iget_test, &no_addr);
}

static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
{
	unsigned long hash = (unsigned long)no_addr;
	return iget5_locked(sb, hash, iget_test, iget_set, &no_addr);
}
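
/*
 * The "skip" variants below behave like gfs2_iget(), except that they
 * refuse to return an inode that is in the middle of being freed
 * (I_FREEING/I_CLEAR/I_WILL_FREE); such inodes are flagged as skipped
 * instead of being reused.
 */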
struct gfs2_skip_data {
	u64 no_addr;
	int skipped;
};

static int iget_skip_test(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_skip_data *data = opaque;

	if (ip->i_no_addr == data->no_addr && inode->i_private != NULL) {
		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
			data->skipped = 1;
			return 0;
		}
		return 1;
	}
	return 0;
}

static int iget_skip_set(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_skip_data *data = opaque;

	if (data->skipped)
		return 1;
	inode->i_ino = (unsigned long)(data->no_addr);
	ip->i_no_addr = data->no_addr;
	return 0;
}

static struct inode *gfs2_iget_skip(struct super_block *sb,
				    u64 no_addr)
{
	struct gfs2_skip_data data;
	unsigned long hash = (unsigned long)no_addr;

	data.no_addr = no_addr;
	data.skipped = 0;
	return iget5_locked(sb, hash, iget_skip_test, iget_skip_set, &data);
}

/**
 * GFS2 lookup code fills in the vfs inode contents based on info obtained
 * from the directory entry inside gfs2_inode_lookup(). This has caused
 * issues with the NFS code path, since its get_dentry routine doesn't have
 * the relevant directory entry when gfs2_inode_lookup() is invoked. Part of
 * the code inside gfs2_inode_lookup() needs to be moved around to fix this.
 *
 * Clean up I_LOCK and I_NEW as well.
 **/

void gfs2_set_iop(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	umode_t mode = inode->i_mode;

	if (S_ISREG(mode)) {
		inode->i_op = &gfs2_file_iops;
		if (sdp->sd_args.ar_localflocks)
			inode->i_fop = &gfs2_file_fops_nolock;
		else
			inode->i_fop = &gfs2_file_fops;
	} else if (S_ISDIR(mode)) {
		inode->i_op = &gfs2_dir_iops;
		if (sdp->sd_args.ar_localflocks)
			inode->i_fop = &gfs2_dir_fops_nolock;
		else
			inode->i_fop = &gfs2_dir_fops;
	} else if (S_ISLNK(mode)) {
		inode->i_op = &gfs2_symlink_iops;
	} else {
		inode->i_op = &gfs2_dev_iops;
	}

	unlock_new_inode(inode);
}

/**
 * gfs2_inode_lookup - Lookup an inode
 * @sb: The super block
 * @type: The type of the inode
 * @no_addr: The disk address of the inode (dinode block number)
 * @no_formal_ino: The formal inode number
 * @skip_freeing: set this to avoid returning an inode that is currently
 *                being freed
 *
 * Returns: A VFS inode, or an error
 */
struct inode *gfs2_inode_lookup(struct super_block *sb,
				unsigned int type,
				u64 no_addr,
				u64 no_formal_ino, int skip_freeing)
{
	struct inode *inode;
	struct gfs2_inode *ip;
	struct gfs2_glock *io_gl;
	int error;

	if (skip_freeing)
		inode = gfs2_iget_skip(sb, no_addr);
	else
		inode = gfs2_iget(sb, no_addr);
	ip = GFS2_I(inode);

	if (!inode)
		return ERR_PTR(-ENOBUFS);

	if (inode->i_state & I_NEW) {
		struct gfs2_sbd *sdp = GFS2_SB(inode);
		inode->i_private = ip;
		ip->i_no_formal_ino = no_formal_ino;

		error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
		if (unlikely(error))
			goto fail;
		ip->i_gl->gl_object = ip;

		error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
		if (unlikely(error))
			goto fail_put;

		set_bit(GIF_INVALID, &ip->i_flags);
		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
		if (unlikely(error))
			goto fail_iopen;
		ip->i_iopen_gh.gh_gl->gl_object = ip;

		gfs2_glock_put(io_gl);

		if ((type == DT_UNKNOWN) && (no_formal_ino == 0))
			goto gfs2_nfsbypass;

		inode->i_mode = DT2IF(type);

		/*
		 * We must read the inode in order to work out its type in
		 * this case. Note that this doesn't happen often as we normally
		 * know the type beforehand. This code path only occurs during
		 * unlinked inode recovery (where it is safe to do this glock,
		 * which is not true in the general case).
		 */
		if (type == DT_UNKNOWN) {
			struct gfs2_holder gh;
			error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
			if (unlikely(error))
				goto fail_glock;
			/* Inode is now uptodate */
			gfs2_glock_dq_uninit(&gh);
		}

		gfs2_set_iop(inode);
	}

gfs2_nfsbypass:
	return inode;

fail_glock:
	gfs2_glock_dq(&ip->i_iopen_gh);
fail_iopen:
	gfs2_glock_put(io_gl);
fail_put:
	ip->i_gl->gl_object = NULL;
	gfs2_glock_put(ip->i_gl);
fail:
	iget_failed(inode);
	return ERR_PTR(error);
}
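
/*
 * gfs2_dinode_in - populate the incore inode from an on-disk dinode
 *
 * Copies the big-endian fields of the dinode in @buf into the VFS inode
 * and the gfs2_dinode_host, sanity-checking the block address, metadata
 * height and directory depth along the way.
 */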
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	struct gfs2_dinode_host *di = &ip->i_di;
	const struct gfs2_dinode *str = buf;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;

	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	};

	ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
	ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
	/*
	 * We will need to review setting the nlink count here in the
	 * light of the forthcoming ro bind mount work. This is a reminder
	 * to do that.
	 */
	ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
	di->di_size = be64_to_cpu(str->di_size);
	i_size_write(&ip->i_inode, di->di_size);
	di->di_blocks = be64_to_cpu(str->di_blocks);
	gfs2_set_inode_blocks(&ip->i_inode);
	ip->i_inode.i_atime.tv_sec = be64_to_cpu(str->di_atime);
	ip->i_inode.i_atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	di->di_goal_meta = be64_to_cpu(str->di_goal_meta);
	di->di_goal_data = be64_to_cpu(str->di_goal_data);
	di->di_generation = be64_to_cpu(str->di_generation);

	di->di_flags = be32_to_cpu(str->di_flags);
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;

	di->di_entries = be32_to_cpu(str->di_entries);
	di->di_eattr = be64_to_cpu(str->di_eattr);
	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;

corrupt:
	if (gfs2_consist_inode(ip))
		gfs2_dinode_print(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh, GFS2_METATYPE_DI)) {
		brelse(dibh);
		return -EIO;
	}

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}
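
/*
 * gfs2_dinode_dealloc - free the dinode block itself
 *
 * Expects the inode to be down to exactly one allocated block (the dinode
 * itself); otherwise the inode is flagged as inconsistent. Holds the rindex
 * and the resource group covering the dinode while the block is returned to
 * the free pool.
 */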
int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al;
	struct gfs2_rgrpd *rgd;
	int error;

	if (ip->i_di.di_blocks != 1) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
	if (error)
		goto out_qs;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_rindex_relse;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
				   &al->al_rgd_gh);
	if (error)
		goto out_rindex_relse;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, 1);
	if (error)
		goto out_rg_gunlock;

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GLF_LFLUSH, &ip->i_gl->gl_flags);

	gfs2_free_di(rgd, ip);

	gfs2_trans_end(sdp);
	clear_bit(GLF_STICKY, &ip->i_gl->gl_flags);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&al->al_rgd_gh);
out_rindex_relse:
	gfs2_glock_dq_uninit(&al->al_ri_gh);
out_qs:
	gfs2_quota_unhold(ip);
out:
	gfs2_alloc_put(ip);
	return error;
}

/**
 * gfs2_change_nlink - Change nlink count on inode
 * @ip: The GFS2 inode
 * @diff: The change in the nlink count required
 *
 * Returns: errno
 */

int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
{
	struct buffer_head *dibh;
	u32 nlink;
	int error;

	BUG_ON(diff != 1 && diff != -1);
	nlink = ip->i_inode.i_nlink + diff;

	/* If we are reducing the nlink count, but the new value ends up being
	   bigger than the old one, we must have underflowed. */
	if (diff < 0 && nlink > ip->i_inode.i_nlink) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	if (diff > 0)
		inc_nlink(&ip->i_inode);
	else
		drop_nlink(&ip->i_inode);

	ip->i_inode.i_ctime = CURRENT_TIME;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
	mark_inode_dirty(&ip->i_inode);

	if (ip->i_inode.i_nlink == 0)
		gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */

	return error;
}
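
/*
 * gfs2_lookup_simple - look up a name in a directory without permission
 * checks, returning ERR_PTR(-ENOENT) rather than NULL when the entry does
 * not exist (see the comment in the body below).
 */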
struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
{
	struct qstr qstr;
	struct inode *inode;

	gfs2_str2qstr(&qstr, name);
	inode = gfs2_lookupi(dip, &qstr, 1, NULL);
	/* gfs2_lookupi has inconsistent callers: vfs
	 * related routines expect NULL for no entry found,
	 * gfs2_lookup_simple callers expect ENOENT
	 * and do not check for NULL.
	 */
	if (inode == NULL)
		return ERR_PTR(-ENOENT);
	else
		return inode;
}

/**
 * gfs2_lookupi - Look up a filename in a directory and return its inode
 * @dir: The directory inode
 * @name: The name of the inode to look for
 * @is_root: If 1, ignore the caller's permissions
 * @nd: The nameidata from the VFS lookup (may be NULL)
 *
 * This can be called via the VFS filldir function when NFS is doing
 * a readdirplus and the inode which it's intending to stat isn't
 * already in cache. In this case we must not take the directory glock
 * again, since the readdir call will have already taken that lock.
 *
 * Returns: errno
 */
struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
			   int is_root, struct nameidata *nd)
{
	struct super_block *sb = dir->i_sb;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error = 0;
	struct inode *inode = NULL;
	int unlock = 0;

	if (!name->len || name->len > GFS2_FNAMESIZE)
		return ERR_PTR(-ENAMETOOLONG);

	if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) ||
	    (name->len == 2 && memcmp(name->name, "..", 2) == 0 &&
	     dir == sb->s_root->d_inode)) {
		igrab(dir);
		return dir;
	}

	if (gfs2_glock_is_locked_by_me(dip->i_gl) == 0) {
		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
		if (error)
			return ERR_PTR(error);
		unlock = 1;
	}

	if (!is_root) {
		error = permission(dir, MAY_EXEC, NULL);
		if (error)
			goto out;
	}

	inode = gfs2_dir_search(dir, name);
	if (IS_ERR(inode))
		error = PTR_ERR(inode);
out:
	if (unlock)
		gfs2_glock_dq_uninit(&d_gh);
	if (error == -ENOENT)
		return NULL;
	return inode ? inode : ERR_PTR(error);
}

static void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf)
{
	const struct gfs2_inum_range *str = buf;

	ir->ir_start = be64_to_cpu(str->ir_start);
	ir->ir_length = be64_to_cpu(str->ir_length);
}

static void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf)
{
	struct gfs2_inum_range *str = buf;

	str->ir_start = cpu_to_be64(ir->ir_start);
	str->ir_length = cpu_to_be64(ir->ir_length);
}
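
/*
 * Formal inode number allocation. pick_formal_ino_1() tries to take the
 * next number from this node's local ir_inum range (sd_ir_inode) under a
 * single-dinode transaction; if the range is empty it returns 1 and
 * pick_formal_ino_2() refills the range, GFS2_INUM_QUANTUM numbers at a
 * time, from the counter stored in sd_inum_inode (updated under an
 * exclusive glock so it is safe cluster-wide).
 */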
static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
	struct buffer_head *bh;
	struct gfs2_inum_range_host ir;
	int error;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;
	mutex_lock(&sdp->sd_inum_mutex);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error) {
		mutex_unlock(&sdp->sd_inum_mutex);
		gfs2_trans_end(sdp);
		return error;
	}

	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

	if (ir.ir_length) {
		*formal_ino = ir.ir_start++;
		ir.ir_length--;
		gfs2_trans_add_bh(ip->i_gl, bh, 1);
		gfs2_inum_range_out(&ir,
				    bh->b_data + sizeof(struct gfs2_dinode));
		brelse(bh);
		mutex_unlock(&sdp->sd_inum_mutex);
		gfs2_trans_end(sdp);
		return 0;
	}

	brelse(bh);

	mutex_unlock(&sdp->sd_inum_mutex);
	gfs2_trans_end(sdp);

	return 1;
}

static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode);
	struct gfs2_holder gh;
	struct buffer_head *bh;
	struct gfs2_inum_range_host ir;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out;
	mutex_lock(&sdp->sd_inum_mutex);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_end_trans;

	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

	if (!ir.ir_length) {
		struct buffer_head *m_bh;
		u64 x, y;
		__be64 z;

		error = gfs2_meta_inode_buffer(m_ip, &m_bh);
		if (error)
			goto out_brelse;

		z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
		x = y = be64_to_cpu(z);
		ir.ir_start = x;
		ir.ir_length = GFS2_INUM_QUANTUM;
		x += GFS2_INUM_QUANTUM;
		if (x < y)
			gfs2_consist_inode(m_ip);
		z = cpu_to_be64(x);
		gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
		*(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z;

		brelse(m_bh);
	}

	*formal_ino = ir.ir_start++;
	ir.ir_length--;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	gfs2_inum_range_out(&ir, bh->b_data + sizeof(struct gfs2_dinode));

out_brelse:
	brelse(bh);
out_end_trans:
	mutex_unlock(&sdp->sd_inum_mutex);
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}

static int pick_formal_ino(struct gfs2_sbd *sdp, u64 *inum)
{
	int error;

	error = pick_formal_ino_1(sdp, inum);
	if (error <= 0)
		return error;

	error = pick_formal_ino_2(sdp, inum);

	return error;
}

/**
 * create_ok - OK to create a new on-disk inode here?
 * @dip: Directory in which dinode is to be created
 * @name: Name of new dinode
 * @mode: The mode of the new dinode
 *
 * Returns: errno
 */

static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
		     unsigned int mode)
{
	int error;

	error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
	if (error)
		return error;

	/* Don't create entries in an unlinked directory */
	if (!dip->i_inode.i_nlink)
		return -EPERM;

	error = gfs2_dir_check(&dip->i_inode, name, NULL);
	switch (error) {
	case -ENOENT:
		error = 0;
		break;
	case 0:
		return -EEXIST;
	default:
		return error;
	}

	if (dip->i_di.di_entries == (u32)-1)
		return -EFBIG;
	if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
		return -EMLINK;

	return 0;
}
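
/*
 * munge_mode_uid_gid - pick the owner, group and mode bits for a new inode,
 * honouring the "suiddir" mount option (inherit the directory's uid) and
 * setgid directories (inherit the directory's gid).
 */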
static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
			       unsigned int *uid, unsigned int *gid)
{
	if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
	    (dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) {
		if (S_ISDIR(*mode))
			*mode |= S_ISUID;
		else if (dip->i_inode.i_uid != current->fsuid)
			*mode &= ~07111;
		*uid = dip->i_inode.i_uid;
	} else
		*uid = current->fsuid;

	if (dip->i_inode.i_mode & S_ISGID) {
		if (S_ISDIR(*mode))
			*mode |= S_ISGID;
		*gid = dip->i_inode.i_gid;
	} else
		*gid = current->fsgid;
}
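
/*
 * alloc_dinode - reserve and allocate the on-disk block for a new dinode,
 * returning its block address in @no_addr and its generation number in
 * @generation.
 */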
static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	int error;

	if (gfs2_alloc_get(dip) == NULL)
		return -ENOMEM;

	dip->i_alloc->al_requested = RES_DINODE;
	error = gfs2_inplace_reserve(dip);
	if (error)
		goto out;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS, 0);
	if (error)
		goto out_ipreserv;

	*no_addr = gfs2_alloc_di(dip, generation);

	gfs2_trans_end(sdp);

out_ipreserv:
	gfs2_inplace_release(dip);
out:
	gfs2_alloc_put(dip);
	return error;
}

/**
 * init_dinode - Fill in a new dinode structure
 * @dip: the directory this inode is being created in
 * @gl: The glock covering the new inode
 * @inum: the inode number
 * @mode: the file permissions
 * @uid: The uid of the new inode
 * @gid: The gid of the new inode
 * @generation: The generation number of the new inode
 * @dev: The device number, if this is a device node
 * @bhp: Returns the buffer head holding the new dinode
 *
 */

static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
			const struct gfs2_inum_host *inum, unsigned int mode,
			unsigned int uid, unsigned int gid,
			const u64 *generation, dev_t dev, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_dinode *di;
	struct buffer_head *dibh;
	struct timespec tv = CURRENT_TIME;

	dibh = gfs2_meta_new(gl, inum->no_addr);
	gfs2_trans_add_bh(gl, dibh, 1);
	gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
	di = (struct gfs2_dinode *)dibh->b_data;

	di->di_num.no_formal_ino = cpu_to_be64(inum->no_formal_ino);
	di->di_num.no_addr = cpu_to_be64(inum->no_addr);
	di->di_mode = cpu_to_be32(mode);
	di->di_uid = cpu_to_be32(uid);
	di->di_gid = cpu_to_be32(gid);
	di->di_nlink = 0;
	di->di_size = 0;
	di->di_blocks = cpu_to_be64(1);
	di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(tv.tv_sec);
	di->di_major = cpu_to_be32(MAJOR(dev));
	di->di_minor = cpu_to_be32(MINOR(dev));
	di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
	di->di_generation = cpu_to_be64(*generation);
	di->di_flags = 0;

	if (S_ISREG(mode)) {
		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
		    gfs2_tune_get(sdp, gt_new_files_jdata))
			di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_DIRECTIO) ||
		    gfs2_tune_get(sdp, gt_new_files_directio))
			di->di_flags |= cpu_to_be32(GFS2_DIF_DIRECTIO);
	} else if (S_ISDIR(mode)) {
		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
					    GFS2_DIF_INHERIT_DIRECTIO);
		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
					    GFS2_DIF_INHERIT_JDATA);
	}

	di->__pad1 = 0;
	di->di_payload_format = cpu_to_be32(S_ISDIR(mode) ? GFS2_FORMAT_DE : 0);
	di->di_height = 0;
	di->__pad2 = 0;
	di->__pad3 = 0;
	di->di_depth = 0;
	di->di_entries = 0;
	memset(&di->__pad4, 0, sizeof(di->__pad4));
	di->di_eattr = 0;
	di->di_atime_nsec = cpu_to_be32(tv.tv_nsec);
	di->di_mtime_nsec = cpu_to_be32(tv.tv_nsec);
	di->di_ctime_nsec = cpu_to_be32(tv.tv_nsec);
	memset(&di->di_reserved, 0, sizeof(di->di_reserved));

	set_buffer_uptodate(dibh);

	*bhp = dibh;
}
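
/*
 * make_dinode - set up the new dinode under quota control: charges the
 * owning user/group one inode, opens a transaction and writes the initial
 * dinode block via init_dinode().
 */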
static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
		       unsigned int mode, const struct gfs2_inum_host *inum,
		       const u64 *generation, dev_t dev, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	unsigned int uid, gid;
	int error;

	munge_mode_uid_gid(dip, &mode, &uid, &gid);
	gfs2_alloc_get(dip);

	error = gfs2_quota_lock(dip, uid, gid);
	if (error)
		goto out;

	error = gfs2_quota_check(dip, uid, gid);
	if (error)
		goto out_quota;

	error = gfs2_trans_begin(sdp, RES_DINODE + RES_QUOTA, 0);
	if (error)
		goto out_quota;

	init_dinode(dip, gl, inum, mode, uid, gid, generation, dev, bhp);
	gfs2_quota_change(dip, +1, uid, gid);
	gfs2_trans_end(sdp);

out_quota:
	gfs2_quota_unlock(dip);
out:
	gfs2_alloc_put(dip);
	return error;
}
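
/*
 * link_dinode - add the new inode's directory entry to @dip (growing the
 * directory if gfs2_diradd_alloc_required() says so) and write the initial
 * link count of 1 back to the new dinode.
 */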
static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
		       struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_alloc *al;
	int alloc_required;
	struct buffer_head *dibh;
	int error;

	al = gfs2_alloc_get(dip);

	error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto fail;

	error = alloc_required = gfs2_diradd_alloc_required(&dip->i_inode, name);
	if (alloc_required < 0)
		goto fail_quota_locks;
	if (alloc_required) {
		error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
		if (error)
			goto fail_quota_locks;

		al->al_requested = sdp->sd_max_dirres;

		error = gfs2_inplace_reserve(dip);
		if (error)
			goto fail_quota_locks;

		error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
					 al->al_rgd->rd_length +
					 2 * RES_DINODE +
					 RES_STATFS + RES_QUOTA, 0);
		if (error)
			goto fail_ipreserv;
	} else {
		error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0);
		if (error)
			goto fail_quota_locks;
	}

	error = gfs2_dir_add(&dip->i_inode, name, ip, IF2DT(ip->i_inode.i_mode));
	if (error)
		goto fail_end_trans;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto fail_end_trans;
	ip->i_inode.i_nlink = 1;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
	return 0;

fail_end_trans:
	gfs2_trans_end(sdp);

fail_ipreserv:
	if (dip->i_alloc->al_rgd)
		gfs2_inplace_release(dip);

fail_quota_locks:
	gfs2_quota_unlock(dip);

fail:
	gfs2_alloc_put(dip);
	return error;
}
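
/*
 * gfs2_security_init - ask the security module for an initial security
 * xattr for the new inode and, if one is supplied, store it as a
 * GFS2_EATYPE_SECURITY extended attribute.
 */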
static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip)
{
	int err;
	size_t len;
	void *value;
	char *name;
	struct gfs2_ea_request er;

	err = security_inode_init_security(&ip->i_inode, &dip->i_inode,
					   &name, &value, &len);

	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	memset(&er, 0, sizeof(struct gfs2_ea_request));

	er.er_type = GFS2_EATYPE_SECURITY;
	er.er_name = name;
	er.er_data = value;
	er.er_name_len = strlen(name);
	er.er_data_len = len;

	err = gfs2_ea_set_i(ip, &er);

	kfree(value);
	kfree(name);

	return err;
}

/**
 * gfs2_createi - Create a new inode
 * @ghs: An array of two holders
 * @name: The name of the new file
 * @mode: the permissions on the new inode
 * @dev: For device nodes, the device number
 *
 * @ghs[0] is an initialized holder for the directory
 * @ghs[1] is the holder for the inode lock
 *
 * If the return value is not an error, the glocks on both the directory
 * and the new file are held. A transaction has been started and an
 * inplace reservation is held, as well.
 *
 * Returns: An inode
 */

struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
			   unsigned int mode, dev_t dev)
{
	struct inode *inode = NULL;
	struct gfs2_inode *dip = ghs->gh_gl->gl_object;
	struct inode *dir = &dip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_inum_host inum = { .no_addr = 0, .no_formal_ino = 0 };
	int error;
	u64 generation;
	struct buffer_head *bh = NULL;

	if (!name->len || name->len > GFS2_FNAMESIZE)
		return ERR_PTR(-ENAMETOOLONG);

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
	error = gfs2_glock_nq(ghs);
	if (error)
		goto fail;

	error = create_ok(dip, name, mode);
	if (error)
		goto fail_gunlock;

	error = pick_formal_ino(sdp, &inum.no_formal_ino);
	if (error)
		goto fail_gunlock;

	error = alloc_dinode(dip, &inum.no_addr, &generation);
	if (error)
		goto fail_gunlock;

	error = gfs2_glock_nq_num(sdp, inum.no_addr, &gfs2_inode_glops,
				  LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
	if (error)
		goto fail_gunlock;

	error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev, &bh);
	if (error)
		goto fail_gunlock2;

	inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode),
				  inum.no_addr,
				  inum.no_formal_ino, 0);
	if (IS_ERR(inode))
		goto fail_gunlock2;

	error = gfs2_inode_refresh(GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	error = gfs2_acl_create(dip, GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	error = gfs2_security_init(dip, GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	error = link_dinode(dip, name, GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	if (bh)
		brelse(bh);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	return inode;

fail_gunlock2:
	gfs2_glock_dq_uninit(ghs + 1);
	if (inode)
		iput(inode);
fail_gunlock:
	gfs2_glock_dq(ghs);
fail:
	if (bh)
		brelse(bh);
	return ERR_PTR(error);
}

/**
 * gfs2_rmdiri - Remove a directory
 * @dip: The parent directory of the directory to be removed
 * @name: The name of the directory to be removed
 * @ip: The GFS2 inode of the directory to be removed
 *
 * Assumes Glocks on dip and ip are held
 *
 * Returns: errno
 */

int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
		struct gfs2_inode *ip)
{
	struct qstr dotname;
	int error;

	if (ip->i_di.di_entries != 2) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	error = gfs2_dir_del(dip, name);
	if (error)
		return error;

	error = gfs2_change_nlink(dip, -1);
	if (error)
		return error;

	gfs2_str2qstr(&dotname, ".");
	error = gfs2_dir_del(ip, &dotname);
	if (error)
		return error;

	gfs2_str2qstr(&dotname, "..");
	error = gfs2_dir_del(ip, &dotname);
	if (error)
		return error;

	/* It looks odd, but it really should be done twice */
	error = gfs2_change_nlink(ip, -1);
	if (error)
		return error;

	error = gfs2_change_nlink(ip, -1);
	if (error)
		return error;

	return error;
}

/*
 * gfs2_unlink_ok - check to see that an inode is still in a directory
 * @dip: the directory
 * @name: the name of the file
 * @ip: the inode
 *
 * Assumes that the lock on (at least) @dip is held.
 *
 * Returns: 0 if the parent/child relationship is correct, errno if it isn't
 */

int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
		   const struct gfs2_inode *ip)
{
	int error;

	if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
		return -EPERM;

	if ((dip->i_inode.i_mode & S_ISVTX) &&
	    dip->i_inode.i_uid != current->fsuid &&
	    ip->i_inode.i_uid != current->fsuid && !capable(CAP_FOWNER))
		return -EPERM;

	if (IS_APPEND(&dip->i_inode))
		return -EPERM;

	error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
	if (error)
		return error;

	error = gfs2_dir_check(&dip->i_inode, name, ip);
	if (error)
		return error;

	return 0;
}

/*
 * gfs2_ok_to_move - check if it's ok to move a directory to another directory
 * @this: move this
 * @to: to here
 *
 * Follow @to back to the root and make sure we don't encounter @this
 * Assumes we already hold the rename lock.
 *
 * Returns: errno
 */

int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
{
	struct inode *dir = &to->i_inode;
	struct super_block *sb = dir->i_sb;
	struct inode *tmp;
	struct qstr dotdot;
	int error = 0;

	gfs2_str2qstr(&dotdot, "..");

	igrab(dir);

	for (;;) {
		if (dir == &this->i_inode) {
			error = -EINVAL;
			break;
		}
		if (dir == sb->s_root->d_inode) {
			error = 0;
			break;
		}

		tmp = gfs2_lookupi(dir, &dotdot, 1, NULL);
		if (IS_ERR(tmp)) {
			error = PTR_ERR(tmp);
			break;
		}

		iput(dir);
		dir = tmp;
	}

	iput(dir);

	return error;
}

/**
 * gfs2_readlinki - return the contents of a symlink
 * @ip: the symlink's inode
 * @buf: a pointer to the buffer to be filled
 * @len: a pointer to the length of @buf
 *
 * If @buf is too small, a piece of memory is kmalloc()ed and needs
 * to be freed by the caller.
 *
 * Returns: errno
 */

int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
{
	struct gfs2_holder i_gh;
	struct buffer_head *dibh;
	unsigned int x;
	int error;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
	error = gfs2_glock_nq_atime(&i_gh);
	if (error) {
		gfs2_holder_uninit(&i_gh);
		return error;
	}

	if (!ip->i_di.di_size) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	x = ip->i_di.di_size + 1;
	if (x > *len) {
		*buf = kmalloc(x, GFP_KERNEL);
		if (!*buf) {
			error = -ENOMEM;
			goto out_brelse;
		}
	}

	memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
	*len = x;

out_brelse:
	brelse(dibh);
out:
	gfs2_glock_dq_uninit(&i_gh);
	return error;
}

/**
 * gfs2_glock_nq_atime - Acquire a hold on an inode's glock, and
 *       conditionally update the inode's atime
 * @gh: the holder to acquire
 *
 * Tests atime (access time) for gfs2_read, gfs2_readdir and gfs2_mmap
 * Update if the difference between the current time and the inode's current
 * atime is greater than an interval specified at mount.
 *
 * Returns: errno
 */

int gfs2_glock_nq_atime(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	s64 quantum = gfs2_tune_get(sdp, gt_atime_quantum);
	unsigned int state;
	int flags;
	int error;
	struct timespec tv = CURRENT_TIME;

	if (gfs2_assert_warn(sdp, gh->gh_flags & GL_ATIME) ||
	    gfs2_assert_warn(sdp, !(gh->gh_flags & GL_ASYNC)) ||
	    gfs2_assert_warn(sdp, gl->gl_ops == &gfs2_inode_glops))
		return -EINVAL;

	state = gh->gh_state;
	flags = gh->gh_flags;

	error = gfs2_glock_nq(gh);
	if (error)
		return error;

	if (test_bit(SDF_NOATIME, &sdp->sd_flags) ||
	    (sdp->sd_vfs->s_flags & MS_RDONLY))
		return 0;

	if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) {
		gfs2_glock_dq(gh);
		gfs2_holder_reinit(LM_ST_EXCLUSIVE, gh->gh_flags & ~LM_FLAG_ANY,
				   gh);
		error = gfs2_glock_nq(gh);
		if (error)
			return error;

		/* Verify that atime hasn't been updated while we were
		   trying to get exclusive lock. */

		tv = CURRENT_TIME;
		if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) {
			struct buffer_head *dibh;
			struct gfs2_dinode *di;

			error = gfs2_trans_begin(sdp, RES_DINODE, 0);
			if (error == -EROFS)
				return 0;
			if (error)
				goto fail;

			error = gfs2_meta_inode_buffer(ip, &dibh);
			if (error)
				goto fail_end_trans;

			ip->i_inode.i_atime = tv;

			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
			di = (struct gfs2_dinode *)dibh->b_data;
			di->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
			di->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
			brelse(dibh);

			gfs2_trans_end(sdp);
		}

		/* If someone else has asked for the glock,
		   unlock and let them have it.  Then reacquire
		   in the original state. */
		if (gfs2_glock_is_blocking(gl)) {
			gfs2_glock_dq(gh);
			gfs2_holder_reinit(state, flags, gh);
			return gfs2_glock_nq(gh);
		}
	}

	return 0;

fail_end_trans:
	gfs2_trans_end(sdp);
fail:
	gfs2_glock_dq(gh);
	return error;
}
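
/*
 * __gfs2_setattr_simple - apply an iattr to the inode and write the
 * resulting dinode back; assumes the caller has already opened a
 * transaction (or is running inside one).
 */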
static int __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		error = inode_setattr(&ip->i_inode, attr);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}
	return error;
}

/**
 * gfs2_setattr_simple - change the attributes of an inode
 * @ip: The GFS2 inode
 * @attr: The attributes to set
 *
 * Called with a reference on the vnode.
 *
 * Returns: errno
 */

int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
	int error;

	if (current->journal_info)
		return __gfs2_setattr_simple(ip, attr);

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE, 0);
	if (error)
		return error;

	error = __gfs2_setattr_simple(ip, attr);
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
	return error;
}
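
/*
 * gfs2_dinode_out - write the incore inode back into an on-disk dinode
 * image; the inverse of gfs2_dinode_in().
 */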
void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	const struct gfs2_dinode_host *di = &ip->i_di;
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.__pad0 = 0;
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_header.__pad1 = 0;
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
	str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
	str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
	str->di_size = cpu_to_be64(di->di_size);
	str->di_blocks = cpu_to_be64(di->di_blocks);
	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
	str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);

	str->di_goal_meta = cpu_to_be64(di->di_goal_meta);
	str->di_goal_data = cpu_to_be64(di->di_goal_data);
	str->di_generation = cpu_to_be64(di->di_generation);

	str->di_flags = cpu_to_be32(di->di_flags);
	str->di_height = cpu_to_be16(ip->i_height);
	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
					     !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(di->di_entries);

	str->di_eattr = cpu_to_be64(di->di_eattr);
	str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
	str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
	str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
}
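
/*
 * gfs2_dinode_print - dump the main dinode fields to the kernel log;
 * used by the consistency-check paths above when corruption is detected.
 */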
void gfs2_dinode_print(const struct gfs2_inode *ip)
{
	const struct gfs2_dinode_host *di = &ip->i_di;

	printk(KERN_INFO "  no_formal_ino = %llu\n",
	       (unsigned long long)ip->i_no_formal_ino);
	printk(KERN_INFO "  no_addr = %llu\n",
	       (unsigned long long)ip->i_no_addr);
	printk(KERN_INFO "  di_size = %llu\n", (unsigned long long)di->di_size);
	printk(KERN_INFO "  di_blocks = %llu\n",
	       (unsigned long long)di->di_blocks);
	printk(KERN_INFO "  di_goal_meta = %llu\n",
	       (unsigned long long)di->di_goal_meta);
	printk(KERN_INFO "  di_goal_data = %llu\n",
	       (unsigned long long)di->di_goal_data);
	printk(KERN_INFO "  di_flags = 0x%.8X\n", di->di_flags);
	printk(KERN_INFO "  i_height = %u\n", ip->i_height);
	printk(KERN_INFO "  i_depth = %u\n", ip->i_depth);
	printk(KERN_INFO "  di_entries = %u\n", di->di_entries);
	printk(KERN_INFO "  di_eattr = %llu\n",
	       (unsigned long long)di->di_eattr);
}