/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/posix_acl.h>
#include <linux/sort.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/lm_interface.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "acl.h"
#include "bmap.h"
#include "dir.h"
#include "eattr.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "ops_file.h"
#include "ops_inode.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
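
/*
 * Host-endian form of the on-disk struct gfs2_inum_range: the range of
 * formal inode numbers currently reserved for this node (see
 * pick_formal_ino_1() below).
 */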
struct gfs2_inum_range_host {
	u64 ir_start;
	u64 ir_length;
};

static int iget_test(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	u64 *no_addr = opaque;

	if (ip->i_no_addr == *no_addr &&
	    inode->i_private != NULL)
		return 1;

	return 0;
}

static int iget_set(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	u64 *no_addr = opaque;

	inode->i_ino = (unsigned long)*no_addr;
	ip->i_no_addr = *no_addr;
	return 0;
}

struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
{
	unsigned long hash = (unsigned long)no_addr;
	return ilookup5(sb, hash, iget_test, &no_addr);
}

static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
{
	unsigned long hash = (unsigned long)no_addr;
	return iget5_locked(sb, hash, iget_test, iget_set, &no_addr);
}
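
/*
 * The _skip variants below behave like gfs2_iget()/iget_test(), except that
 * they refuse to return an inode which is in the middle of being freed
 * (I_FREEING/I_CLEAR/I_WILL_FREE); see the skip_freeing argument of
 * gfs2_inode_lookup().
 */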
struct gfs2_skip_data {
	u64 no_addr;
	int skipped;
};

static int iget_skip_test(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_skip_data *data = opaque;

	if (ip->i_no_addr == data->no_addr && inode->i_private != NULL) {
		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
			data->skipped = 1;
			return 0;
		}
		return 1;
	}
	return 0;
}

static int iget_skip_set(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_skip_data *data = opaque;

	if (data->skipped)
		return 1;
	inode->i_ino = (unsigned long)(data->no_addr);
	ip->i_no_addr = data->no_addr;
	return 0;
}

static struct inode *gfs2_iget_skip(struct super_block *sb,
				    u64 no_addr)
{
	struct gfs2_skip_data data;
	unsigned long hash = (unsigned long)no_addr;

	data.no_addr = no_addr;
	data.skipped = 0;
	return iget5_locked(sb, hash, iget_skip_test, iget_skip_set, &data);
}

/**
 * The GFS2 lookup code fills in the VFS inode contents based on information
 * obtained from the directory entry inside gfs2_inode_lookup(). This has
 * caused problems with the NFS code path, since its get_dentry routine does
 * not have the relevant directory entry when gfs2_inode_lookup() is invoked,
 * so part of the code from gfs2_inode_lookup() lives here, in gfs2_set_iop().
 *
 * It also clears I_LOCK and I_NEW, via unlock_new_inode().
 **/
void gfs2_set_iop(struct inode *inode)
{
	umode_t mode = inode->i_mode;

	if (S_ISREG(mode)) {
		inode->i_op = &gfs2_file_iops;
		inode->i_fop = &gfs2_file_fops;
		inode->i_mapping->a_ops = &gfs2_file_aops;
	} else if (S_ISDIR(mode)) {
		inode->i_op = &gfs2_dir_iops;
		inode->i_fop = &gfs2_dir_fops;
	} else if (S_ISLNK(mode)) {
		inode->i_op = &gfs2_symlink_iops;
	} else {
		inode->i_op = &gfs2_dev_iops;
	}

	unlock_new_inode(inode);
}

/**
 * gfs2_inode_lookup - Lookup an inode
 * @sb: The super block
 * @type: The type of the inode (DT_UNKNOWN if not known)
 * @no_addr: The inode number
 * @no_formal_ino: The formal inode number
 * @skip_freeing: set this to not return an inode if it is currently
 *                being freed.
 *
 * Returns: A VFS inode, or an error
 */
struct inode *gfs2_inode_lookup(struct super_block *sb,
				unsigned int type,
				u64 no_addr,
				u64 no_formal_ino, int skip_freeing)
{
	struct inode *inode;
	struct gfs2_inode *ip;
	struct gfs2_glock *io_gl;
	int error;

	if (skip_freeing)
		inode = gfs2_iget_skip(sb, no_addr);
	else
		inode = gfs2_iget(sb, no_addr);
	ip = GFS2_I(inode);

	if (!inode)
		return ERR_PTR(-ENOBUFS);

	if (inode->i_state & I_NEW) {
		struct gfs2_sbd *sdp = GFS2_SB(inode);
		inode->i_private = ip;
		ip->i_no_formal_ino = no_formal_ino;

		error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
		if (unlikely(error))
			goto fail;
		ip->i_gl->gl_object = ip;

		error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
		if (unlikely(error))
			goto fail_put;

		set_bit(GIF_INVALID, &ip->i_flags);
		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
		if (unlikely(error))
			goto fail_iopen;
		ip->i_iopen_gh.gh_gl->gl_object = ip;

		gfs2_glock_put(io_gl);

		if ((type == DT_UNKNOWN) && (no_formal_ino == 0))
			goto gfs2_nfsbypass;

		inode->i_mode = DT2IF(type);

		/*
		 * We must read the inode in order to work out its type in
		 * this case. Note that this doesn't happen often as we normally
		 * know the type beforehand. This code path only occurs during
		 * unlinked inode recovery (where it is safe to do this glock,
		 * which is not true in the general case).
		 */
		if (type == DT_UNKNOWN) {
			struct gfs2_holder gh;
			error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
			if (unlikely(error))
				goto fail_glock;
			/* Inode is now uptodate */
			gfs2_glock_dq_uninit(&gh);
		}

		gfs2_set_iop(inode);
	}

gfs2_nfsbypass:
	return inode;

fail_glock:
	gfs2_glock_dq(&ip->i_iopen_gh);
fail_iopen:
	gfs2_glock_put(io_gl);
fail_put:
	ip->i_gl->gl_object = NULL;
	gfs2_glock_put(ip->i_gl);
fail:
	iput(inode);
	return ERR_PTR(error);
}
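
/*
 * gfs2_dinode_in - Copy an on-disk dinode into the incore inode
 * @ip: The GFS2 inode
 * @buf: The buffer containing the big-endian struct gfs2_dinode
 *
 * Returns: 0 on success, or -EIO if the dinode's block address does not
 *          match ip->i_no_addr
 */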
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	struct gfs2_dinode_host *di = &ip->i_di;
	const struct gfs2_dinode *str = buf;

	if (ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
	ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
	/*
	 * We will need to review setting the nlink count here in the
	 * light of the forthcoming ro bind mount work. This is a reminder
	 * to do that.
	 */
	ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
	di->di_size = be64_to_cpu(str->di_size);
	i_size_write(&ip->i_inode, di->di_size);
	di->di_blocks = be64_to_cpu(str->di_blocks);
	gfs2_set_inode_blocks(&ip->i_inode);
	ip->i_inode.i_atime.tv_sec = be64_to_cpu(str->di_atime);
	ip->i_inode.i_atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	di->di_goal_meta = be64_to_cpu(str->di_goal_meta);
	di->di_goal_data = be64_to_cpu(str->di_goal_data);
	di->di_generation = be64_to_cpu(str->di_generation);

	di->di_flags = be32_to_cpu(str->di_flags);
	gfs2_set_inode_flags(&ip->i_inode);
	di->di_height = be16_to_cpu(str->di_height);
	di->di_depth = be16_to_cpu(str->di_depth);
	di->di_entries = be32_to_cpu(str->di_entries);
	di->di_eattr = be64_to_cpu(str->di_eattr);
	return 0;
}
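
/*
 * Stash the new dinode's buffer head in the inode's metadata cache. Used by
 * gfs2_createi() just before it calls gfs2_inode_refresh().
 */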
static void gfs2_inode_bh(struct gfs2_inode *ip, struct buffer_head *bh)
{
	ip->i_cache[0] = bh;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */
int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh, GFS2_METATYPE_DI)) {
		brelse(dibh);
		return -EIO;
	}

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al;
	struct gfs2_rgrpd *rgd;
	int error;

	if (ip->i_di.di_blocks != 1) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
	if (error)
		goto out_qs;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_rindex_relse;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
				   &al->al_rgd_gh);
	if (error)
		goto out_rindex_relse;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, 1);
	if (error)
		goto out_rg_gunlock;

	gfs2_trans_add_gl(ip->i_gl);

	gfs2_free_di(rgd, ip);

	gfs2_trans_end(sdp);
	clear_bit(GLF_STICKY, &ip->i_gl->gl_flags);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&al->al_rgd_gh);
out_rindex_relse:
	gfs2_glock_dq_uninit(&al->al_ri_gh);
out_qs:
	gfs2_quota_unhold(ip);
out:
	gfs2_alloc_put(ip);
	return error;
}

/**
 * gfs2_change_nlink - Change nlink count on inode
 * @ip: The GFS2 inode
 * @diff: The change in the nlink count required
 *
 * Returns: errno
 */
int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
{
	struct buffer_head *dibh;
	u32 nlink;
	int error;

	BUG_ON(diff != 1 && diff != -1);
	nlink = ip->i_inode.i_nlink + diff;

	/* If we are reducing the nlink count, but the new value ends up being
	   bigger than the old one, we must have underflowed. */
	if (diff < 0 && nlink > ip->i_inode.i_nlink) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	if (diff > 0)
		inc_nlink(&ip->i_inode);
	else
		drop_nlink(&ip->i_inode);

	ip->i_inode.i_ctime = CURRENT_TIME;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
	mark_inode_dirty(&ip->i_inode);

	if (ip->i_inode.i_nlink == 0)
		gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */

	return error;
}
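
/*
 * gfs2_lookup_simple - Look up a name in a directory, ignoring permissions
 * @dip: The directory to search
 * @name: The name (a plain C string) to look for
 *
 * Returns: The inode, or an ERR_PTR. Unlike gfs2_lookupi(), a missing entry
 *          is reported as ERR_PTR(-ENOENT) rather than NULL.
 */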
struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
{
	struct qstr qstr;
	struct inode *inode;

	gfs2_str2qstr(&qstr, name);
	inode = gfs2_lookupi(dip, &qstr, 1, NULL);
	/* gfs2_lookupi has inconsistent callers: vfs
	 * related routines expect NULL for no entry found,
	 * gfs2_lookup_simple callers expect ENOENT
	 * and do not check for NULL.
	 */
	if (inode == NULL)
		return ERR_PTR(-ENOENT);
	else
		return inode;
}

/**
 * gfs2_lookupi - Look up a filename in a directory and return its inode
 * @dir: The directory inode
 * @name: The name of the inode to look for
 * @is_root: If 1, ignore the caller's permissions
 * @nd: The nameidata passed in from the VFS (may be NULL)
 *
 * This can be called via the VFS filldir function when NFS is doing
 * a readdirplus and the inode which it's intending to stat isn't
 * already in cache. In this case we must not take the directory glock
 * again, since the readdir call will have already taken that lock.
 *
 * Returns: The inode, NULL if the entry does not exist, or an ERR_PTR
 */
struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
			   int is_root, struct nameidata *nd)
{
	struct super_block *sb = dir->i_sb;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error = 0;
	struct inode *inode = NULL;
	int unlock = 0;

	if (!name->len || name->len > GFS2_FNAMESIZE)
		return ERR_PTR(-ENAMETOOLONG);

	if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) ||
	    (name->len == 2 && memcmp(name->name, "..", 2) == 0 &&
	     dir == sb->s_root->d_inode)) {
		igrab(dir);
		return dir;
	}

	if (gfs2_glock_is_locked_by_me(dip->i_gl) == 0) {
		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
		if (error)
			return ERR_PTR(error);
		unlock = 1;
	}

	if (!is_root) {
		error = permission(dir, MAY_EXEC, NULL);
		if (error)
			goto out;
	}

	inode = gfs2_dir_search(dir, name);
	if (IS_ERR(inode))
		error = PTR_ERR(inode);
out:
	if (unlock)
		gfs2_glock_dq_uninit(&d_gh);
	if (error == -ENOENT)
		return NULL;
	return inode ? inode : ERR_PTR(error);
}

static void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf)
{
	const struct gfs2_inum_range *str = buf;

	ir->ir_start = be64_to_cpu(str->ir_start);
	ir->ir_length = be64_to_cpu(str->ir_length);
}

static void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf)
{
	struct gfs2_inum_range *str = buf;

	str->ir_start = cpu_to_be64(ir->ir_start);
	str->ir_length = cpu_to_be64(ir->ir_length);
}
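
/*
 * Formal inode numbers are handed out in two stages: pick_formal_ino_1()
 * takes the next number from the range held in this node's inum-range inode
 * (sd_ir_inode), under sd_inum_mutex. When that range runs out it returns 1,
 * and pick_formal_ino_2() refills the range by carving GFS2_INUM_QUANTUM
 * numbers off the cluster-wide counter in sd_inum_inode, which requires an
 * exclusive glock on that inode.
 */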
static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
	struct buffer_head *bh;
	struct gfs2_inum_range_host ir;
	int error;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;
	mutex_lock(&sdp->sd_inum_mutex);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error) {
		mutex_unlock(&sdp->sd_inum_mutex);
		gfs2_trans_end(sdp);
		return error;
	}

	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

	if (ir.ir_length) {
		*formal_ino = ir.ir_start++;
		ir.ir_length--;
		gfs2_trans_add_bh(ip->i_gl, bh, 1);
		gfs2_inum_range_out(&ir,
				    bh->b_data + sizeof(struct gfs2_dinode));
		brelse(bh);
		mutex_unlock(&sdp->sd_inum_mutex);
		gfs2_trans_end(sdp);
		return 0;
	}

	brelse(bh);

	mutex_unlock(&sdp->sd_inum_mutex);
	gfs2_trans_end(sdp);

	return 1;
}

static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode);
	struct gfs2_holder gh;
	struct buffer_head *bh;
	struct gfs2_inum_range_host ir;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out;
	mutex_lock(&sdp->sd_inum_mutex);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_end_trans;

	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

	if (!ir.ir_length) {
		struct buffer_head *m_bh;
		u64 x, y;
		__be64 z;

		error = gfs2_meta_inode_buffer(m_ip, &m_bh);
		if (error)
			goto out_brelse;

		z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
		x = y = be64_to_cpu(z);
		ir.ir_start = x;
		ir.ir_length = GFS2_INUM_QUANTUM;
		x += GFS2_INUM_QUANTUM;
		if (x < y)
			gfs2_consist_inode(m_ip);
		z = cpu_to_be64(x);
		gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
		*(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z;

		brelse(m_bh);
	}

	*formal_ino = ir.ir_start++;
	ir.ir_length--;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	gfs2_inum_range_out(&ir, bh->b_data + sizeof(struct gfs2_dinode));

out_brelse:
	brelse(bh);
out_end_trans:
	mutex_unlock(&sdp->sd_inum_mutex);
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}

static int pick_formal_ino(struct gfs2_sbd *sdp, u64 *inum)
{
	int error;

	error = pick_formal_ino_1(sdp, inum);
	if (error <= 0)
		return error;

	error = pick_formal_ino_2(sdp, inum);

	return error;
}

/**
 * create_ok - OK to create a new on-disk inode here?
 * @dip: Directory in which dinode is to be created
 * @name: Name of new dinode
 * @mode: The file mode of the new dinode
 *
 * Returns: errno
 */
static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
		     unsigned int mode)
{
	int error;

	error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
	if (error)
		return error;

	/* Don't create entries in an unlinked directory */
	if (!dip->i_inode.i_nlink)
		return -EPERM;

	error = gfs2_dir_check(&dip->i_inode, name, NULL);
	switch (error) {
	case -ENOENT:
		error = 0;
		break;
	case 0:
		return -EEXIST;
	default:
		return error;
	}

	if (dip->i_di.di_entries == (u32)-1)
		return -EFBIG;
	if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
		return -EMLINK;

	return 0;
}
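
/*
 * munge_mode_uid_gid - Choose the mode, uid and gid of a new inode
 *
 * Implements the usual setgid-directory inheritance plus GFS2's "suiddir"
 * mount option: when suiddir is enabled and the parent directory is setuid
 * and not owned by root, the new inode takes the directory owner's uid,
 * new subdirectories inherit the setuid bit, and files created by other
 * users get their setid, sticky and execute bits cleared (mode &= ~07111).
 */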
static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
			       unsigned int *uid, unsigned int *gid)
{
	if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
	    (dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) {
		if (S_ISDIR(*mode))
			*mode |= S_ISUID;
		else if (dip->i_inode.i_uid != current->fsuid)
			*mode &= ~07111;
		*uid = dip->i_inode.i_uid;
	} else
		*uid = current->fsuid;

	if (dip->i_inode.i_mode & S_ISGID) {
		if (S_ISDIR(*mode))
			*mode |= S_ISGID;
		*gid = dip->i_inode.i_gid;
	} else
		*gid = current->fsgid;
}

static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	int error;

	gfs2_alloc_get(dip);

	dip->i_alloc.al_requested = RES_DINODE;
	error = gfs2_inplace_reserve(dip);
	if (error)
		goto out;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS, 0);
	if (error)
		goto out_ipreserv;

	*no_addr = gfs2_alloc_di(dip, generation);

	gfs2_trans_end(sdp);

out_ipreserv:
	gfs2_inplace_release(dip);

out:
	gfs2_alloc_put(dip);

	return error;
}

/**
 * init_dinode - Fill in a new dinode structure
 * @dip: the directory this inode is being created in
 * @gl: The glock covering the new inode
 * @inum: the inode number
 * @mode: the file permissions
 * @uid: the uid of the new inode
 * @gid: the gid of the new inode
 * @generation: the dinode generation number
 * @dev: the device number (for device special files)
 * @bhp: used to return the buffer head of the new dinode
 *
 */
static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
			const struct gfs2_inum_host *inum, unsigned int mode,
			unsigned int uid, unsigned int gid,
			const u64 *generation, dev_t dev, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_dinode *di;
	struct buffer_head *dibh;
	struct timespec tv = CURRENT_TIME;

	dibh = gfs2_meta_new(gl, inum->no_addr);
	gfs2_trans_add_bh(gl, dibh, 1);
	gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
	di = (struct gfs2_dinode *)dibh->b_data;

	di->di_num.no_formal_ino = cpu_to_be64(inum->no_formal_ino);
	di->di_num.no_addr = cpu_to_be64(inum->no_addr);
	di->di_mode = cpu_to_be32(mode);
	di->di_uid = cpu_to_be32(uid);
	di->di_gid = cpu_to_be32(gid);
	di->di_nlink = 0;
	di->di_size = 0;
	di->di_blocks = cpu_to_be64(1);
	di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(tv.tv_sec);
	di->di_major = cpu_to_be32(MAJOR(dev));
	di->di_minor = cpu_to_be32(MINOR(dev));
	di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
	di->di_generation = cpu_to_be64(*generation);
	di->di_flags = 0;

	if (S_ISREG(mode)) {
		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
		    gfs2_tune_get(sdp, gt_new_files_jdata))
			di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_DIRECTIO) ||
		    gfs2_tune_get(sdp, gt_new_files_directio))
			di->di_flags |= cpu_to_be32(GFS2_DIF_DIRECTIO);
	} else if (S_ISDIR(mode)) {
		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
					    GFS2_DIF_INHERIT_DIRECTIO);
		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
					    GFS2_DIF_INHERIT_JDATA);
	}

	di->__pad1 = 0;
	di->di_payload_format = cpu_to_be32(S_ISDIR(mode) ? GFS2_FORMAT_DE : 0);
	di->di_height = 0;
	di->__pad2 = 0;
	di->__pad3 = 0;
	di->di_depth = 0;
	di->di_entries = 0;
	memset(&di->__pad4, 0, sizeof(di->__pad4));
	di->di_eattr = 0;
	di->di_atime_nsec = cpu_to_be32(tv.tv_nsec);
	di->di_mtime_nsec = cpu_to_be32(tv.tv_nsec);
	di->di_ctime_nsec = cpu_to_be32(tv.tv_nsec);
	memset(&di->di_reserved, 0, sizeof(di->di_reserved));

	set_buffer_uptodate(dibh);

	*bhp = dibh;
}

static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
		       unsigned int mode, const struct gfs2_inum_host *inum,
		       const u64 *generation, dev_t dev, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	unsigned int uid, gid;
	int error;

	munge_mode_uid_gid(dip, &mode, &uid, &gid);
	gfs2_alloc_get(dip);

	error = gfs2_quota_lock(dip, uid, gid);
	if (error)
		goto out;

	error = gfs2_quota_check(dip, uid, gid);
	if (error)
		goto out_quota;

	error = gfs2_trans_begin(sdp, RES_DINODE + RES_QUOTA, 0);
	if (error)
		goto out_quota;

	init_dinode(dip, gl, inum, mode, uid, gid, generation, dev, bhp);
	gfs2_quota_change(dip, +1, uid, gid);
	gfs2_trans_end(sdp);

out_quota:
	gfs2_quota_unlock(dip);
out:
	gfs2_alloc_put(dip);
	return error;
}
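
/*
 * link_dinode - Add the new inode to its parent directory
 *
 * Adds a directory entry for @name pointing at @ip and sets the new inode's
 * link count to 1. If the directory needs to grow to hold the new entry, a
 * block reservation is made first and a correspondingly larger transaction
 * is started.
 */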
static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
		       struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_alloc *al;
	int alloc_required;
	struct buffer_head *dibh;
	int error;

	al = gfs2_alloc_get(dip);

	error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto fail;

	error = alloc_required = gfs2_diradd_alloc_required(&dip->i_inode, name);
	if (alloc_required < 0)
		goto fail;
	if (alloc_required) {
		error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
		if (error)
			goto fail_quota_locks;

		al->al_requested = sdp->sd_max_dirres;

		error = gfs2_inplace_reserve(dip);
		if (error)
			goto fail_quota_locks;

		error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
					 al->al_rgd->rd_length +
					 2 * RES_DINODE +
					 RES_STATFS + RES_QUOTA, 0);
		if (error)
			goto fail_ipreserv;
	} else {
		error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0);
		if (error)
			goto fail_quota_locks;
	}

	error = gfs2_dir_add(&dip->i_inode, name, ip, IF2DT(ip->i_inode.i_mode));
	if (error)
		goto fail_end_trans;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto fail_end_trans;
	ip->i_inode.i_nlink = 1;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
	return 0;

fail_end_trans:
	gfs2_trans_end(sdp);

fail_ipreserv:
	if (dip->i_alloc.al_rgd)
		gfs2_inplace_release(dip);

fail_quota_locks:
	gfs2_quota_unlock(dip);

fail:
	gfs2_alloc_put(dip);
	return error;
}
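
/*
 * gfs2_security_init - Ask the security module to label a new inode
 *
 * Requests an initial security xattr from the LSM for the new inode and, if
 * one is provided, stores it as a GFS2_EATYPE_SECURITY extended attribute.
 * -EOPNOTSUPP from the LSM is not treated as an error.
 */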
static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip)
{
	int err;
	size_t len;
	void *value;
	char *name;
	struct gfs2_ea_request er;

	err = security_inode_init_security(&ip->i_inode, &dip->i_inode,
					   &name, &value, &len);

	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	memset(&er, 0, sizeof(struct gfs2_ea_request));

	er.er_type = GFS2_EATYPE_SECURITY;
	er.er_name = name;
	er.er_data = value;
	er.er_name_len = strlen(name);
	er.er_data_len = len;

	err = gfs2_ea_set_i(ip, &er);

	kfree(value);
	kfree(name);

	return err;
}

/**
 * gfs2_createi - Create a new inode
 * @ghs: An array of two holders
 * @name: The name of the new file
 * @mode: The permissions on the new inode
 * @dev: The device number, for device special files
 *
 * @ghs[0] is an initialized holder for the directory
 * @ghs[1] is the holder for the inode lock
 *
 * If the return value is not an error, the glocks on both the directory and
 * the new file are held. A transaction has been started and an inplace
 * reservation is held, as well.
 *
 * Returns: An inode
 */
struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
			   unsigned int mode, dev_t dev)
{
	struct inode *inode = NULL;
	struct gfs2_inode *dip = ghs->gh_gl->gl_object;
	struct inode *dir = &dip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_inum_host inum = { .no_addr = 0, .no_formal_ino = 0 };
	int error;
	u64 generation;
	struct buffer_head *bh = NULL;

	if (!name->len || name->len > GFS2_FNAMESIZE)
		return ERR_PTR(-ENAMETOOLONG);

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
	error = gfs2_glock_nq(ghs);
	if (error)
		goto fail;

	error = create_ok(dip, name, mode);
	if (error)
		goto fail_gunlock;

	error = pick_formal_ino(sdp, &inum.no_formal_ino);
	if (error)
		goto fail_gunlock;

	error = alloc_dinode(dip, &inum.no_addr, &generation);
	if (error)
		goto fail_gunlock;

	error = gfs2_glock_nq_num(sdp, inum.no_addr, &gfs2_inode_glops,
				  LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
	if (error)
		goto fail_gunlock;

	error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev, &bh);
	if (error)
		goto fail_gunlock2;

	inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode),
				  inum.no_addr,
				  inum.no_formal_ino, 0);
	if (IS_ERR(inode))
		goto fail_gunlock2;

	gfs2_inode_bh(GFS2_I(inode), bh);

	error = gfs2_inode_refresh(GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	error = gfs2_acl_create(dip, GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	error = gfs2_security_init(dip, GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	error = link_dinode(dip, name, GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	if (!inode)
		return ERR_PTR(-ENOMEM);
	return inode;

fail_gunlock2:
	gfs2_glock_dq_uninit(ghs + 1);
	if (inode)
		iput(inode);
fail_gunlock:
	gfs2_glock_dq(ghs);
fail:
	return ERR_PTR(error);
}

/**
 * gfs2_rmdiri - Remove a directory
 * @dip: The parent directory of the directory to be removed
 * @name: The name of the directory to be removed
 * @ip: The GFS2 inode of the directory to be removed
 *
 * Assumes Glocks on dip and ip are held
 *
 * Returns: errno
 */
int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
		struct gfs2_inode *ip)
{
	struct qstr dotname;
	int error;

	if (ip->i_di.di_entries != 2) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	error = gfs2_dir_del(dip, name);
	if (error)
		return error;

	error = gfs2_change_nlink(dip, -1);
	if (error)
		return error;

	gfs2_str2qstr(&dotname, ".");
	error = gfs2_dir_del(ip, &dotname);
	if (error)
		return error;

	gfs2_str2qstr(&dotname, "..");
	error = gfs2_dir_del(ip, &dotname);
	if (error)
		return error;

	/* It looks odd, but it really should be done twice */
	error = gfs2_change_nlink(ip, -1);
	if (error)
		return error;

	error = gfs2_change_nlink(ip, -1);
	if (error)
		return error;

	return error;
}

/*
 * gfs2_unlink_ok - check to see that an inode is still in a directory
 * @dip: the directory
 * @name: the name of the file
 * @ip: the inode
 *
 * Assumes that the lock on (at least) @dip is held.
 *
 * Returns: 0 if the parent/child relationship is correct, errno if it isn't
 */
int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
		   const struct gfs2_inode *ip)
{
	int error;

	if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
		return -EPERM;

	if ((dip->i_inode.i_mode & S_ISVTX) &&
	    dip->i_inode.i_uid != current->fsuid &&
	    ip->i_inode.i_uid != current->fsuid && !capable(CAP_FOWNER))
		return -EPERM;

	if (IS_APPEND(&dip->i_inode))
		return -EPERM;

	error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
	if (error)
		return error;

	error = gfs2_dir_check(&dip->i_inode, name, ip);
	if (error)
		return error;

	return 0;
}

/*
 * gfs2_ok_to_move - check if it's ok to move a directory to another directory
 * @this: move this
 * @to: to here
 *
 * Follow @to back to the root and make sure we don't encounter @this.
 * Assumes we already hold the rename lock.
 *
 * Returns: errno
 */
int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
{
	struct inode *dir = &to->i_inode;
	struct super_block *sb = dir->i_sb;
	struct inode *tmp;
	struct qstr dotdot;
	int error = 0;

	gfs2_str2qstr(&dotdot, "..");

	igrab(dir);

	for (;;) {
		if (dir == &this->i_inode) {
			error = -EINVAL;
			break;
		}
		if (dir == sb->s_root->d_inode) {
			error = 0;
			break;
		}

		tmp = gfs2_lookupi(dir, &dotdot, 1, NULL);
		if (IS_ERR(tmp)) {
			error = PTR_ERR(tmp);
			break;
		}

		iput(dir);
		dir = tmp;
	}

	iput(dir);

	return error;
}

/**
 * gfs2_readlinki - return the contents of a symlink
 * @ip: the symlink's inode
 * @buf: a pointer to the buffer to be filled
 * @len: a pointer to the length of @buf
 *
 * If @buf is too small, a piece of memory is kmalloc()ed and needs
 * to be freed by the caller.
 *
 * Returns: errno
 */
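
/*
 * Typical calling pattern (sketch; the 64-byte stack buffer is arbitrary):
 * pass a stack buffer and its size, and on return @buf may point at a
 * kmalloc()ed buffer instead, which the caller must free, e.g.
 *
 *	char array[64], *buf = array;
 *	unsigned int len = sizeof(array);
 *	int error = gfs2_readlinki(ip, &buf, &len);
 *	...
 *	if (!error && buf != array)
 *		kfree(buf);
 */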
int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
{
	struct gfs2_holder i_gh;
	struct buffer_head *dibh;
	unsigned int x;
	int error;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
	error = gfs2_glock_nq_atime(&i_gh);
	if (error) {
		gfs2_holder_uninit(&i_gh);
		return error;
	}

	if (!ip->i_di.di_size) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	x = ip->i_di.di_size + 1;
	if (x > *len) {
		*buf = kmalloc(x, GFP_KERNEL);
		if (!*buf) {
			error = -ENOMEM;
			goto out_brelse;
		}
	}

	memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
	*len = x;

out_brelse:
	brelse(dibh);
out:
	gfs2_glock_dq_uninit(&i_gh);
	return error;
}

/**
 * gfs2_glock_nq_atime - Acquire a hold on an inode's glock, and
 *       conditionally update the inode's atime
 * @gh: the holder to acquire
 *
 * Tests atime (access time) for gfs2_read, gfs2_readdir and gfs2_mmap
 * Update if the difference between the current time and the inode's current
 * atime is greater than an interval specified at mount.
 *
 * Returns: errno
 */
int gfs2_glock_nq_atime(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	s64 quantum = gfs2_tune_get(sdp, gt_atime_quantum);
	unsigned int state;
	int flags;
	int error;
	struct timespec tv = CURRENT_TIME;

	if (gfs2_assert_warn(sdp, gh->gh_flags & GL_ATIME) ||
	    gfs2_assert_warn(sdp, !(gh->gh_flags & GL_ASYNC)) ||
	    gfs2_assert_warn(sdp, gl->gl_ops == &gfs2_inode_glops))
		return -EINVAL;

	state = gh->gh_state;
	flags = gh->gh_flags;

	error = gfs2_glock_nq(gh);
	if (error)
		return error;

	if (test_bit(SDF_NOATIME, &sdp->sd_flags) ||
	    (sdp->sd_vfs->s_flags & MS_RDONLY))
		return 0;

	if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) {
		gfs2_glock_dq(gh);
		gfs2_holder_reinit(LM_ST_EXCLUSIVE, gh->gh_flags & ~LM_FLAG_ANY,
				   gh);
		error = gfs2_glock_nq(gh);
		if (error)
			return error;

		/* Verify that atime hasn't been updated while we were
		   trying to get exclusive lock. */

		tv = CURRENT_TIME;
		if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) {
			struct buffer_head *dibh;
			struct gfs2_dinode *di;

			error = gfs2_trans_begin(sdp, RES_DINODE, 0);
			if (error == -EROFS)
				return 0;
			if (error)
				goto fail;

			error = gfs2_meta_inode_buffer(ip, &dibh);
			if (error)
				goto fail_end_trans;

			ip->i_inode.i_atime = tv;

			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
			di = (struct gfs2_dinode *)dibh->b_data;
			di->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
			di->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
			brelse(dibh);

			gfs2_trans_end(sdp);
		}

		/* If someone else has asked for the glock,
		   unlock and let them have it. Then reacquire
		   in the original state. */
		if (gfs2_glock_is_blocking(gl)) {
			gfs2_glock_dq(gh);
			gfs2_holder_reinit(state, flags, gh);
			return gfs2_glock_nq(gh);
		}
	}

	return 0;

fail_end_trans:
	gfs2_trans_end(sdp);
fail:
	gfs2_glock_dq(gh);
	return error;
}
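
/*
 * __gfs2_setattr_simple - Apply attribute changes with a transaction open
 *
 * Calls inode_setattr() to update the VFS inode and then writes the dinode
 * back to its buffer. The caller must already have started a transaction
 * (see gfs2_setattr_simple() below).
 */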
static int
__gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		error = inode_setattr(&ip->i_inode, attr);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}
	return error;
}

/**
 * gfs2_setattr_simple - Change the attributes of an inode
 * @ip: The inode
 * @attr: The attributes to set
 *
 * Called with a reference on the vnode.
 *
 * Returns: errno
 */
int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
	int error;

	if (current->journal_info)
		return __gfs2_setattr_simple(ip, attr);

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE, 0);
	if (error)
		return error;

	error = __gfs2_setattr_simple(ip, attr);
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
	return error;
}
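
/*
 * gfs2_dinode_out - Write an incore inode into on-disk dinode form
 * @ip: The GFS2 inode
 * @buf: The buffer to fill with a big-endian struct gfs2_dinode
 *
 * The counterpart of gfs2_dinode_in() above.
 */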
void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	const struct gfs2_dinode_host *di = &ip->i_di;
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.__pad0 = 0;
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_header.__pad1 = 0;
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
	str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
	str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
	str->di_size = cpu_to_be64(di->di_size);
	str->di_blocks = cpu_to_be64(di->di_blocks);
	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
	str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);

	str->di_goal_meta = cpu_to_be64(di->di_goal_meta);
	str->di_goal_data = cpu_to_be64(di->di_goal_data);
	str->di_generation = cpu_to_be64(di->di_generation);

	str->di_flags = cpu_to_be32(di->di_flags);
	str->di_height = cpu_to_be16(di->di_height);
	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
					     !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(di->di_depth);
	str->di_entries = cpu_to_be32(di->di_entries);

	str->di_eattr = cpu_to_be64(di->di_eattr);
	str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
	str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
	str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
}

void gfs2_dinode_print(const struct gfs2_inode *ip)
{
	const struct gfs2_dinode_host *di = &ip->i_di;

	printk(KERN_INFO " no_formal_ino = %llu\n",
	       (unsigned long long)ip->i_no_formal_ino);
	printk(KERN_INFO " no_addr = %llu\n",
	       (unsigned long long)ip->i_no_addr);
	printk(KERN_INFO " di_size = %llu\n", (unsigned long long)di->di_size);
	printk(KERN_INFO " di_blocks = %llu\n",
	       (unsigned long long)di->di_blocks);
	printk(KERN_INFO " di_goal_meta = %llu\n",
	       (unsigned long long)di->di_goal_meta);
	printk(KERN_INFO " di_goal_data = %llu\n",
	       (unsigned long long)di->di_goal_data);
	printk(KERN_INFO " di_flags = 0x%.8X\n", di->di_flags);
	printk(KERN_INFO " di_height = %u\n", di->di_height);
	printk(KERN_INFO " di_depth = %u\n", di->di_depth);
	printk(KERN_INFO " di_entries = %u\n", di->di_entries);
	printk(KERN_INFO " di_eattr = %llu\n",
	       (unsigned long long)di->di_eattr);
}