/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/posix_acl.h>
#include <linux/sort.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/lm_interface.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "acl.h"
#include "bmap.h"
#include "dir.h"
#include "eattr.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "ops_file.h"
#include "ops_inode.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

struct gfs2_inum_range_host {
        u64 ir_start;
        u64 ir_length;
};

static int iget_test(struct inode *inode, void *opaque)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        u64 *no_addr = opaque;

        if (ip->i_no_addr == *no_addr &&
            inode->i_private != NULL)
                return 1;

        return 0;
}

static int iget_set(struct inode *inode, void *opaque)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        u64 *no_addr = opaque;

        inode->i_ino = (unsigned long)*no_addr;
        ip->i_no_addr = *no_addr;
        return 0;
}

struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
{
        unsigned long hash = (unsigned long)no_addr;
        return ilookup5(sb, hash, iget_test, &no_addr);
}

static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
{
        unsigned long hash = (unsigned long)no_addr;
        return iget5_locked(sb, hash, iget_test, iget_set, &no_addr);
}

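/*
 * Illustrative note (not part of the original file): the inode hash key is
 * the disk address truncated to an unsigned long, so iget_test() compares
 * the full 64-bit ip->i_no_addr to resolve any truncation collisions. A
 * hypothetical caller that only wants an already-cached inode might do:
 *
 *	struct inode *inode = gfs2_ilookup(sb, no_addr);
 *	if (inode) {
 *		... use the referenced inode ...
 *		iput(inode);
 *	}
 */
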
/**
 * The GFS2 lookup code fills in the VFS inode contents based on information
 * obtained from the directory entry inside gfs2_inode_lookup(). This has
 * caused issues with the NFS code path, since its get_dentry routine doesn't
 * have the relevant directory entry when gfs2_inode_lookup() is invoked.
 * Part of the code inside gfs2_inode_lookup() needs to be moved around.
 *
 * Clean up I_LOCK and I_NEW as well.
 **/

void gfs2_set_iop(struct inode *inode)
{
        umode_t mode = inode->i_mode;

        if (S_ISREG(mode)) {
                inode->i_op = &gfs2_file_iops;
                inode->i_fop = &gfs2_file_fops;
                inode->i_mapping->a_ops = &gfs2_file_aops;
        } else if (S_ISDIR(mode)) {
                inode->i_op = &gfs2_dir_iops;
                inode->i_fop = &gfs2_dir_fops;
        } else if (S_ISLNK(mode)) {
                inode->i_op = &gfs2_symlink_iops;
        } else {
                inode->i_op = &gfs2_dev_iops;
        }

        unlock_new_inode(inode);
}

/**
 * gfs2_inode_lookup - Lookup an inode
 * @sb: The super block
 * @type: The type of the inode
 * @no_addr: The inode number (disk address of the dinode)
 * @no_formal_ino: The formal inode number
 *
 * Returns: A VFS inode, or an error
 */
struct inode *gfs2_inode_lookup(struct super_block *sb,
                                unsigned int type,
                                u64 no_addr,
                                u64 no_formal_ino)
{
        struct inode *inode = gfs2_iget(sb, no_addr);
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_glock *io_gl;
        int error;

        if (!inode)
                return ERR_PTR(-ENOBUFS);

        if (inode->i_state & I_NEW) {
                struct gfs2_sbd *sdp = GFS2_SB(inode);
                inode->i_private = ip;
                ip->i_no_formal_ino = no_formal_ino;

                error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
                if (unlikely(error))
                        goto fail;
                ip->i_gl->gl_object = ip;

                error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
                if (unlikely(error))
                        goto fail_put;

                set_bit(GIF_INVALID, &ip->i_flags);
                error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
                if (unlikely(error))
                        goto fail_iopen;
                ip->i_iopen_gh.gh_gl->gl_object = ip;

                gfs2_glock_put(io_gl);

                if ((type == DT_UNKNOWN) && (no_formal_ino == 0))
                        goto gfs2_nfsbypass;

                inode->i_mode = DT2IF(type);

                /*
                 * We must read the inode in order to work out its type in
                 * this case. Note that this doesn't happen often as we normally
                 * know the type beforehand. This code path only occurs during
                 * unlinked inode recovery (where it is safe to do this glock,
                 * which is not true in the general case).
                 */
                if (type == DT_UNKNOWN) {
                        struct gfs2_holder gh;
                        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
                        if (unlikely(error))
                                goto fail_glock;
                        /* Inode is now uptodate */
                        gfs2_glock_dq_uninit(&gh);
                }

                gfs2_set_iop(inode);
        }

gfs2_nfsbypass:
        return inode;
fail_glock:
        gfs2_glock_dq(&ip->i_iopen_gh);
fail_iopen:
        gfs2_glock_put(io_gl);
fail_put:
        ip->i_gl->gl_object = NULL;
        gfs2_glock_put(ip->i_gl);
fail:
        iput(inode);
        return ERR_PTR(error);
}

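/*
 * Illustrative sketch (assumption, not in the original file): a caller that
 * knows the inode's type and numbers obtains a VFS inode and then reads the
 * on-disk dinode into it, mirroring what gfs2_createi() does below:
 *
 *	inode = gfs2_inode_lookup(sb, IF2DT(mode), no_addr, no_formal_ino);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	error = gfs2_inode_refresh(GFS2_I(inode));
 */
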
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
        struct gfs2_dinode_host *di = &ip->i_di;
        const struct gfs2_dinode *str = buf;

        if (ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)) {
                if (gfs2_consist_inode(ip))
                        gfs2_dinode_print(ip);
                return -EIO;
        }

        ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
        ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
        ip->i_inode.i_rdev = 0;
        switch (ip->i_inode.i_mode & S_IFMT) {
        case S_IFBLK:
        case S_IFCHR:
                ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
                                           be32_to_cpu(str->di_minor));
                break;
        }

        ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
        ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
        /*
         * We will need to review setting the nlink count here in the
         * light of the forthcoming ro bind mount work. This is a reminder
         * to do that.
         */
        ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
        di->di_size = be64_to_cpu(str->di_size);
        i_size_write(&ip->i_inode, di->di_size);
        di->di_blocks = be64_to_cpu(str->di_blocks);
        gfs2_set_inode_blocks(&ip->i_inode);
        ip->i_inode.i_atime.tv_sec = be64_to_cpu(str->di_atime);
        ip->i_inode.i_atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
        ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
        ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
        ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
        ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

        di->di_goal_meta = be64_to_cpu(str->di_goal_meta);
        di->di_goal_data = be64_to_cpu(str->di_goal_data);
        di->di_generation = be64_to_cpu(str->di_generation);

        di->di_flags = be32_to_cpu(str->di_flags);
        gfs2_set_inode_flags(&ip->i_inode);
        di->di_height = be16_to_cpu(str->di_height);

        di->di_depth = be16_to_cpu(str->di_depth);
        di->di_entries = be32_to_cpu(str->di_entries);

        di->di_eattr = be64_to_cpu(str->di_eattr);

        return 0;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */
int gfs2_inode_refresh(struct gfs2_inode *ip)
{
        struct buffer_head *dibh;
        int error;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh, GFS2_METATYPE_DI)) {
                brelse(dibh);
                return -EIO;
        }

        error = gfs2_dinode_in(ip, dibh->b_data);
        brelse(dibh);
        clear_bit(GIF_INVALID, &ip->i_flags);

        return error;
}

int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_alloc *al;
        struct gfs2_rgrpd *rgd;
        int error;

        if (ip->i_di.di_blocks != 1) {
                if (gfs2_consist_inode(ip))
                        gfs2_dinode_print(ip);
                return -EIO;
        }

        al = gfs2_alloc_get(ip);

        error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
        if (error)
                goto out;

        error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
        if (error)
                goto out_qs;

        rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr);
        if (!rgd) {
                gfs2_consist_inode(ip);
                error = -EIO;
                goto out_rindex_relse;
        }

        error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
                                   &al->al_rgd_gh);
        if (error)
                goto out_rindex_relse;

        error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, 1);
        if (error)
                goto out_rg_gunlock;

        gfs2_trans_add_gl(ip->i_gl);

        gfs2_free_di(rgd, ip);

        gfs2_trans_end(sdp);
        clear_bit(GLF_STICKY, &ip->i_gl->gl_flags);

out_rg_gunlock:
        gfs2_glock_dq_uninit(&al->al_rgd_gh);
out_rindex_relse:
        gfs2_glock_dq_uninit(&al->al_ri_gh);
out_qs:
        gfs2_quota_unhold(ip);
out:
        gfs2_alloc_put(ip);
        return error;
}

/**
 * gfs2_change_nlink - Change nlink count on inode
 * @ip: The GFS2 inode
 * @diff: The change in the nlink count required
 *
 * Returns: errno
 */
int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
{
        struct buffer_head *dibh;
        u32 nlink;
        int error;

        BUG_ON(diff != 1 && diff != -1);
        nlink = ip->i_inode.i_nlink + diff;

        /* If we are reducing the nlink count, but the new value ends up being
           bigger than the old one, we must have underflowed. */
        if (diff < 0 && nlink > ip->i_inode.i_nlink) {
                if (gfs2_consist_inode(ip))
                        gfs2_dinode_print(ip);
                return -EIO;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        if (diff > 0)
                inc_nlink(&ip->i_inode);
        else
                drop_nlink(&ip->i_inode);

        ip->i_inode.i_ctime = CURRENT_TIME;

        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
        gfs2_dinode_out(ip, dibh->b_data);
        brelse(dibh);
        mark_inode_dirty(&ip->i_inode);

        if (ip->i_inode.i_nlink == 0)
                gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */

        return error;
}

struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
{
        struct qstr qstr;
        struct inode *inode;

        gfs2_str2qstr(&qstr, name);
        inode = gfs2_lookupi(dip, &qstr, 1, NULL);
        /* gfs2_lookupi has inconsistent callers: vfs
         * related routines expect NULL for no entry found,
         * gfs2_lookup_simple callers expect ENOENT
         * and do not check for NULL.
         */
        if (inode == NULL)
                return ERR_PTR(-ENOENT);
        else
                return inode;
}

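/*
 * Illustrative sketch (assumption, not in the original file): callers of
 * gfs2_lookup_simple() therefore test only for an error pointer and never
 * for NULL; a hypothetical lookup of an internal file might look like:
 *
 *	struct inode *inode = gfs2_lookup_simple(master_dir, "rindex");
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	... use inode ...
 *	iput(inode);
 */
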
/**
 * gfs2_lookupi - Look up a filename in a directory and return its inode
 * @dir: The directory inode
 * @name: The name of the inode to look for
 * @is_root: If 1, ignore the caller's permissions
 * @nd: The nameidata from the VFS lookup, if any
 *
 * This can be called via the VFS filldir function when NFS is doing
 * a readdirplus and the inode which it's intending to stat isn't
 * already in cache. In this case we must not take the directory glock
 * again, since the readdir call will have already taken that lock.
 *
 * Returns: The inode, NULL if the entry doesn't exist, or an error pointer
 */
struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
                           int is_root, struct nameidata *nd)
{
        struct super_block *sb = dir->i_sb;
        struct gfs2_inode *dip = GFS2_I(dir);
        struct gfs2_holder d_gh;
        int error = 0;
        struct inode *inode = NULL;
        int unlock = 0;

        if (!name->len || name->len > GFS2_FNAMESIZE)
                return ERR_PTR(-ENAMETOOLONG);

        if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) ||
            (name->len == 2 && memcmp(name->name, "..", 2) == 0 &&
             dir == sb->s_root->d_inode)) {
                igrab(dir);
                return dir;
        }

        if (gfs2_glock_is_locked_by_me(dip->i_gl) == 0) {
                error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
                if (error)
                        return ERR_PTR(error);
                unlock = 1;
        }

        if (!is_root) {
                error = permission(dir, MAY_EXEC, NULL);
                if (error)
                        goto out;
        }

        inode = gfs2_dir_search(dir, name);
        if (IS_ERR(inode))
                error = PTR_ERR(inode);
out:
        if (unlock)
                gfs2_glock_dq_uninit(&d_gh);
        if (error == -ENOENT)
                return NULL;
        return inode ? inode : ERR_PTR(error);
}

static void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf)
{
        const struct gfs2_inum_range *str = buf;

        ir->ir_start = be64_to_cpu(str->ir_start);
        ir->ir_length = be64_to_cpu(str->ir_length);
}

static void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf)
{
        struct gfs2_inum_range *str = buf;

        str->ir_start = cpu_to_be64(ir->ir_start);
        str->ir_length = cpu_to_be64(ir->ir_length);
}

static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino)
{
        struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
        struct buffer_head *bh;
        struct gfs2_inum_range_host ir;
        int error;

        error = gfs2_trans_begin(sdp, RES_DINODE, 0);
        if (error)
                return error;
        mutex_lock(&sdp->sd_inum_mutex);

        error = gfs2_meta_inode_buffer(ip, &bh);
        if (error) {
                mutex_unlock(&sdp->sd_inum_mutex);
                gfs2_trans_end(sdp);
                return error;
        }

        gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

        if (ir.ir_length) {
                *formal_ino = ir.ir_start++;
                ir.ir_length--;
                gfs2_trans_add_bh(ip->i_gl, bh, 1);
                gfs2_inum_range_out(&ir,
                                    bh->b_data + sizeof(struct gfs2_dinode));
                brelse(bh);
                mutex_unlock(&sdp->sd_inum_mutex);
                gfs2_trans_end(sdp);
                return 0;
        }

        brelse(bh);

        mutex_unlock(&sdp->sd_inum_mutex);
        gfs2_trans_end(sdp);

        return 1;
}

static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
{
        struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode);
        struct gfs2_holder gh;
        struct buffer_head *bh;
        struct gfs2_inum_range_host ir;
        int error;

        error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        if (error)
                return error;

        error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
        if (error)
                goto out;
        mutex_lock(&sdp->sd_inum_mutex);

        error = gfs2_meta_inode_buffer(ip, &bh);
        if (error)
                goto out_end_trans;

        gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

        if (!ir.ir_length) {
                struct buffer_head *m_bh;
                u64 x, y;
                __be64 z;

                error = gfs2_meta_inode_buffer(m_ip, &m_bh);
                if (error)
                        goto out_brelse;

                z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
                x = y = be64_to_cpu(z);
                ir.ir_start = x;
                ir.ir_length = GFS2_INUM_QUANTUM;
                x += GFS2_INUM_QUANTUM;
                if (x < y)
                        gfs2_consist_inode(m_ip);
                z = cpu_to_be64(x);

                gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
                *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z;

                brelse(m_bh);
        }

        *formal_ino = ir.ir_start++;
        ir.ir_length--;

        gfs2_trans_add_bh(ip->i_gl, bh, 1);
        gfs2_inum_range_out(&ir, bh->b_data + sizeof(struct gfs2_dinode));

out_brelse:
        brelse(bh);
out_end_trans:
        mutex_unlock(&sdp->sd_inum_mutex);
        gfs2_trans_end(sdp);
out:
        gfs2_glock_dq_uninit(&gh);
        return error;
}

static int pick_formal_ino(struct gfs2_sbd *sdp, u64 *inum)
{
        int error;

        error = pick_formal_ino_1(sdp, inum);
        if (error <= 0)
                return error;

        error = pick_formal_ino_2(sdp, inum);

        return error;
}

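/*
 * Illustrative note (not part of the original file): formal inode numbers
 * are allocated in two stages. pick_formal_ino_1() consumes the node-local
 * range stored in sd_ir_inode without taking the cluster-wide lock and
 * returns 1 once that range is exhausted; pick_formal_ino_2() then locks
 * sd_inum_inode and refills the range in GFS2_INUM_QUANTUM-sized chunks.
 * A caller, as in gfs2_createi() below, simply does:
 *
 *	error = pick_formal_ino(sdp, &inum.no_formal_ino);
 *	if (error)
 *		goto fail_gunlock;
 */
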
/**
 * create_ok - OK to create a new on-disk inode here?
 * @dip: Directory in which dinode is to be created
 * @name: Name of new dinode
 * @mode: The file mode of the new dinode
 *
 * Returns: errno
 */
static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
                     unsigned int mode)
{
        int error;

        error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
        if (error)
                return error;

        /* Don't create entries in an unlinked directory */
        if (!dip->i_inode.i_nlink)
                return -EPERM;

        error = gfs2_dir_check(&dip->i_inode, name, NULL);
        switch (error) {
        case -ENOENT:
                error = 0;
                break;
        case 0:
                return -EEXIST;
        default:
                return error;
        }

        if (dip->i_di.di_entries == (u32)-1)
                return -EFBIG;
        if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
                return -EMLINK;

        return 0;
}

static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
                               unsigned int *uid, unsigned int *gid)
{
        if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
            (dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) {
                if (S_ISDIR(*mode))
                        *mode |= S_ISUID;
                else if (dip->i_inode.i_uid != current->fsuid)
                        *mode &= ~07111;
                *uid = dip->i_inode.i_uid;
        } else
                *uid = current->fsuid;

        if (dip->i_inode.i_mode & S_ISGID) {
                if (S_ISDIR(*mode))
                        *mode |= S_ISGID;
                *gid = dip->i_inode.i_gid;
        } else
                *gid = current->fsgid;
}

static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation)
{
        struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
        int error;

        gfs2_alloc_get(dip);

        dip->i_alloc.al_requested = RES_DINODE;
        error = gfs2_inplace_reserve(dip);
        if (error)
                goto out;

        error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS, 0);
        if (error)
                goto out_ipreserv;

        *no_addr = gfs2_alloc_di(dip, generation);

        gfs2_trans_end(sdp);

out_ipreserv:
        gfs2_inplace_release(dip);
out:
        gfs2_alloc_put(dip);
        return error;
}

/**
 * init_dinode - Fill in a new dinode structure
 * @dip: the directory this inode is being created in
 * @gl: The glock covering the new inode
 * @inum: the inode number
 * @mode: the file permissions
 * @uid: the uid of the new inode
 * @gid: the gid of the new inode
 * @generation: the generation number of the new inode
 * @dev: the device numbers, if this is a device node
 *
 */
static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
                        const struct gfs2_inum_host *inum, unsigned int mode,
                        unsigned int uid, unsigned int gid,
                        const u64 *generation, dev_t dev)
{
        struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
        struct gfs2_dinode *di;
        struct buffer_head *dibh;
        struct timespec tv = CURRENT_TIME;

        dibh = gfs2_meta_new(gl, inum->no_addr);
        gfs2_trans_add_bh(gl, dibh, 1);
        gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
        gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
        di = (struct gfs2_dinode *)dibh->b_data;

        di->di_num.no_formal_ino = cpu_to_be64(inum->no_formal_ino);
        di->di_num.no_addr = cpu_to_be64(inum->no_addr);
        di->di_mode = cpu_to_be32(mode);
        di->di_uid = cpu_to_be32(uid);
        di->di_gid = cpu_to_be32(gid);
        di->di_nlink = 0;
        di->di_size = 0;
        di->di_blocks = cpu_to_be64(1);
        di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(tv.tv_sec);
        di->di_major = cpu_to_be32(MAJOR(dev));
        di->di_minor = cpu_to_be32(MINOR(dev));
        di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
        di->di_generation = cpu_to_be64(*generation);
        di->di_flags = 0;

        if (S_ISREG(mode)) {
                if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
                    gfs2_tune_get(sdp, gt_new_files_jdata))
                        di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
                if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_DIRECTIO) ||
                    gfs2_tune_get(sdp, gt_new_files_directio))
                        di->di_flags |= cpu_to_be32(GFS2_DIF_DIRECTIO);
        } else if (S_ISDIR(mode)) {
                di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
                                            GFS2_DIF_INHERIT_DIRECTIO);
                di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
                                            GFS2_DIF_INHERIT_JDATA);
        }

        di->__pad1 = 0;
        di->di_payload_format = cpu_to_be32(S_ISDIR(mode) ? GFS2_FORMAT_DE : 0);
        di->di_height = 0;
        di->__pad2 = 0;
        di->__pad3 = 0;
        di->di_depth = 0;
        di->di_entries = 0;
        memset(&di->__pad4, 0, sizeof(di->__pad4));
        di->di_eattr = 0;
        di->di_atime_nsec = cpu_to_be32(tv.tv_nsec);
        di->di_mtime_nsec = cpu_to_be32(tv.tv_nsec);
        di->di_ctime_nsec = cpu_to_be32(tv.tv_nsec);
        memset(&di->di_reserved, 0, sizeof(di->di_reserved));

        brelse(dibh);
}

static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
                       unsigned int mode, const struct gfs2_inum_host *inum,
                       const u64 *generation, dev_t dev)
{
        struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
        unsigned int uid, gid;
        int error;

        munge_mode_uid_gid(dip, &mode, &uid, &gid);
        gfs2_alloc_get(dip);

        error = gfs2_quota_lock(dip, uid, gid);
        if (error)
                goto out;

        error = gfs2_quota_check(dip, uid, gid);
        if (error)
                goto out_quota;

        error = gfs2_trans_begin(sdp, RES_DINODE + RES_QUOTA, 0);
        if (error)
                goto out_quota;

        init_dinode(dip, gl, inum, mode, uid, gid, generation, dev);
        gfs2_quota_change(dip, +1, uid, gid);
        gfs2_trans_end(sdp);

out_quota:
        gfs2_quota_unlock(dip);
out:
        gfs2_alloc_put(dip);
        return error;
}

static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
                       struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
        struct gfs2_alloc *al;
        int alloc_required;
        struct buffer_head *dibh;
        int error;

        al = gfs2_alloc_get(dip);

        error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
        if (error)
                goto fail;

        error = alloc_required = gfs2_diradd_alloc_required(&dip->i_inode, name);
        if (alloc_required < 0)
                goto fail;
        if (alloc_required) {
                error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
                if (error)
                        goto fail_quota_locks;

                al->al_requested = sdp->sd_max_dirres;

                error = gfs2_inplace_reserve(dip);
                if (error)
                        goto fail_quota_locks;

                error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
                                         al->al_rgd->rd_length +
                                         2 * RES_DINODE +
                                         RES_STATFS + RES_QUOTA, 0);
                if (error)
                        goto fail_ipreserv;
        } else {
                error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0);
                if (error)
                        goto fail_quota_locks;
        }

        error = gfs2_dir_add(&dip->i_inode, name, ip, IF2DT(ip->i_inode.i_mode));
        if (error)
                goto fail_end_trans;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                goto fail_end_trans;
        ip->i_inode.i_nlink = 1;
        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
        gfs2_dinode_out(ip, dibh->b_data);
        brelse(dibh);
        return 0;

fail_end_trans:
        gfs2_trans_end(sdp);

fail_ipreserv:
        if (dip->i_alloc.al_rgd)
                gfs2_inplace_release(dip);

fail_quota_locks:
        gfs2_quota_unlock(dip);

fail:
        gfs2_alloc_put(dip);
        return error;
}

static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip)
{
        int err;
        size_t len;
        void *value;
        char *name;
        struct gfs2_ea_request er;

        err = security_inode_init_security(&ip->i_inode, &dip->i_inode,
                                           &name, &value, &len);

        if (err) {
                if (err == -EOPNOTSUPP)
                        return 0;
                return err;
        }

        memset(&er, 0, sizeof(struct gfs2_ea_request));

        er.er_type = GFS2_EATYPE_SECURITY;
        er.er_name = name;
        er.er_data = value;
        er.er_name_len = strlen(name);
        er.er_data_len = len;

        err = gfs2_ea_set_i(ip, &er);

        kfree(value);
        kfree(name);

        return err;
}

/**
 * gfs2_createi - Create a new inode
 * @ghs: An array of two holders
 * @name: The name of the new file
 * @mode: The permissions on the new inode
 * @dev: The device numbers, if this is a device node
 *
 * @ghs[0] is an initialized holder for the directory
 * @ghs[1] is the holder for the inode lock
 *
 * If the return value is not an error, the glocks on both the directory and
 * the new file are held. A transaction has been started and an inplace
 * reservation is held, as well.
 *
 * Returns: An inode
 */
struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
                           unsigned int mode, dev_t dev)
{
        struct inode *inode = NULL;
        struct gfs2_inode *dip = ghs->gh_gl->gl_object;
        struct inode *dir = &dip->i_inode;
        struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
        struct gfs2_inum_host inum = { .no_addr = 0, .no_formal_ino = 0 };
        int error;
        u64 generation;

        if (!name->len || name->len > GFS2_FNAMESIZE)
                return ERR_PTR(-ENAMETOOLONG);

        gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
        error = gfs2_glock_nq(ghs);
        if (error)
                goto fail;

        error = create_ok(dip, name, mode);
        if (error)
                goto fail_gunlock;

        error = pick_formal_ino(sdp, &inum.no_formal_ino);
        if (error)
                goto fail_gunlock;

        error = alloc_dinode(dip, &inum.no_addr, &generation);
        if (error)
                goto fail_gunlock;

        error = gfs2_glock_nq_num(sdp, inum.no_addr, &gfs2_inode_glops,
                                  LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
        if (error)
                goto fail_gunlock;

        error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev);
        if (error)
                goto fail_gunlock2;

        inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode),
                                  inum.no_addr,
                                  inum.no_formal_ino);
        if (IS_ERR(inode))
                goto fail_gunlock2;

        error = gfs2_inode_refresh(GFS2_I(inode));
        if (error)
                goto fail_gunlock2;

        error = gfs2_acl_create(dip, GFS2_I(inode));
        if (error)
                goto fail_gunlock2;

        error = gfs2_security_init(dip, GFS2_I(inode));
        if (error)
                goto fail_gunlock2;

        error = link_dinode(dip, name, GFS2_I(inode));
        if (error)
                goto fail_gunlock2;

        if (!inode)
                return ERR_PTR(-ENOMEM);
        return inode;

fail_gunlock2:
        gfs2_glock_dq_uninit(ghs + 1);
        if (inode)
                iput(inode);
fail_gunlock:
        gfs2_glock_dq(ghs);
fail:
        return ERR_PTR(error);
}

/**
 * gfs2_rmdiri - Remove a directory
 * @dip: The parent directory of the directory to be removed
 * @name: The name of the directory to be removed
 * @ip: The GFS2 inode of the directory to be removed
 *
 * Assumes Glocks on dip and ip are held
 *
 * Returns: errno
 */
int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
                struct gfs2_inode *ip)
{
        struct qstr dotname;
        int error;

        if (ip->i_di.di_entries != 2) {
                if (gfs2_consist_inode(ip))
                        gfs2_dinode_print(ip);
                return -EIO;
        }

        error = gfs2_dir_del(dip, name);
        if (error)
                return error;

        error = gfs2_change_nlink(dip, -1);
        if (error)
                return error;

        gfs2_str2qstr(&dotname, ".");
        error = gfs2_dir_del(ip, &dotname);
        if (error)
                return error;

        gfs2_str2qstr(&dotname, "..");
        error = gfs2_dir_del(ip, &dotname);
        if (error)
                return error;

        /* It looks odd, but it really should be done twice: once for the
           entry removed from @dip and once for the directory's own "."
           entry, both of which contributed to @ip's link count. */
        error = gfs2_change_nlink(ip, -1);
        if (error)
                return error;

        error = gfs2_change_nlink(ip, -1);
        if (error)
                return error;

        return error;
}

/*
 * gfs2_unlink_ok - check to see that an inode is still in a directory
 * @dip: the directory
 * @name: the name of the file
 * @ip: the inode
 *
 * Assumes that the lock on (at least) @dip is held.
 *
 * Returns: 0 if the parent/child relationship is correct, errno if it isn't
 */
int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
                   const struct gfs2_inode *ip)
{
        int error;

        if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
                return -EPERM;

        if ((dip->i_inode.i_mode & S_ISVTX) &&
            dip->i_inode.i_uid != current->fsuid &&
            ip->i_inode.i_uid != current->fsuid && !capable(CAP_FOWNER))
                return -EPERM;

        if (IS_APPEND(&dip->i_inode))
                return -EPERM;

        error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
        if (error)
                return error;

        error = gfs2_dir_check(&dip->i_inode, name, ip);
        if (error)
                return error;

        return 0;
}

/*
 * gfs2_ok_to_move - check if it's ok to move a directory to another directory
 * @this: move this
 * @to: to here
 *
 * Follow @to back to the root and make sure we don't encounter @this.
 * Assumes we already hold the rename lock.
 *
 * Returns: errno
 */
int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
{
        struct inode *dir = &to->i_inode;
        struct super_block *sb = dir->i_sb;
        struct inode *tmp;
        struct qstr dotdot;
        int error = 0;

        gfs2_str2qstr(&dotdot, "..");

        igrab(dir);

        for (;;) {
                if (dir == &this->i_inode) {
                        error = -EINVAL;
                        break;
                }
                if (dir == sb->s_root->d_inode) {
                        error = 0;
                        break;
                }

                tmp = gfs2_lookupi(dir, &dotdot, 1, NULL);
                if (IS_ERR(tmp)) {
                        error = PTR_ERR(tmp);
                        break;
                }

                iput(dir);
                dir = tmp;
        }

        iput(dir);

        return error;
}

/**
 * gfs2_readlinki - return the contents of a symlink
 * @ip: the symlink's inode
 * @buf: a pointer to the buffer to be filled
 * @len: a pointer to the length of @buf
 *
 * If @buf is too small, a piece of memory is kmalloc()ed and needs
 * to be freed by the caller.
 *
 * Returns: errno
 */
int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
{
        struct gfs2_holder i_gh;
        struct buffer_head *dibh;
        unsigned int x;
        int error;

        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
        error = gfs2_glock_nq_atime(&i_gh);
        if (error) {
                gfs2_holder_uninit(&i_gh);
                return error;
        }

        if (!ip->i_di.di_size) {
                gfs2_consist_inode(ip);
                error = -EIO;
                goto out;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                goto out;

        x = ip->i_di.di_size + 1;
        if (x > *len) {
                *buf = kmalloc(x, GFP_KERNEL);
                if (!*buf) {
                        error = -ENOMEM;
                        goto out_brelse;
                }
        }

        memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
        *len = x;

out_brelse:
        brelse(dibh);
out:
        gfs2_glock_dq_uninit(&i_gh);
        return error;
}

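/*
 * Illustrative sketch (assumption, not in the original file): a caller
 * passes a stack buffer and its length; gfs2_readlinki() only kmalloc()s a
 * replacement when the link is longer, so the caller frees the buffer only
 * if the pointer was changed:
 *
 *	char array[64], *buf = array;
 *	unsigned int len = sizeof(array);
 *	int error = gfs2_readlinki(ip, &buf, &len);
 *	if (!error && buf != array)
 *		kfree(buf);
 */
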
/**
 * gfs2_glock_nq_atime - Acquire a hold on an inode's glock, and
 *                       conditionally update the inode's atime
 * @gh: the holder to acquire
 *
 * Tests atime (access time) for gfs2_read, gfs2_readdir and gfs2_mmap.
 * Updates atime if the difference between the current time and the inode's
 * current atime is greater than an interval specified at mount.
 *
 * Returns: errno
 */
int gfs2_glock_nq_atime(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_inode *ip = gl->gl_object;
        s64 quantum = gfs2_tune_get(sdp, gt_atime_quantum);
        unsigned int state;
        int flags;
        int error;
        struct timespec tv = CURRENT_TIME;

        if (gfs2_assert_warn(sdp, gh->gh_flags & GL_ATIME) ||
            gfs2_assert_warn(sdp, !(gh->gh_flags & GL_ASYNC)) ||
            gfs2_assert_warn(sdp, gl->gl_ops == &gfs2_inode_glops))
                return -EINVAL;

        state = gh->gh_state;
        flags = gh->gh_flags;

        error = gfs2_glock_nq(gh);
        if (error)
                return error;

        if (test_bit(SDF_NOATIME, &sdp->sd_flags) ||
            (sdp->sd_vfs->s_flags & MS_RDONLY))
                return 0;

        if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) {
                gfs2_glock_dq(gh);
                gfs2_holder_reinit(LM_ST_EXCLUSIVE, gh->gh_flags & ~LM_FLAG_ANY,
                                   gh);
                error = gfs2_glock_nq(gh);
                if (error)
                        return error;

                /* Verify that atime hasn't been updated while we were
                   trying to get exclusive lock. */

                tv = CURRENT_TIME;
                if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) {
                        struct buffer_head *dibh;
                        struct gfs2_dinode *di;

                        error = gfs2_trans_begin(sdp, RES_DINODE, 0);
                        if (error == -EROFS)
                                return 0;
                        if (error)
                                goto fail;

                        error = gfs2_meta_inode_buffer(ip, &dibh);
                        if (error)
                                goto fail_end_trans;

                        ip->i_inode.i_atime = tv;

                        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                        di = (struct gfs2_dinode *)dibh->b_data;
                        di->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
                        di->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
                        brelse(dibh);

                        gfs2_trans_end(sdp);
                }

                /* If someone else has asked for the glock,
                   unlock and let them have it. Then reacquire
                   in the original state. */
                if (gfs2_glock_is_blocking(gl)) {
                        gfs2_glock_dq(gh);
                        gfs2_holder_reinit(state, flags, gh);
                        return gfs2_glock_nq(gh);
                }
        }

        return 0;

fail_end_trans:
        gfs2_trans_end(sdp);
fail:
        gfs2_glock_dq(gh);
        return error;
}

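/*
 * Illustrative usage sketch (assumption, not in the original file): read-side
 * paths initialise a holder with GL_ATIME and call gfs2_glock_nq_atime()
 * instead of gfs2_glock_nq(), as gfs2_readlinki() does above:
 *
 *	struct gfs2_holder gh;
 *	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
 *	error = gfs2_glock_nq_atime(&gh);
 *	if (error) {
 *		gfs2_holder_uninit(&gh);
 *		return error;
 *	}
 *	...
 *	gfs2_glock_dq_uninit(&gh);
 */
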
static int
__gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
        struct buffer_head *dibh;
        int error;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                error = inode_setattr(&ip->i_inode, attr);
                gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }
        return error;
}

/**
 * gfs2_setattr_simple - Change simple attributes on an inode
 * @ip: The GFS2 inode
 * @attr: The attributes to set
 *
 * Called with a reference on the vnode.
 *
 * Returns: errno
 */
int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
        int error;

        if (current->journal_info)
                return __gfs2_setattr_simple(ip, attr);

        error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE, 0);
        if (error)
                return error;

        error = __gfs2_setattr_simple(ip, attr);
        gfs2_trans_end(GFS2_SB(&ip->i_inode));
        return error;
}

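/*
 * Illustrative sketch (assumption, not in the original file): a caller that
 * already holds an open transaction (current->journal_info != NULL) takes
 * the lightweight path; otherwise a single-block RES_DINODE transaction is
 * wrapped around the update. For example, to change only the mode:
 *
 *	struct iattr attr = { .ia_valid = ATTR_MODE, .ia_mode = mode };
 *	error = gfs2_setattr_simple(ip, &attr);
 */
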
void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
        const struct gfs2_dinode_host *di = &ip->i_di;
        struct gfs2_dinode *str = buf;

        str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
        str->di_header.__pad0 = 0;
        str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
        str->di_header.__pad1 = 0;
        str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
        str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
        str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
        str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
        str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
        str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
        str->di_size = cpu_to_be64(di->di_size);
        str->di_blocks = cpu_to_be64(di->di_blocks);
        str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
        str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
        str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);

        str->di_goal_meta = cpu_to_be64(di->di_goal_meta);
        str->di_goal_data = cpu_to_be64(di->di_goal_data);
        str->di_generation = cpu_to_be64(di->di_generation);

        str->di_flags = cpu_to_be32(di->di_flags);
        str->di_height = cpu_to_be16(di->di_height);
        str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
                                             !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ?
                                             GFS2_FORMAT_DE : 0);
        str->di_depth = cpu_to_be16(di->di_depth);
        str->di_entries = cpu_to_be32(di->di_entries);

        str->di_eattr = cpu_to_be64(di->di_eattr);
        str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
        str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
        str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
}

void gfs2_dinode_print(const struct gfs2_inode *ip)
{
        const struct gfs2_dinode_host *di = &ip->i_di;

        printk(KERN_INFO "  no_formal_ino = %llu\n",
               (unsigned long long)ip->i_no_formal_ino);
        printk(KERN_INFO "  no_addr = %llu\n",
               (unsigned long long)ip->i_no_addr);
        printk(KERN_INFO "  di_size = %llu\n", (unsigned long long)di->di_size);
        printk(KERN_INFO "  di_blocks = %llu\n",
               (unsigned long long)di->di_blocks);
        printk(KERN_INFO "  di_goal_meta = %llu\n",
               (unsigned long long)di->di_goal_meta);
        printk(KERN_INFO "  di_goal_data = %llu\n",
               (unsigned long long)di->di_goal_data);
        printk(KERN_INFO "  di_flags = 0x%.8X\n", di->di_flags);
        printk(KERN_INFO "  di_height = %u\n", di->di_height);
        printk(KERN_INFO "  di_depth = %u\n", di->di_depth);
        printk(KERN_INFO "  di_entries = %u\n", di->di_entries);
        printk(KERN_INFO "  di_eattr = %llu\n",
               (unsigned long long)di->di_eattr);
}