inode.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/posix_acl.h>
#include <linux/sort.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/lm_interface.h>
#include <linux/security.h>
#include <linux/time.h>

#include "gfs2.h"
#include "incore.h"
#include "acl.h"
#include "bmap.h"
#include "dir.h"
#include "eattr.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

struct gfs2_inum_range_host {
	u64 ir_start;
	u64 ir_length;
};
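
/*
 * iget_test() and iget_set() are the match and initialise callbacks used
 * with ilookup5() and iget5_locked() below: inodes are keyed in the inode
 * cache by their disk block address (no_addr), and GIF_USER marks them as
 * user-visible (as opposed to internal) GFS2 inodes.
 */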
static int iget_test(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	u64 *no_addr = opaque;

	if (ip->i_no_addr == *no_addr && test_bit(GIF_USER, &ip->i_flags))
		return 1;

	return 0;
}

static int iget_set(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	u64 *no_addr = opaque;

	inode->i_ino = (unsigned long)*no_addr;
	ip->i_no_addr = *no_addr;
	set_bit(GIF_USER, &ip->i_flags);
	return 0;
}

struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
{
	unsigned long hash = (unsigned long)no_addr;
	return ilookup5(sb, hash, iget_test, &no_addr);
}

static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
{
	unsigned long hash = (unsigned long)no_addr;
	return iget5_locked(sb, hash, iget_test, iget_set, &no_addr);
}
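
/*
 * The _skip variants below behave like gfs2_iget(), except that they refuse
 * to return an inode that is currently being torn down (I_FREEING, I_CLEAR
 * or I_WILL_FREE): iget_skip_test() records that case in data->skipped and
 * iget_skip_set() then aborts the lookup instead of re-initialising the
 * dying inode.
 */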
struct gfs2_skip_data {
	u64 no_addr;
	int skipped;
};

static int iget_skip_test(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_skip_data *data = opaque;

	if (ip->i_no_addr == data->no_addr && test_bit(GIF_USER, &ip->i_flags)) {
		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
			data->skipped = 1;
			return 0;
		}
		return 1;
	}
	return 0;
}

static int iget_skip_set(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_skip_data *data = opaque;

	if (data->skipped)
		return 1;
	inode->i_ino = (unsigned long)(data->no_addr);
	ip->i_no_addr = data->no_addr;
	set_bit(GIF_USER, &ip->i_flags);
	return 0;
}

static struct inode *gfs2_iget_skip(struct super_block *sb,
				    u64 no_addr)
{
	struct gfs2_skip_data data;
	unsigned long hash = (unsigned long)no_addr;

	data.no_addr = no_addr;
	data.skipped = 0;
	return iget5_locked(sb, hash, iget_skip_test, iget_skip_set, &data);
}

/**
 * The GFS2 lookup code fills in VFS inode contents based on information
 * obtained from the directory entry inside gfs2_inode_lookup(). This has
 * caused issues with the NFS code path, since its get_dentry routine doesn't
 * have the relevant directory entry when gfs2_inode_lookup() is invoked.
 * Part of the code inside gfs2_inode_lookup() needs to be moved around.
 *
 * Clean up I_LOCK and I_NEW as well.
 **/

void gfs2_set_iop(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	umode_t mode = inode->i_mode;

	if (S_ISREG(mode)) {
		inode->i_op = &gfs2_file_iops;
		if (sdp->sd_args.ar_localflocks)
			inode->i_fop = &gfs2_file_fops_nolock;
		else
			inode->i_fop = &gfs2_file_fops;
	} else if (S_ISDIR(mode)) {
		inode->i_op = &gfs2_dir_iops;
		if (sdp->sd_args.ar_localflocks)
			inode->i_fop = &gfs2_dir_fops_nolock;
		else
			inode->i_fop = &gfs2_dir_fops;
	} else if (S_ISLNK(mode)) {
		inode->i_op = &gfs2_symlink_iops;
	} else {
		inode->i_op = &gfs2_file_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	}

	unlock_new_inode(inode);
}

/**
 * gfs2_inode_lookup - Lookup an inode
 * @sb: The super block
 * @no_addr: The inode number
 * @type: The type of the inode
 * @skip_freeing: set this to not return an inode if it is currently being freed.
 *
 * Returns: A VFS inode, or an error
 */

struct inode *gfs2_inode_lookup(struct super_block *sb,
				unsigned int type,
				u64 no_addr,
				u64 no_formal_ino, int skip_freeing)
{
	struct inode *inode;
	struct gfs2_inode *ip;
	struct gfs2_glock *io_gl;
	int error;

	if (skip_freeing)
		inode = gfs2_iget_skip(sb, no_addr);
	else
		inode = gfs2_iget(sb, no_addr);
	ip = GFS2_I(inode);

	if (!inode)
		return ERR_PTR(-ENOBUFS);

	if (inode->i_state & I_NEW) {
		struct gfs2_sbd *sdp = GFS2_SB(inode);
		ip->i_no_formal_ino = no_formal_ino;

		error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
		if (unlikely(error))
			goto fail;
		ip->i_gl->gl_object = ip;

		error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
		if (unlikely(error))
			goto fail_put;

		set_bit(GIF_INVALID, &ip->i_flags);
		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
		if (unlikely(error))
			goto fail_iopen;
		ip->i_iopen_gh.gh_gl->gl_object = ip;

		gfs2_glock_put(io_gl);

		if ((type == DT_UNKNOWN) && (no_formal_ino == 0))
			goto gfs2_nfsbypass;

		inode->i_mode = DT2IF(type);

		/*
		 * We must read the inode in order to work out its type in
		 * this case. Note that this doesn't happen often as we normally
		 * know the type beforehand. This code path only occurs during
		 * unlinked inode recovery (where it is safe to do this glock,
		 * which is not true in the general case).
		 */
		if (type == DT_UNKNOWN) {
			struct gfs2_holder gh;
			error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
			if (unlikely(error))
				goto fail_glock;
			/* Inode is now uptodate */
			gfs2_glock_dq_uninit(&gh);
		}

		gfs2_set_iop(inode);
	}

gfs2_nfsbypass:
	return inode;

fail_glock:
	gfs2_glock_dq(&ip->i_iopen_gh);
fail_iopen:
	gfs2_glock_put(io_gl);
fail_put:
	ip->i_gl->gl_object = NULL;
	gfs2_glock_put(ip->i_gl);
fail:
	iget_failed(inode);
	return ERR_PTR(error);
}
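
/*
 * gfs2_dinode_in() copies the on-disk dinode (big-endian fields in @buf)
 * into the in-core gfs2_inode and its embedded VFS inode, sanity-checking
 * the block address, metadata tree height and directory depth as it goes.
 */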
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	struct gfs2_dinode_host *di = &ip->i_di;
	const struct gfs2_dinode *str = buf;
	struct timespec atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
	ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
	/*
	 * We will need to review setting the nlink count here in the
	 * light of the forthcoming ro bind mount work. This is a reminder
	 * to do that.
	 */
	ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
	di->di_size = be64_to_cpu(str->di_size);
	i_size_write(&ip->i_inode, di->di_size);
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	di->di_flags = be32_to_cpu(str->di_flags);
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	ip->i_eattr = be64_to_cpu(str->di_eattr);
	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	if (gfs2_consist_inode(ip))
		gfs2_dinode_print(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh, GFS2_METATYPE_DI)) {
		brelse(dibh);
		return -EIO;
	}

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}
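
/*
 * gfs2_dinode_dealloc() frees the dinode block itself. By this point all
 * other blocks must already have been deallocated, so anything other than
 * a block count of exactly one is treated as filesystem inconsistency.
 */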
int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al;
	struct gfs2_rgrpd *rgd;
	int error;

	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	al = gfs2_alloc_get(ip);
	if (!al)
		return -ENOMEM;

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
	if (error)
		goto out_qs;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_rindex_relse;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
				   &al->al_rgd_gh);
	if (error)
		goto out_rindex_relse;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, 1);
	if (error)
		goto out_rg_gunlock;

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GLF_LFLUSH, &ip->i_gl->gl_flags);

	gfs2_free_di(rgd, ip);

	gfs2_trans_end(sdp);
	clear_bit(GLF_STICKY, &ip->i_gl->gl_flags);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&al->al_rgd_gh);
out_rindex_relse:
	gfs2_glock_dq_uninit(&al->al_ri_gh);
out_qs:
	gfs2_quota_unhold(ip);
out:
	gfs2_alloc_put(ip);
	return error;
}

/**
 * gfs2_change_nlink - Change nlink count on inode
 * @ip: The GFS2 inode
 * @diff: The change in the nlink count required
 *
 * Returns: errno
 */

int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
{
	struct buffer_head *dibh;
	u32 nlink;
	int error;

	BUG_ON(diff != 1 && diff != -1);
	nlink = ip->i_inode.i_nlink + diff;

	/* If we are reducing the nlink count, but the new value ends up being
	   bigger than the old one, we must have underflowed. */
	if (diff < 0 && nlink > ip->i_inode.i_nlink) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	if (diff > 0)
		inc_nlink(&ip->i_inode);
	else
		drop_nlink(&ip->i_inode);

	ip->i_inode.i_ctime = CURRENT_TIME;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
	mark_inode_dirty(&ip->i_inode);

	if (ip->i_inode.i_nlink == 0)
		gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */

	return error;
}

struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
{
	struct qstr qstr;
	struct inode *inode;

	gfs2_str2qstr(&qstr, name);
	inode = gfs2_lookupi(dip, &qstr, 1);
	/* gfs2_lookupi has inconsistent callers: vfs
	 * related routines expect NULL for no entry found,
	 * gfs2_lookup_simple callers expect ENOENT
	 * and do not check for NULL.
	 */
	if (inode == NULL)
		return ERR_PTR(-ENOENT);
	else
		return inode;
}

/**
 * gfs2_lookupi - Look up a filename in a directory and return its inode
 * @dir: The directory inode
 * @name: The name of the inode to look for
 * @is_root: If 1, ignore the caller's permissions
 *
 * This can be called via the VFS filldir function when NFS is doing
 * a readdirplus and the inode which it's intending to stat isn't
 * already in cache. In this case we must not take the directory glock
 * again, since the readdir call will have already taken that lock.
 *
 * Returns: The inode, NULL if not found, or an error
 */

struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
			   int is_root)
{
	struct super_block *sb = dir->i_sb;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error = 0;
	struct inode *inode = NULL;
	int unlock = 0;

	if (!name->len || name->len > GFS2_FNAMESIZE)
		return ERR_PTR(-ENAMETOOLONG);

	if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) ||
	    (name->len == 2 && memcmp(name->name, "..", 2) == 0 &&
	     dir == sb->s_root->d_inode)) {
		igrab(dir);
		return dir;
	}

	if (gfs2_glock_is_locked_by_me(dip->i_gl) == NULL) {
		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
		if (error)
			return ERR_PTR(error);
		unlock = 1;
	}

	if (!is_root) {
		error = gfs2_permission(dir, MAY_EXEC);
		if (error)
			goto out;
	}

	inode = gfs2_dir_search(dir, name);
	if (IS_ERR(inode))
		error = PTR_ERR(inode);
out:
	if (unlock)
		gfs2_glock_dq_uninit(&d_gh);
	if (error == -ENOENT)
		return NULL;
	return inode ? inode : ERR_PTR(error);
}

static void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf)
{
	const struct gfs2_inum_range *str = buf;

	ir->ir_start = be64_to_cpu(str->ir_start);
	ir->ir_length = be64_to_cpu(str->ir_length);
}

static void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf)
{
	struct gfs2_inum_range *str = buf;

	str->ir_start = cpu_to_be64(ir->ir_start);
	str->ir_length = cpu_to_be64(ir->ir_length);
}
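
/*
 * Formal inode number allocation is two-level: pick_formal_ino_1() hands
 * out the next number from this node's local ir_start/ir_length range
 * without taking the cluster-wide inum glock, and returns 1 once that
 * range is exhausted. pick_formal_ino_2() then refills the local range
 * with GFS2_INUM_QUANTUM numbers from the cluster-wide counter, held
 * under an exclusive glock on the inum inode.
 */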
static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
	struct buffer_head *bh;
	struct gfs2_inum_range_host ir;
	int error;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;
	mutex_lock(&sdp->sd_inum_mutex);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error) {
		mutex_unlock(&sdp->sd_inum_mutex);
		gfs2_trans_end(sdp);
		return error;
	}

	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

	if (ir.ir_length) {
		*formal_ino = ir.ir_start++;
		ir.ir_length--;
		gfs2_trans_add_bh(ip->i_gl, bh, 1);
		gfs2_inum_range_out(&ir,
				    bh->b_data + sizeof(struct gfs2_dinode));
		brelse(bh);
		mutex_unlock(&sdp->sd_inum_mutex);
		gfs2_trans_end(sdp);
		return 0;
	}

	brelse(bh);

	mutex_unlock(&sdp->sd_inum_mutex);
	gfs2_trans_end(sdp);

	return 1;
}

static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode);
	struct gfs2_holder gh;
	struct buffer_head *bh;
	struct gfs2_inum_range_host ir;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out;
	mutex_lock(&sdp->sd_inum_mutex);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_end_trans;

	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

	if (!ir.ir_length) {
		struct buffer_head *m_bh;
		u64 x, y;
		__be64 z;

		error = gfs2_meta_inode_buffer(m_ip, &m_bh);
		if (error)
			goto out_brelse;

		z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
		x = y = be64_to_cpu(z);
		ir.ir_start = x;
		ir.ir_length = GFS2_INUM_QUANTUM;
		x += GFS2_INUM_QUANTUM;
		if (x < y)
			gfs2_consist_inode(m_ip);
		z = cpu_to_be64(x);

		gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
		*(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z;

		brelse(m_bh);
	}

	*formal_ino = ir.ir_start++;
	ir.ir_length--;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	gfs2_inum_range_out(&ir, bh->b_data + sizeof(struct gfs2_dinode));

out_brelse:
	brelse(bh);
out_end_trans:
	mutex_unlock(&sdp->sd_inum_mutex);
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}

static int pick_formal_ino(struct gfs2_sbd *sdp, u64 *inum)
{
	int error;

	error = pick_formal_ino_1(sdp, inum);
	if (error <= 0)
		return error;

	error = pick_formal_ino_2(sdp, inum);

	return error;
}

/**
 * create_ok - OK to create a new on-disk inode here?
 * @dip: Directory in which dinode is to be created
 * @name: Name of new dinode
 * @mode: The file mode of the new dinode
 *
 * Returns: errno
 */

static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
		     unsigned int mode)
{
	int error;

	error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;

	/* Don't create entries in an unlinked directory */
	if (!dip->i_inode.i_nlink)
		return -EPERM;

	error = gfs2_dir_check(&dip->i_inode, name, NULL);
	switch (error) {
	case -ENOENT:
		error = 0;
		break;
	case 0:
		return -EEXIST;
	default:
		return error;
	}

	if (dip->i_entries == (u32)-1)
		return -EFBIG;
	if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
		return -EMLINK;

	return 0;
}
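
/*
 * munge_mode_uid_gid() applies the parent directory's inheritance rules to
 * the new inode's mode, uid and gid: the "suiddir" mount option propagates
 * ownership from a setuid directory, and a setgid directory propagates its
 * group (and, for subdirectories, the setgid bit) to new entries.
 */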
static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
			       unsigned int *uid, unsigned int *gid)
{
	if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
	    (dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) {
		if (S_ISDIR(*mode))
			*mode |= S_ISUID;
		else if (dip->i_inode.i_uid != current_fsuid())
			*mode &= ~07111;
		*uid = dip->i_inode.i_uid;
	} else
		*uid = current_fsuid();

	if (dip->i_inode.i_mode & S_ISGID) {
		if (S_ISDIR(*mode))
			*mode |= S_ISGID;
		*gid = dip->i_inode.i_gid;
	} else
		*gid = current_fsgid();
}

static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	int error;

	if (gfs2_alloc_get(dip) == NULL)
		return -ENOMEM;

	dip->i_alloc->al_requested = RES_DINODE;
	error = gfs2_inplace_reserve(dip);
	if (error)
		goto out;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS, 0);
	if (error)
		goto out_ipreserv;

	*no_addr = gfs2_alloc_di(dip, generation);

	gfs2_trans_end(sdp);

out_ipreserv:
	gfs2_inplace_release(dip);
out:
	gfs2_alloc_put(dip);
	return error;
}

/**
 * init_dinode - Fill in a new dinode structure
 * @dip: the directory this inode is being created in
 * @gl: The glock covering the new inode
 * @inum: the inode number
 * @mode: the file permissions
 * @uid: the uid of the new inode
 * @gid: the gid of the new inode
 *
 */

static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
			const struct gfs2_inum_host *inum, unsigned int mode,
			unsigned int uid, unsigned int gid,
			const u64 *generation, dev_t dev, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_dinode *di;
	struct buffer_head *dibh;
	struct timespec tv = CURRENT_TIME;

	dibh = gfs2_meta_new(gl, inum->no_addr);
	gfs2_trans_add_bh(gl, dibh, 1);
	gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
	di = (struct gfs2_dinode *)dibh->b_data;

	di->di_num.no_formal_ino = cpu_to_be64(inum->no_formal_ino);
	di->di_num.no_addr = cpu_to_be64(inum->no_addr);
	di->di_mode = cpu_to_be32(mode);
	di->di_uid = cpu_to_be32(uid);
	di->di_gid = cpu_to_be32(gid);
	di->di_nlink = 0;
	di->di_size = 0;
	di->di_blocks = cpu_to_be64(1);
	di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(tv.tv_sec);
	di->di_major = cpu_to_be32(MAJOR(dev));
	di->di_minor = cpu_to_be32(MINOR(dev));
	di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
	di->di_generation = cpu_to_be64(*generation);
	di->di_flags = 0;

	if (S_ISREG(mode)) {
		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
		    gfs2_tune_get(sdp, gt_new_files_jdata))
			di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
	} else if (S_ISDIR(mode)) {
		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
					    GFS2_DIF_INHERIT_JDATA);
	}

	di->__pad1 = 0;
	di->di_payload_format = cpu_to_be32(S_ISDIR(mode) ? GFS2_FORMAT_DE : 0);
	di->di_height = 0;
	di->__pad2 = 0;
	di->__pad3 = 0;
	di->di_depth = 0;
	di->di_entries = 0;
	memset(&di->__pad4, 0, sizeof(di->__pad4));
	di->di_eattr = 0;
	di->di_atime_nsec = cpu_to_be32(tv.tv_nsec);
	di->di_mtime_nsec = cpu_to_be32(tv.tv_nsec);
	di->di_ctime_nsec = cpu_to_be32(tv.tv_nsec);
	memset(&di->di_reserved, 0, sizeof(di->di_reserved));

	set_buffer_uptodate(dibh);

	*bhp = dibh;
}

static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
		       unsigned int mode, const struct gfs2_inum_host *inum,
		       const u64 *generation, dev_t dev, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	unsigned int uid, gid;
	int error;

	munge_mode_uid_gid(dip, &mode, &uid, &gid);
	if (!gfs2_alloc_get(dip))
		return -ENOMEM;

	error = gfs2_quota_lock(dip, uid, gid);
	if (error)
		goto out;

	error = gfs2_quota_check(dip, uid, gid);
	if (error)
		goto out_quota;

	error = gfs2_trans_begin(sdp, RES_DINODE + RES_QUOTA, 0);
	if (error)
		goto out_quota;

	init_dinode(dip, gl, inum, mode, uid, gid, generation, dev, bhp);
	gfs2_quota_change(dip, +1, uid, gid);
	gfs2_trans_end(sdp);

out_quota:
	gfs2_quota_unlock(dip);
out:
	gfs2_alloc_put(dip);
	return error;
}
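
/*
 * link_dinode() adds the directory entry for the freshly created inode to
 * @dip, reserving extra blocks first if gfs2_diradd_alloc_required() says
 * the directory has to grow, and then writes the new inode's dinode with
 * an initial link count of one.
 */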
static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
		       struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_alloc *al;
	int alloc_required;
	struct buffer_head *dibh;
	int error;

	al = gfs2_alloc_get(dip);
	if (!al)
		return -ENOMEM;

	error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto fail;

	error = alloc_required = gfs2_diradd_alloc_required(&dip->i_inode, name);
	if (alloc_required < 0)
		goto fail_quota_locks;
	if (alloc_required) {
		error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
		if (error)
			goto fail_quota_locks;

		al->al_requested = sdp->sd_max_dirres;

		error = gfs2_inplace_reserve(dip);
		if (error)
			goto fail_quota_locks;

		error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
					 al->al_rgd->rd_length +
					 2 * RES_DINODE +
					 RES_STATFS + RES_QUOTA, 0);
		if (error)
			goto fail_ipreserv;
	} else {
		error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0);
		if (error)
			goto fail_quota_locks;
	}

	error = gfs2_dir_add(&dip->i_inode, name, ip, IF2DT(ip->i_inode.i_mode));
	if (error)
		goto fail_end_trans;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto fail_end_trans;
	ip->i_inode.i_nlink = 1;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
	return 0;

fail_end_trans:
	gfs2_trans_end(sdp);

fail_ipreserv:
	if (dip->i_alloc->al_rgd)
		gfs2_inplace_release(dip);

fail_quota_locks:
	gfs2_quota_unlock(dip);

fail:
	gfs2_alloc_put(dip);
	return error;
}
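
/*
 * gfs2_security_init() asks the security module for an initial security
 * label for the new inode and, unless labelling is unsupported, stores it
 * as a GFS2_EATYPE_SECURITY extended attribute.
 */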
static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip)
{
	int err;
	size_t len;
	void *value;
	char *name;
	struct gfs2_ea_request er;

	err = security_inode_init_security(&ip->i_inode, &dip->i_inode,
					   &name, &value, &len);

	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	memset(&er, 0, sizeof(struct gfs2_ea_request));

	er.er_type = GFS2_EATYPE_SECURITY;
	er.er_name = name;
	er.er_data = value;
	er.er_name_len = strlen(name);
	er.er_data_len = len;

	err = gfs2_ea_set_i(ip, &er);

	kfree(value);
	kfree(name);

	return err;
}

/**
 * gfs2_createi - Create a new inode
 * @ghs: An array of two holders
 * @name: The name of the new file
 * @mode: the permissions on the new inode
 *
 * @ghs[0] is an initialized holder for the directory
 * @ghs[1] is the holder for the inode lock
 *
 * If the return value is not an error, the glocks on both the directory and the new
 * file are held. A transaction has been started and an inplace reservation
 * is held, as well.
 *
 * Returns: An inode
 */

struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
			   unsigned int mode, dev_t dev)
{
	struct inode *inode = NULL;
	struct gfs2_inode *dip = ghs->gh_gl->gl_object;
	struct inode *dir = &dip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_inum_host inum = { .no_addr = 0, .no_formal_ino = 0 };
	int error;
	u64 generation;
	struct buffer_head *bh = NULL;

	if (!name->len || name->len > GFS2_FNAMESIZE)
		return ERR_PTR(-ENAMETOOLONG);

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
	error = gfs2_glock_nq(ghs);
	if (error)
		goto fail;

	error = create_ok(dip, name, mode);
	if (error)
		goto fail_gunlock;

	error = pick_formal_ino(sdp, &inum.no_formal_ino);
	if (error)
		goto fail_gunlock;

	error = alloc_dinode(dip, &inum.no_addr, &generation);
	if (error)
		goto fail_gunlock;

	error = gfs2_glock_nq_num(sdp, inum.no_addr, &gfs2_inode_glops,
				  LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
	if (error)
		goto fail_gunlock;

	error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev, &bh);
	if (error)
		goto fail_gunlock2;

	inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode),
				  inum.no_addr,
				  inum.no_formal_ino, 0);
	if (IS_ERR(inode))
		goto fail_gunlock2;

	error = gfs2_inode_refresh(GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	error = gfs2_acl_create(dip, GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	error = gfs2_security_init(dip, GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	error = link_dinode(dip, name, GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	if (bh)
		brelse(bh);
	return inode;

fail_gunlock2:
	gfs2_glock_dq_uninit(ghs + 1);
	if (inode && !IS_ERR(inode))
		iput(inode);
fail_gunlock:
	gfs2_glock_dq(ghs);
fail:
	if (bh)
		brelse(bh);
	return ERR_PTR(error);
}

/**
 * gfs2_rmdiri - Remove a directory
 * @dip: The parent directory of the directory to be removed
 * @name: The name of the directory to be removed
 * @ip: The GFS2 inode of the directory to be removed
 *
 * Assumes Glocks on dip and ip are held
 *
 * Returns: errno
 */

int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
		struct gfs2_inode *ip)
{
	struct qstr dotname;
	int error;

	if (ip->i_entries != 2) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	error = gfs2_dir_del(dip, name);
	if (error)
		return error;

	error = gfs2_change_nlink(dip, -1);
	if (error)
		return error;

	gfs2_str2qstr(&dotname, ".");
	error = gfs2_dir_del(ip, &dotname);
	if (error)
		return error;

	gfs2_str2qstr(&dotname, "..");
	error = gfs2_dir_del(ip, &dotname);
	if (error)
		return error;

	/* It looks odd, but it really should be done twice */
	error = gfs2_change_nlink(ip, -1);
	if (error)
		return error;

	error = gfs2_change_nlink(ip, -1);
	if (error)
		return error;

	return error;
}

/*
 * gfs2_unlink_ok - check to see that an inode is still in a directory
 * @dip: the directory
 * @name: the name of the file
 * @ip: the inode
 *
 * Assumes that the lock on (at least) @dip is held.
 *
 * Returns: 0 if the parent/child relationship is correct, errno if it isn't
 */

int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
		   const struct gfs2_inode *ip)
{
	int error;

	if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
		return -EPERM;

	if ((dip->i_inode.i_mode & S_ISVTX) &&
	    dip->i_inode.i_uid != current_fsuid() &&
	    ip->i_inode.i_uid != current_fsuid() && !capable(CAP_FOWNER))
		return -EPERM;

	if (IS_APPEND(&dip->i_inode))
		return -EPERM;

	error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;

	error = gfs2_dir_check(&dip->i_inode, name, ip);
	if (error)
		return error;

	return 0;
}

/**
 * gfs2_readlinki - return the contents of a symlink
 * @ip: the symlink's inode
 * @buf: a pointer to the buffer to be filled
 * @len: a pointer to the length of @buf
 *
 * If @buf is too small, a piece of memory is kmalloc()ed and needs
 * to be freed by the caller.
 *
 * Returns: errno
 */

int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
{
	struct gfs2_holder i_gh;
	struct buffer_head *dibh;
	unsigned int x;
	int error;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
	error = gfs2_glock_nq(&i_gh);
	if (error) {
		gfs2_holder_uninit(&i_gh);
		return error;
	}

	if (!ip->i_di.di_size) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	x = ip->i_di.di_size + 1;
	if (x > *len) {
		*buf = kmalloc(x, GFP_NOFS);
		if (!*buf) {
			error = -ENOMEM;
			goto out_brelse;
		}
	}

	memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
	*len = x;

out_brelse:
	brelse(dibh);
out:
	gfs2_glock_dq_uninit(&i_gh);
	return error;
}

static int
__gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		error = inode_setattr(&ip->i_inode, attr);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}
	return error;
}

/**
 * gfs2_setattr_simple - change attributes on an inode
 * @ip: the inode
 * @attr: the attributes to set
 *
 * Called with a reference on the vnode.
 *
 * Returns: errno
 */

int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
	int error;

	if (current->journal_info)
		return __gfs2_setattr_simple(ip, attr);

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE, 0);
	if (error)
		return error;

	error = __gfs2_setattr_simple(ip, attr);
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
	return error;
}
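
/*
 * gfs2_dinode_out() is the inverse of gfs2_dinode_in(): it serialises the
 * in-core inode fields back into an on-disk dinode buffer, converting to
 * big-endian byte order.
 */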
void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	const struct gfs2_dinode_host *di = &ip->i_di;
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.__pad0 = 0;
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_header.__pad1 = 0;
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
	str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
	str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
	str->di_size = cpu_to_be64(di->di_size);
	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
	str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);

	str->di_goal_meta = cpu_to_be64(ip->i_goal);
	str->di_goal_data = cpu_to_be64(ip->i_goal);
	str->di_generation = cpu_to_be64(ip->i_generation);

	str->di_flags = cpu_to_be32(di->di_flags);
	str->di_height = cpu_to_be16(ip->i_height);
	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
					     !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(ip->i_entries);

	str->di_eattr = cpu_to_be64(ip->i_eattr);
	str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
	str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
	str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
}

void gfs2_dinode_print(const struct gfs2_inode *ip)
{
	const struct gfs2_dinode_host *di = &ip->i_di;

	printk(KERN_INFO "  no_formal_ino = %llu\n",
	       (unsigned long long)ip->i_no_formal_ino);
	printk(KERN_INFO "  no_addr = %llu\n",
	       (unsigned long long)ip->i_no_addr);
	printk(KERN_INFO "  di_size = %llu\n", (unsigned long long)di->di_size);
	printk(KERN_INFO "  blocks = %llu\n",
	       (unsigned long long)gfs2_get_inode_blocks(&ip->i_inode));
	printk(KERN_INFO "  i_goal = %llu\n",
	       (unsigned long long)ip->i_goal);
	printk(KERN_INFO "  di_flags = 0x%.8X\n", di->di_flags);
	printk(KERN_INFO "  i_height = %u\n", ip->i_height);
	printk(KERN_INFO "  i_depth = %u\n", ip->i_depth);
	printk(KERN_INFO "  i_entries = %u\n", ip->i_entries);
	printk(KERN_INFO "  i_eattr = %llu\n",
	       (unsigned long long)ip->i_eattr);
}