/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/posix_acl.h>
#include <linux/sort.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/lm_interface.h>
#include <linux/security.h>
#include <linux/time.h>

#include "gfs2.h"
#include "incore.h"
#include "acl.h"
#include "bmap.h"
#include "dir.h"
#include "eattr.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

struct gfs2_inum_range_host {
	u64 ir_start;
	u64 ir_length;
};

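/*
 * iget_test() and iget_set() are the match and initialisation callbacks
 * passed to ilookup5()/iget5_locked() below. GFS2 keys its inode cache on
 * the dinode's disk block address (no_addr); the VFS hash value is simply
 * that address truncated to an unsigned long, so iget_test() re-checks the
 * full 64-bit address and only matches inodes that have GIF_USER set.
 */
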
static int iget_test(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	u64 *no_addr = opaque;

	if (ip->i_no_addr == *no_addr && test_bit(GIF_USER, &ip->i_flags))
		return 1;

	return 0;
}

static int iget_set(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	u64 *no_addr = opaque;

	inode->i_ino = (unsigned long)*no_addr;
	ip->i_no_addr = *no_addr;
	set_bit(GIF_USER, &ip->i_flags);
	return 0;
}

struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
{
	unsigned long hash = (unsigned long)no_addr;
	return ilookup5(sb, hash, iget_test, &no_addr);
}

static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
{
	unsigned long hash = (unsigned long)no_addr;
	return iget5_locked(sb, hash, iget_test, iget_set, &no_addr);
}

struct gfs2_skip_data {
	u64 no_addr;
	int skipped;
};

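/*
 * The _skip variants below behave like iget_test()/iget_set(), except that
 * a cached inode which is already being torn down (I_FREEING, I_CLEAR or
 * I_WILL_FREE) is not returned: iget_skip_test() records the race in
 * data->skipped and declines the match, and iget_skip_set() then returns
 * non-zero so that no replacement inode is set up for that block address.
 */
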
static int iget_skip_test(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_skip_data *data = opaque;

	if (ip->i_no_addr == data->no_addr && test_bit(GIF_USER, &ip->i_flags)) {
		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
			data->skipped = 1;
			return 0;
		}
		return 1;
	}
	return 0;
}

static int iget_skip_set(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_skip_data *data = opaque;

	if (data->skipped)
		return 1;
	inode->i_ino = (unsigned long)(data->no_addr);
	ip->i_no_addr = data->no_addr;
	set_bit(GIF_USER, &ip->i_flags);
	return 0;
}

static struct inode *gfs2_iget_skip(struct super_block *sb,
				    u64 no_addr)
{
	struct gfs2_skip_data data;
	unsigned long hash = (unsigned long)no_addr;

	data.no_addr = no_addr;
	data.skipped = 0;
	return iget5_locked(sb, hash, iget_skip_test, iget_skip_set, &data);
}

/**
 * GFS2 lookup code fills in vfs inode contents based on info obtained
 * from the directory entry inside gfs2_inode_lookup(). This has caused issues
 * with the NFS code path, since its get_dentry routine doesn't have the
 * relevant directory entry when gfs2_inode_lookup() is invoked. Part of the
 * code inside gfs2_inode_lookup() therefore needs to be moved around.
 *
 * Clean up I_LOCK and I_NEW as well.
 **/

void gfs2_set_iop(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	umode_t mode = inode->i_mode;

	if (S_ISREG(mode)) {
		inode->i_op = &gfs2_file_iops;
		if (sdp->sd_args.ar_localflocks)
			inode->i_fop = &gfs2_file_fops_nolock;
		else
			inode->i_fop = &gfs2_file_fops;
	} else if (S_ISDIR(mode)) {
		inode->i_op = &gfs2_dir_iops;
		if (sdp->sd_args.ar_localflocks)
			inode->i_fop = &gfs2_dir_fops_nolock;
		else
			inode->i_fop = &gfs2_dir_fops;
	} else if (S_ISLNK(mode)) {
		inode->i_op = &gfs2_symlink_iops;
	} else {
		inode->i_op = &gfs2_file_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	}

	unlock_new_inode(inode);
}

/**
 * gfs2_inode_lookup - Lookup an inode
 * @sb: The super block
 * @type: The type of the inode
 * @no_addr: The inode number (disk block address)
 * @no_formal_ino: The inode's formal (NFS) inode number
 * @skip_freeing: set this to not return an inode if it is currently being freed.
 *
 * Returns: A VFS inode, or an error
 */

struct inode *gfs2_inode_lookup(struct super_block *sb,
				unsigned int type,
				u64 no_addr,
				u64 no_formal_ino, int skip_freeing)
{
	struct inode *inode;
	struct gfs2_inode *ip;
	struct gfs2_glock *io_gl;
	int error;

	if (skip_freeing)
		inode = gfs2_iget_skip(sb, no_addr);
	else
		inode = gfs2_iget(sb, no_addr);
	ip = GFS2_I(inode);

	if (!inode)
		return ERR_PTR(-ENOBUFS);

	if (inode->i_state & I_NEW) {
		struct gfs2_sbd *sdp = GFS2_SB(inode);
		ip->i_no_formal_ino = no_formal_ino;

		error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
		if (unlikely(error))
			goto fail;
		ip->i_gl->gl_object = ip;

		error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
		if (unlikely(error))
			goto fail_put;

		set_bit(GIF_INVALID, &ip->i_flags);
		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
		if (unlikely(error))
			goto fail_iopen;
		ip->i_iopen_gh.gh_gl->gl_object = ip;

		gfs2_glock_put(io_gl);

		if ((type == DT_UNKNOWN) && (no_formal_ino == 0))
			goto gfs2_nfsbypass;

		inode->i_mode = DT2IF(type);

		/*
		 * We must read the inode in order to work out its type in
		 * this case. Note that this doesn't happen often as we normally
		 * know the type beforehand. This code path only occurs during
		 * unlinked inode recovery (where it is safe to do this glock,
		 * which is not true in the general case).
		 */
		if (type == DT_UNKNOWN) {
			struct gfs2_holder gh;
			error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
			if (unlikely(error))
				goto fail_glock;
			/* Inode is now uptodate */
			gfs2_glock_dq_uninit(&gh);
		}

		gfs2_set_iop(inode);
	}

gfs2_nfsbypass:
	return inode;
fail_glock:
	gfs2_glock_dq(&ip->i_iopen_gh);
fail_iopen:
	gfs2_glock_put(io_gl);
fail_put:
	ip->i_gl->gl_object = NULL;
	gfs2_glock_put(ip->i_gl);
fail:
	iget_failed(inode);
	return ERR_PTR(error);
}

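/*
 * gfs2_dinode_in() copies an on-disk (big-endian) dinode image into the
 * in-core gfs2_inode and its embedded VFS inode. The block address is
 * cross-checked against ip->i_no_addr, and the metadata tree height and
 * directory depth are range-checked; any mismatch is treated as on-disk
 * corruption and reported via gfs2_consist_inode().
 */
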
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
	ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
	/*
	 * We will need to review setting the nlink count here in the
	 * light of the forthcoming ro bind mount work. This is a reminder
	 * to do that.
	 */
	ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
	ip->i_disksize = be64_to_cpu(str->di_size);
	i_size_write(&ip->i_inode, ip->i_disksize);
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	ip->i_eattr = be64_to_cpu(str->di_eattr);
	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	if (gfs2_consist_inode(ip))
		gfs2_dinode_print(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh, GFS2_METATYPE_DI)) {
		brelse(dibh);
		return -EIO;
	}

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

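/*
 * gfs2_dinode_dealloc() releases the dinode block itself once everything
 * else belonging to the inode has been freed (the block count must be down
 * to exactly 1). It holds the quota data, the rindex and the owning
 * resource group's glock exclusively, then frees the dinode inside a
 * transaction via gfs2_free_di().
 */
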
int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al;
	struct gfs2_rgrpd *rgd;
	int error;

	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	al = gfs2_alloc_get(ip);
	if (!al)
		return -ENOMEM;

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
	if (error)
		goto out_qs;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_rindex_relse;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
				   &al->al_rgd_gh);
	if (error)
		goto out_rindex_relse;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, 1);
	if (error)
		goto out_rg_gunlock;

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GLF_LFLUSH, &ip->i_gl->gl_flags);

	gfs2_free_di(rgd, ip);

	gfs2_trans_end(sdp);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&al->al_rgd_gh);
out_rindex_relse:
	gfs2_glock_dq_uninit(&al->al_ri_gh);
out_qs:
	gfs2_quota_unhold(ip);
out:
	gfs2_alloc_put(ip);
	return error;
}

/**
 * gfs2_change_nlink - Change nlink count on inode
 * @ip: The GFS2 inode
 * @diff: The change in the nlink count required
 *
 * Returns: errno
 */

int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
{
	struct buffer_head *dibh;
	u32 nlink;
	int error;

	BUG_ON(diff != 1 && diff != -1);
	nlink = ip->i_inode.i_nlink + diff;

	/* If we are reducing the nlink count, but the new value ends up being
	   bigger than the old one, we must have underflowed. */
	if (diff < 0 && nlink > ip->i_inode.i_nlink) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	if (diff > 0)
		inc_nlink(&ip->i_inode);
	else
		drop_nlink(&ip->i_inode);

	ip->i_inode.i_ctime = CURRENT_TIME;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
	mark_inode_dirty(&ip->i_inode);

	if (ip->i_inode.i_nlink == 0)
		gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */

	return error;
}

struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
{
	struct qstr qstr;
	struct inode *inode;

	gfs2_str2qstr(&qstr, name);
	inode = gfs2_lookupi(dip, &qstr, 1);
	/* gfs2_lookupi has inconsistent callers: vfs
	 * related routines expect NULL for no entry found,
	 * gfs2_lookup_simple callers expect ENOENT
	 * and do not check for NULL.
	 */
	if (inode == NULL)
		return ERR_PTR(-ENOENT);
	else
		return inode;
}

/**
 * gfs2_lookupi - Look up a filename in a directory and return its inode
 * @dir: The directory inode
 * @name: The name of the inode to look for
 * @is_root: If 1, ignore the caller's permissions
 *
 * This can be called via the VFS filldir function when NFS is doing
 * a readdirplus and the inode which it's intending to stat isn't
 * already in cache. In this case we must not take the directory glock
 * again, since the readdir call will have already taken that lock.
 *
 * Returns: The inode, NULL if the entry was not found, or an ERR_PTR
 */

struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
			   int is_root)
{
	struct super_block *sb = dir->i_sb;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error = 0;
	struct inode *inode = NULL;
	int unlock = 0;

	if (!name->len || name->len > GFS2_FNAMESIZE)
		return ERR_PTR(-ENAMETOOLONG);

	if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) ||
	    (name->len == 2 && memcmp(name->name, "..", 2) == 0 &&
	     dir == sb->s_root->d_inode)) {
		igrab(dir);
		return dir;
	}

	if (gfs2_glock_is_locked_by_me(dip->i_gl) == NULL) {
		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
		if (error)
			return ERR_PTR(error);
		unlock = 1;
	}

	if (!is_root) {
		error = gfs2_permission(dir, MAY_EXEC);
		if (error)
			goto out;
	}

	inode = gfs2_dir_search(dir, name);
	if (IS_ERR(inode))
		error = PTR_ERR(inode);
out:
	if (unlock)
		gfs2_glock_dq_uninit(&d_gh);
	if (error == -ENOENT)
		return NULL;
	return inode ? inode : ERR_PTR(error);
}

static void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf)
{
	const struct gfs2_inum_range *str = buf;

	ir->ir_start = be64_to_cpu(str->ir_start);
	ir->ir_length = be64_to_cpu(str->ir_length);
}

static void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf)
{
	struct gfs2_inum_range *str = buf;

	str->ir_start = cpu_to_be64(ir->ir_start);
	str->ir_length = cpu_to_be64(ir->ir_length);
}

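/*
 * Formal inode numbers (no_formal_ino) are handed out in two stages.
 * pick_formal_ino_1() consumes the next number from the range stored just
 * past the dinode header of sd_ir_inode, under sd_inum_mutex; it returns 1
 * when that range is empty. pick_formal_ino_2() then refills the range
 * with GFS2_INUM_QUANTUM numbers taken from the counter in sd_inum_inode,
 * which is advanced under an exclusive glock.
 */
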
static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
	struct buffer_head *bh;
	struct gfs2_inum_range_host ir;
	int error;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;
	mutex_lock(&sdp->sd_inum_mutex);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error) {
		mutex_unlock(&sdp->sd_inum_mutex);
		gfs2_trans_end(sdp);
		return error;
	}

	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

	if (ir.ir_length) {
		*formal_ino = ir.ir_start++;
		ir.ir_length--;
		gfs2_trans_add_bh(ip->i_gl, bh, 1);
		gfs2_inum_range_out(&ir,
				    bh->b_data + sizeof(struct gfs2_dinode));
		brelse(bh);
		mutex_unlock(&sdp->sd_inum_mutex);
		gfs2_trans_end(sdp);
		return 0;
	}

	brelse(bh);

	mutex_unlock(&sdp->sd_inum_mutex);
	gfs2_trans_end(sdp);

	return 1;
}

static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode);
	struct gfs2_holder gh;
	struct buffer_head *bh;
	struct gfs2_inum_range_host ir;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out;
	mutex_lock(&sdp->sd_inum_mutex);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_end_trans;

	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

	if (!ir.ir_length) {
		struct buffer_head *m_bh;
		u64 x, y;
		__be64 z;

		error = gfs2_meta_inode_buffer(m_ip, &m_bh);
		if (error)
			goto out_brelse;

		z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
		x = y = be64_to_cpu(z);
		ir.ir_start = x;
		ir.ir_length = GFS2_INUM_QUANTUM;
		x += GFS2_INUM_QUANTUM;
		if (x < y)
			gfs2_consist_inode(m_ip);
		z = cpu_to_be64(x);
		gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
		*(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z;

		brelse(m_bh);
	}

	*formal_ino = ir.ir_start++;
	ir.ir_length--;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	gfs2_inum_range_out(&ir, bh->b_data + sizeof(struct gfs2_dinode));

out_brelse:
	brelse(bh);
out_end_trans:
	mutex_unlock(&sdp->sd_inum_mutex);
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}

static int pick_formal_ino(struct gfs2_sbd *sdp, u64 *inum)
{
	int error;

	error = pick_formal_ino_1(sdp, inum);
	if (error <= 0)
		return error;

	error = pick_formal_ino_2(sdp, inum);

	return error;
}

/**
 * create_ok - OK to create a new on-disk inode here?
 * @dip: Directory in which dinode is to be created
 * @name: Name of new dinode
 * @mode: Mode of the new dinode
 *
 * Returns: errno
 */

static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
		     unsigned int mode)
{
	int error;

	error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;

	/* Don't create entries in an unlinked directory */
	if (!dip->i_inode.i_nlink)
		return -EPERM;

	error = gfs2_dir_check(&dip->i_inode, name, NULL);
	switch (error) {
	case -ENOENT:
		error = 0;
		break;
	case 0:
		return -EEXIST;
	default:
		return error;
	}

	if (dip->i_entries == (u32)-1)
		return -EFBIG;
	if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
		return -EMLINK;

	return 0;
}

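/*
 * munge_mode_uid_gid() decides the owner and mode bits of a new inode.
 * With the "suiddir" mount option, a setuid directory owned by a non-root
 * user passes its uid on to new entries (new subdirectories also keep
 * S_ISUID, while files created by other users lose their set-id and
 * execute bits). A setgid parent propagates its gid, and new
 * subdirectories inherit S_ISGID, in the usual BSD-style fashion.
 */
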
static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
			       unsigned int *uid, unsigned int *gid)
{
	if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
	    (dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) {
		if (S_ISDIR(*mode))
			*mode |= S_ISUID;
		else if (dip->i_inode.i_uid != current_fsuid())
			*mode &= ~07111;
		*uid = dip->i_inode.i_uid;
	} else
		*uid = current_fsuid();

	if (dip->i_inode.i_mode & S_ISGID) {
		if (S_ISDIR(*mode))
			*mode |= S_ISGID;
		*gid = dip->i_inode.i_gid;
	} else
		*gid = current_fsgid();
}

static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	int error;

	if (gfs2_alloc_get(dip) == NULL)
		return -ENOMEM;

	dip->i_alloc->al_requested = RES_DINODE;
	error = gfs2_inplace_reserve(dip);
	if (error)
		goto out;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS, 0);
	if (error)
		goto out_ipreserv;

	*no_addr = gfs2_alloc_di(dip, generation);

	gfs2_trans_end(sdp);

out_ipreserv:
	gfs2_inplace_release(dip);
out:
	gfs2_alloc_put(dip);
	return error;
}

/**
 * init_dinode - Fill in a new dinode structure
 * @dip: the directory this inode is being created in
 * @gl: The glock covering the new inode
 * @inum: the inode number
 * @mode: the file permissions
 * @uid: The uid of the new inode
 * @gid: The gid of the new inode
 * @generation: The dinode generation number
 * @dev: The device number (for device special files)
 * @bhp: Returns the buffer head holding the new dinode
 *
 */

static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
			const struct gfs2_inum_host *inum, unsigned int mode,
			unsigned int uid, unsigned int gid,
			const u64 *generation, dev_t dev, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_dinode *di;
	struct buffer_head *dibh;
	struct timespec tv = CURRENT_TIME;

	dibh = gfs2_meta_new(gl, inum->no_addr);
	gfs2_trans_add_bh(gl, dibh, 1);
	gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
	di = (struct gfs2_dinode *)dibh->b_data;

	di->di_num.no_formal_ino = cpu_to_be64(inum->no_formal_ino);
	di->di_num.no_addr = cpu_to_be64(inum->no_addr);
	di->di_mode = cpu_to_be32(mode);
	di->di_uid = cpu_to_be32(uid);
	di->di_gid = cpu_to_be32(gid);
	di->di_nlink = 0;
	di->di_size = 0;
	di->di_blocks = cpu_to_be64(1);
	di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(tv.tv_sec);
	di->di_major = cpu_to_be32(MAJOR(dev));
	di->di_minor = cpu_to_be32(MINOR(dev));
	di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
	di->di_generation = cpu_to_be64(*generation);
	di->di_flags = 0;

	if (S_ISREG(mode)) {
		if ((dip->i_diskflags & GFS2_DIF_INHERIT_JDATA) ||
		    gfs2_tune_get(sdp, gt_new_files_jdata))
			di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
	} else if (S_ISDIR(mode)) {
		di->di_flags |= cpu_to_be32(dip->i_diskflags &
					    GFS2_DIF_INHERIT_JDATA);
	}

	di->__pad1 = 0;
	di->di_payload_format = cpu_to_be32(S_ISDIR(mode) ? GFS2_FORMAT_DE : 0);
	di->di_height = 0;
	di->__pad2 = 0;
	di->__pad3 = 0;
	di->di_depth = 0;
	di->di_entries = 0;
	memset(&di->__pad4, 0, sizeof(di->__pad4));
	di->di_eattr = 0;
	di->di_atime_nsec = cpu_to_be32(tv.tv_nsec);
	di->di_mtime_nsec = cpu_to_be32(tv.tv_nsec);
	di->di_ctime_nsec = cpu_to_be32(tv.tv_nsec);
	memset(&di->di_reserved, 0, sizeof(di->di_reserved));

	set_buffer_uptodate(dibh);

	*bhp = dibh;
}

static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
		       unsigned int mode, const struct gfs2_inum_host *inum,
		       const u64 *generation, dev_t dev, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	unsigned int uid, gid;
	int error;

	munge_mode_uid_gid(dip, &mode, &uid, &gid);
	if (!gfs2_alloc_get(dip))
		return -ENOMEM;

	error = gfs2_quota_lock(dip, uid, gid);
	if (error)
		goto out;

	error = gfs2_quota_check(dip, uid, gid);
	if (error)
		goto out_quota;

	error = gfs2_trans_begin(sdp, RES_DINODE + RES_QUOTA, 0);
	if (error)
		goto out_quota;

	init_dinode(dip, gl, inum, mode, uid, gid, generation, dev, bhp);
	gfs2_quota_change(dip, +1, uid, gid);
	gfs2_trans_end(sdp);

out_quota:
	gfs2_quota_unlock(dip);
out:
	gfs2_alloc_put(dip);
	return error;
}

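/*
 * link_dinode() enters the freshly made inode into the parent directory.
 * If the directory needs to grow to hold the new entry
 * (gfs2_diradd_alloc_required()), quota is checked and an in-place block
 * reservation is taken before the transaction is started. On success the
 * new inode's link count is set to 1 and its dinode is written back.
 */
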
static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
		       struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_alloc *al;
	int alloc_required;
	struct buffer_head *dibh;
	int error;

	al = gfs2_alloc_get(dip);
	if (!al)
		return -ENOMEM;

	error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto fail;

	error = alloc_required = gfs2_diradd_alloc_required(&dip->i_inode, name);
	if (alloc_required < 0)
		goto fail_quota_locks;
	if (alloc_required) {
		error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
		if (error)
			goto fail_quota_locks;

		al->al_requested = sdp->sd_max_dirres;

		error = gfs2_inplace_reserve(dip);
		if (error)
			goto fail_quota_locks;

		error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
					 al->al_rgd->rd_length +
					 2 * RES_DINODE +
					 RES_STATFS + RES_QUOTA, 0);
		if (error)
			goto fail_ipreserv;
	} else {
		error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0);
		if (error)
			goto fail_quota_locks;
	}

	error = gfs2_dir_add(&dip->i_inode, name, ip, IF2DT(ip->i_inode.i_mode));
	if (error)
		goto fail_end_trans;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto fail_end_trans;
	ip->i_inode.i_nlink = 1;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
	return 0;

fail_end_trans:
	gfs2_trans_end(sdp);

fail_ipreserv:
	if (dip->i_alloc->al_rgd)
		gfs2_inplace_release(dip);

fail_quota_locks:
	gfs2_quota_unlock(dip);

fail:
	gfs2_alloc_put(dip);
	return error;
}

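/*
 * gfs2_security_init() asks the security module for the initial security
 * label of the new inode and stores it as a GFS2_EATYPE_SECURITY extended
 * attribute. -EOPNOTSUPP from security_inode_init_security() simply means
 * no label is required and is not treated as an error.
 */
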
static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip)
{
	int err;
	size_t len;
	void *value;
	char *name;
	struct gfs2_ea_request er;

	err = security_inode_init_security(&ip->i_inode, &dip->i_inode,
					   &name, &value, &len);

	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	memset(&er, 0, sizeof(struct gfs2_ea_request));

	er.er_type = GFS2_EATYPE_SECURITY;
	er.er_name = name;
	er.er_data = value;
	er.er_name_len = strlen(name);
	er.er_data_len = len;

	err = gfs2_ea_set_i(ip, &er);

	kfree(value);
	kfree(name);

	return err;
}

/**
 * gfs2_createi - Create a new inode
 * @ghs: An array of two holders
 * @name: The name of the new file
 * @mode: the permissions on the new inode
 * @dev: The device number (for device special files)
 *
 * @ghs[0] is an initialized holder for the directory
 * @ghs[1] is the holder for the inode lock
 *
 * If the return value is not an error, the glocks on both the directory
 * and the new file are held. A transaction has been started and an inplace
 * reservation is held, as well.
 *
 * Returns: An inode
 */

struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
			   unsigned int mode, dev_t dev)
{
	struct inode *inode = NULL;
	struct gfs2_inode *dip = ghs->gh_gl->gl_object;
	struct inode *dir = &dip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_inum_host inum = { .no_addr = 0, .no_formal_ino = 0 };
	int error;
	u64 generation;
	struct buffer_head *bh = NULL;

	if (!name->len || name->len > GFS2_FNAMESIZE)
		return ERR_PTR(-ENAMETOOLONG);

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
	error = gfs2_glock_nq(ghs);
	if (error)
		goto fail;

	error = create_ok(dip, name, mode);
	if (error)
		goto fail_gunlock;

	error = pick_formal_ino(sdp, &inum.no_formal_ino);
	if (error)
		goto fail_gunlock;

	error = alloc_dinode(dip, &inum.no_addr, &generation);
	if (error)
		goto fail_gunlock;

	error = gfs2_glock_nq_num(sdp, inum.no_addr, &gfs2_inode_glops,
				  LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
	if (error)
		goto fail_gunlock;

	error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev, &bh);
	if (error)
		goto fail_gunlock2;

	inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode),
				  inum.no_addr,
				  inum.no_formal_ino, 0);
	if (IS_ERR(inode))
		goto fail_gunlock2;

	error = gfs2_inode_refresh(GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	error = gfs2_acl_create(dip, GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	error = gfs2_security_init(dip, GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	error = link_dinode(dip, name, GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	if (bh)
		brelse(bh);
	return inode;

fail_gunlock2:
	gfs2_glock_dq_uninit(ghs + 1);
	if (inode && !IS_ERR(inode))
		iput(inode);
fail_gunlock:
	gfs2_glock_dq(ghs);
fail:
	if (bh)
		brelse(bh);
	return ERR_PTR(error);
}

/**
 * gfs2_rmdiri - Remove a directory
 * @dip: The parent directory of the directory to be removed
 * @name: The name of the directory to be removed
 * @ip: The GFS2 inode of the directory to be removed
 *
 * Assumes Glocks on dip and ip are held
 *
 * Returns: errno
 */

int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
		struct gfs2_inode *ip)
{
	struct qstr dotname;
	int error;

	if (ip->i_entries != 2) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	error = gfs2_dir_del(dip, name);
	if (error)
		return error;

	error = gfs2_change_nlink(dip, -1);
	if (error)
		return error;

	gfs2_str2qstr(&dotname, ".");
	error = gfs2_dir_del(ip, &dotname);
	if (error)
		return error;

	gfs2_str2qstr(&dotname, "..");
	error = gfs2_dir_del(ip, &dotname);
	if (error)
		return error;

	/* It looks odd, but it really should be done twice */
	error = gfs2_change_nlink(ip, -1);
	if (error)
		return error;

	error = gfs2_change_nlink(ip, -1);
	if (error)
		return error;

	return error;
}

/*
 * gfs2_unlink_ok - check to see that an inode is still in a directory
 * @dip: the directory
 * @name: the name of the file
 * @ip: the inode
 *
 * Assumes that the lock on (at least) @dip is held.
 *
 * Returns: 0 if the parent/child relationship is correct, errno if it isn't
 */

int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
		   const struct gfs2_inode *ip)
{
	int error;

	if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
		return -EPERM;

	if ((dip->i_inode.i_mode & S_ISVTX) &&
	    dip->i_inode.i_uid != current_fsuid() &&
	    ip->i_inode.i_uid != current_fsuid() && !capable(CAP_FOWNER))
		return -EPERM;

	if (IS_APPEND(&dip->i_inode))
		return -EPERM;

	error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;

	error = gfs2_dir_check(&dip->i_inode, name, ip);
	if (error)
		return error;

	return 0;
}

/**
 * gfs2_readlinki - return the contents of a symlink
 * @ip: the symlink's inode
 * @buf: a pointer to the buffer to be filled
 * @len: a pointer to the length of @buf
 *
 * If @buf is too small, a piece of memory is kmalloc()ed and needs
 * to be freed by the caller.
 *
 * Returns: errno
 */

int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
{
	struct gfs2_holder i_gh;
	struct buffer_head *dibh;
	unsigned int x;
	int error;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
	error = gfs2_glock_nq(&i_gh);
	if (error) {
		gfs2_holder_uninit(&i_gh);
		return error;
	}

	if (!ip->i_disksize) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	x = ip->i_disksize + 1;
	if (x > *len) {
		*buf = kmalloc(x, GFP_NOFS);
		if (!*buf) {
			error = -ENOMEM;
			goto out_brelse;
		}
	}

	memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
	*len = x;

out_brelse:
	brelse(dibh);
out:
	gfs2_glock_dq_uninit(&i_gh);
	return error;
}

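/*
 * __gfs2_setattr_simple() applies the iattr changes to the VFS inode and
 * writes the updated dinode back through the journal. It assumes a
 * transaction is already open; gfs2_setattr_simple() below starts one
 * when the caller has not (current->journal_info == NULL).
 */
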
static int
__gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		error = inode_setattr(&ip->i_inode, attr);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}
	return error;
}

/**
 * gfs2_setattr_simple - Apply attribute changes to an inode
 * @ip: The GFS2 inode
 * @attr: The attributes to apply
 *
 * Called with a reference on the vnode.
 *
 * Returns: errno
 */

int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
	int error;

	if (current->journal_info)
		return __gfs2_setattr_simple(ip, attr);

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE, 0);
	if (error)
		return error;

	error = __gfs2_setattr_simple(ip, attr);
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
	return error;
}

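/*
 * gfs2_dinode_out() is the inverse of gfs2_dinode_in(): it writes the
 * in-core inode fields back into an on-disk (big-endian) dinode image,
 * typically into a buffer that has already been added to the current
 * transaction with gfs2_trans_add_bh().
 */
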
void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.__pad0 = 0;
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_header.__pad1 = 0;
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
	str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
	str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
	str->di_size = cpu_to_be64(ip->i_disksize);
	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
	str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);

	str->di_goal_meta = cpu_to_be64(ip->i_goal);
	str->di_goal_data = cpu_to_be64(ip->i_goal);
	str->di_generation = cpu_to_be64(ip->i_generation);

	str->di_flags = cpu_to_be32(ip->i_diskflags);
	str->di_height = cpu_to_be16(ip->i_height);
	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(ip->i_entries);

	str->di_eattr = cpu_to_be64(ip->i_eattr);
	str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
	str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
	str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
}

void gfs2_dinode_print(const struct gfs2_inode *ip)
{
	printk(KERN_INFO "  no_formal_ino = %llu\n",
	       (unsigned long long)ip->i_no_formal_ino);
	printk(KERN_INFO "  no_addr = %llu\n",
	       (unsigned long long)ip->i_no_addr);
	printk(KERN_INFO "  i_disksize = %llu\n",
	       (unsigned long long)ip->i_disksize);
	printk(KERN_INFO "  blocks = %llu\n",
	       (unsigned long long)gfs2_get_inode_blocks(&ip->i_inode));
	printk(KERN_INFO "  i_goal = %llu\n",
	       (unsigned long long)ip->i_goal);
	printk(KERN_INFO "  i_diskflags = 0x%.8X\n", ip->i_diskflags);
	printk(KERN_INFO "  i_height = %u\n", ip->i_height);
	printk(KERN_INFO "  i_depth = %u\n", ip->i_depth);
	printk(KERN_INFO "  i_entries = %u\n", ip->i_entries);
	printk(KERN_INFO "  i_eattr = %llu\n",
	       (unsigned long long)ip->i_eattr);
}