inode.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352
  1. /*
  2. * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
  3. * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
  4. *
  5. * This copyrighted material is made available to anyone wishing to use,
  6. * modify, copy, or redistribute it subject to the terms and conditions
  7. * of the GNU General Public License version 2.
  8. */
  9. #include <linux/sched.h>
  10. #include <linux/slab.h>
  11. #include <linux/spinlock.h>
  12. #include <linux/completion.h>
  13. #include <linux/buffer_head.h>
  14. #include <linux/posix_acl.h>
  15. #include <linux/sort.h>
  16. #include <linux/gfs2_ondisk.h>
  17. #include <linux/crc32.h>
  18. #include <linux/lm_interface.h>
  19. #include <linux/security.h>
  20. #include "gfs2.h"
  21. #include "incore.h"
  22. #include "acl.h"
  23. #include "bmap.h"
  24. #include "dir.h"
  25. #include "eattr.h"
  26. #include "glock.h"
  27. #include "glops.h"
  28. #include "inode.h"
  29. #include "log.h"
  30. #include "meta_io.h"
  31. #include "ops_address.h"
  32. #include "ops_file.h"
  33. #include "ops_inode.h"
  34. #include "quota.h"
  35. #include "rgrp.h"
  36. #include "trans.h"
  37. #include "util.h"
/*
 * Host-byte-order copy of the on-disk formal inode number allocation
 * range (see gfs2_inum_range_in/out and pick_formal_ino_1/2 below).
 */
struct gfs2_inum_range_host {
	u64 ir_start;	/* first formal inode number in the range */
	u64 ir_length;	/* count of formal inode numbers remaining */
};
  42. static int iget_test(struct inode *inode, void *opaque)
  43. {
  44. struct gfs2_inode *ip = GFS2_I(inode);
  45. u64 *no_addr = opaque;
  46. if (ip->i_no_addr == *no_addr &&
  47. inode->i_private != NULL)
  48. return 1;
  49. return 0;
  50. }
  51. static int iget_set(struct inode *inode, void *opaque)
  52. {
  53. struct gfs2_inode *ip = GFS2_I(inode);
  54. u64 *no_addr = opaque;
  55. inode->i_ino = (unsigned long)*no_addr;
  56. ip->i_no_addr = *no_addr;
  57. return 0;
  58. }
  59. struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
  60. {
  61. unsigned long hash = (unsigned long)no_addr;
  62. return ilookup5(sb, hash, iget_test, &no_addr);
  63. }
  64. static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
  65. {
  66. unsigned long hash = (unsigned long)no_addr;
  67. return iget5_locked(sb, hash, iget_test, iget_set, &no_addr);
  68. }
/**
 * gfs2_inode_lookup - Lookup an inode
 * @sb: The super block
 * @type: The type of the inode (DT_UNKNOWN forces a read from disk)
 * @no_addr: The inode number (disk block address)
 * @no_formal_ino: The formal inode number (stable identity)
 *
 * Returns: A VFS inode, or an error
 */
struct inode *gfs2_inode_lookup(struct super_block *sb,
				unsigned int type,
				u64 no_addr,
				u64 no_formal_ino)
{
	struct inode *inode = gfs2_iget(sb, no_addr);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_glock *io_gl;
	int error;

	if (!inode)
		return ERR_PTR(-ENOBUFS);

	if (inode->i_state & I_NEW) {
		struct gfs2_sbd *sdp = GFS2_SB(inode);
		umode_t mode;

		/* Setting i_private marks the inode "ready" for iget_test() */
		inode->i_private = ip;
		ip->i_no_formal_ino = no_formal_ino;

		/* Inode glock covering the dinode's contents */
		error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
		if (unlikely(error))
			goto fail;
		ip->i_gl->gl_object = ip;

		/* iopen glock, held shared while the in-core inode lives */
		error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
		if (unlikely(error))
			goto fail_put;

		/* Dinode contents not yet read from disk */
		set_bit(GIF_INVALID, &ip->i_flags);
		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
		if (unlikely(error))
			goto fail_iopen;
		ip->i_iopen_gh.gh_gl->gl_object = ip;

		/* Drop our reference; the enqueued holder still pins io_gl */
		gfs2_glock_put(io_gl);

		/*
		 * We must read the inode in order to work out its type in
		 * this case. Note that this doesn't happen often as we normally
		 * know the type beforehand. This code path only occurs during
		 * unlinked inode recovery (where it is safe to do this glock,
		 * which is not true in the general case).
		 */
		inode->i_mode = mode = DT2IF(type);
		if (type == DT_UNKNOWN) {
			struct gfs2_holder gh;
			error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
			if (unlikely(error))
				goto fail_glock;
			/* Inode is now uptodate */
			mode = inode->i_mode;
			gfs2_glock_dq_uninit(&gh);
		}

		/* Wire up the per-type inode/file/address-space operations */
		if (S_ISREG(mode)) {
			inode->i_op = &gfs2_file_iops;
			inode->i_fop = &gfs2_file_fops;
			inode->i_mapping->a_ops = &gfs2_file_aops;
		} else if (S_ISDIR(mode)) {
			inode->i_op = &gfs2_dir_iops;
			inode->i_fop = &gfs2_dir_fops;
		} else if (S_ISLNK(mode)) {
			inode->i_op = &gfs2_symlink_iops;
		} else {
			inode->i_op = &gfs2_dev_iops;
		}

		unlock_new_inode(inode);
	}

	return inode;

/* Teardown in strict reverse order of acquisition */
fail_glock:
	gfs2_glock_dq(&ip->i_iopen_gh);
fail_iopen:
	gfs2_glock_put(io_gl);
fail_put:
	ip->i_gl->gl_object = NULL;
	gfs2_glock_put(ip->i_gl);
fail:
	iput(inode);
	return ERR_PTR(error);
}
  149. static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
  150. {
  151. struct gfs2_dinode_host *di = &ip->i_di;
  152. const struct gfs2_dinode *str = buf;
  153. if (ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)) {
  154. if (gfs2_consist_inode(ip))
  155. gfs2_dinode_print(ip);
  156. return -EIO;
  157. }
  158. ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
  159. ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
  160. ip->i_inode.i_rdev = 0;
  161. switch (ip->i_inode.i_mode & S_IFMT) {
  162. case S_IFBLK:
  163. case S_IFCHR:
  164. ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
  165. be32_to_cpu(str->di_minor));
  166. break;
  167. };
  168. ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
  169. ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
  170. /*
  171. * We will need to review setting the nlink count here in the
  172. * light of the forthcoming ro bind mount work. This is a reminder
  173. * to do that.
  174. */
  175. ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
  176. di->di_size = be64_to_cpu(str->di_size);
  177. i_size_write(&ip->i_inode, di->di_size);
  178. di->di_blocks = be64_to_cpu(str->di_blocks);
  179. gfs2_set_inode_blocks(&ip->i_inode);
  180. ip->i_inode.i_atime.tv_sec = be64_to_cpu(str->di_atime);
  181. ip->i_inode.i_atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
  182. ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
  183. ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
  184. ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
  185. ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);
  186. di->di_goal_meta = be64_to_cpu(str->di_goal_meta);
  187. di->di_goal_data = be64_to_cpu(str->di_goal_data);
  188. di->di_generation = be64_to_cpu(str->di_generation);
  189. di->di_flags = be32_to_cpu(str->di_flags);
  190. gfs2_set_inode_flags(&ip->i_inode);
  191. di->di_height = be16_to_cpu(str->di_height);
  192. di->di_depth = be16_to_cpu(str->di_depth);
  193. di->di_entries = be32_to_cpu(str->di_entries);
  194. di->di_eattr = be64_to_cpu(str->di_eattr);
  195. return 0;
  196. }
/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Reads the dinode block, verifies its metadata type, and re-populates
 * the in-core inode via gfs2_dinode_in(). Clears GIF_INVALID afterwards
 * so the inode is considered up to date.
 *
 * Returns: errno
 */
int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	/* Reject blocks that aren't actually dinodes */
	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh, GFS2_METATYPE_DI)) {
		brelse(dibh);
		return -EIO;
	}

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}
/*
 * gfs2_dinode_dealloc - Free the dinode block itself
 * @ip: The inode whose dinode block is to be deallocated
 *
 * Called when only the dinode block remains (all data/metadata blocks
 * already freed, hence the di_blocks != 1 consistency check). Takes
 * quota holds, the rindex, and the resource group lock, then frees the
 * dinode inside a transaction. Returns: errno.
 */
int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al;
	struct gfs2_rgrpd *rgd;
	int error;

	/* Only the dinode block itself should remain at this point */
	if (ip->i_di.di_blocks != 1) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
	if (error)
		goto out_qs;

	/* Find the resource group containing this dinode's block */
	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_rindex_relse;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
				   &al->al_rgd_gh);
	if (error)
		goto out_rindex_relse;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, 1);
	if (error)
		goto out_rg_gunlock;

	gfs2_trans_add_gl(ip->i_gl);

	gfs2_free_di(rgd, ip);

	gfs2_trans_end(sdp);
	clear_bit(GLF_STICKY, &ip->i_gl->gl_flags);

/* Release in reverse order of acquisition */
out_rg_gunlock:
	gfs2_glock_dq_uninit(&al->al_rgd_gh);
out_rindex_relse:
	gfs2_glock_dq_uninit(&al->al_ri_gh);
out_qs:
	gfs2_quota_unhold(ip);
out:
	gfs2_alloc_put(ip);
	return error;
}
/**
 * gfs2_change_nlink - Change nlink count on inode
 * @ip: The GFS2 inode
 * @diff: The change in the nlink count required (must be +1 or -1)
 *
 * Adjusts the VFS link count, stamps ctime, and writes the dinode back
 * through the journal. When the count hits zero the inode is added to
 * the unlinked list for later deallocation.
 *
 * Returns: errno
 */
int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
{
	struct buffer_head *dibh;
	u32 nlink;
	int error;

	BUG_ON(diff != 1 && diff != -1);
	nlink = ip->i_inode.i_nlink + diff;

	/* If we are reducing the nlink count, but the new value ends up being
	   bigger than the old one, we must have underflowed. */
	if (diff < 0 && nlink > ip->i_inode.i_nlink) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	if (diff > 0)
		inc_nlink(&ip->i_inode);
	else
		drop_nlink(&ip->i_inode);

	ip->i_inode.i_ctime = CURRENT_TIME;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
	mark_inode_dirty(&ip->i_inode);

	if (ip->i_inode.i_nlink == 0)
		gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */

	return error;
}
  301. struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
  302. {
  303. struct qstr qstr;
  304. struct inode *inode;
  305. gfs2_str2qstr(&qstr, name);
  306. inode = gfs2_lookupi(dip, &qstr, 1, NULL);
  307. /* gfs2_lookupi has inconsistent callers: vfs
  308. * related routines expect NULL for no entry found,
  309. * gfs2_lookup_simple callers expect ENOENT
  310. * and do not check for NULL.
  311. */
  312. if (inode == NULL)
  313. return ERR_PTR(-ENOENT);
  314. else
  315. return inode;
  316. }
/**
 * gfs2_lookupi - Look up a filename in a directory and return its inode
 * @dir: The directory inode to search in
 * @name: The name of the inode to look for
 * @is_root: If 1, ignore the caller's permissions
 * @nd: The nameidata from the VFS lookup (may be NULL)
 *
 * This can be called via the VFS filldir function when NFS is doing
 * a readdirplus and the inode which its intending to stat isn't
 * already in cache. In this case we must not take the directory glock
 * again, since the readdir call will have already taken that lock.
 *
 * Returns: the inode, NULL when the entry does not exist, or ERR_PTR
 */
struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
			   int is_root, struct nameidata *nd)
{
	struct super_block *sb = dir->i_sb;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error = 0;
	struct inode *inode = NULL;
	int unlock = 0;

	if (!name->len || name->len > GFS2_FNAMESIZE)
		return ERR_PTR(-ENAMETOOLONG);

	/* "." always resolves to @dir; ".." at the fs root also does */
	if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) ||
	    (name->len == 2 && memcmp(name->name, "..", 2) == 0 &&
	     dir == sb->s_root->d_inode)) {
		igrab(dir);
		return dir;
	}

	/* Only lock the directory if this thread doesn't already hold it
	 * (see the readdirplus note above) */
	if (gfs2_glock_is_locked_by_me(dip->i_gl) == 0) {
		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
		if (error)
			return ERR_PTR(error);
		unlock = 1;
	}

	if (!is_root) {
		error = permission(dir, MAY_EXEC, NULL);
		if (error)
			goto out;
	}

	inode = gfs2_dir_search(dir, name);
	if (IS_ERR(inode))
		error = PTR_ERR(inode);
out:
	if (unlock)
		gfs2_glock_dq_uninit(&d_gh);
	/* Callers distinguish "no entry" (NULL) from hard errors */
	if (error == -ENOENT)
		return NULL;
	return inode ? inode : ERR_PTR(error);
}
  369. static void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf)
  370. {
  371. const struct gfs2_inum_range *str = buf;
  372. ir->ir_start = be64_to_cpu(str->ir_start);
  373. ir->ir_length = be64_to_cpu(str->ir_length);
  374. }
  375. static void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf)
  376. {
  377. struct gfs2_inum_range *str = buf;
  378. str->ir_start = cpu_to_be64(ir->ir_start);
  379. str->ir_length = cpu_to_be64(ir->ir_length);
  380. }
  381. static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino)
  382. {
  383. struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
  384. struct buffer_head *bh;
  385. struct gfs2_inum_range_host ir;
  386. int error;
  387. error = gfs2_trans_begin(sdp, RES_DINODE, 0);
  388. if (error)
  389. return error;
  390. mutex_lock(&sdp->sd_inum_mutex);
  391. error = gfs2_meta_inode_buffer(ip, &bh);
  392. if (error) {
  393. mutex_unlock(&sdp->sd_inum_mutex);
  394. gfs2_trans_end(sdp);
  395. return error;
  396. }
  397. gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));
  398. if (ir.ir_length) {
  399. *formal_ino = ir.ir_start++;
  400. ir.ir_length--;
  401. gfs2_trans_add_bh(ip->i_gl, bh, 1);
  402. gfs2_inum_range_out(&ir,
  403. bh->b_data + sizeof(struct gfs2_dinode));
  404. brelse(bh);
  405. mutex_unlock(&sdp->sd_inum_mutex);
  406. gfs2_trans_end(sdp);
  407. return 0;
  408. }
  409. brelse(bh);
  410. mutex_unlock(&sdp->sd_inum_mutex);
  411. gfs2_trans_end(sdp);
  412. return 1;
  413. }
/*
 * pick_formal_ino_2 - Refill the local inum range and allocate from it
 * @sdp: the filesystem
 * @formal_ino: filled in with the allocated number on success
 *
 * Slow path used when the node-local range is exhausted: take the
 * cluster-wide inum inode's glock exclusively, carve a new quantum of
 * GFS2_INUM_QUANTUM numbers off the global counter if needed, then
 * allocate one number from the (possibly refilled) local range.
 * Returns: errno.
 */
static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode);
	struct gfs2_holder gh;
	struct buffer_head *bh;
	struct gfs2_inum_range_host ir;
	int error;

	/* Exclusive lock on the cluster-wide inum inode */
	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out;
	mutex_lock(&sdp->sd_inum_mutex);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_end_trans;

	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

	if (!ir.ir_length) {
		struct buffer_head *m_bh;
		u64 x, y;
		__be64 z;

		/* Read the global next-inum counter (big-endian on disk) */
		error = gfs2_meta_inode_buffer(m_ip, &m_bh);
		if (error)
			goto out_brelse;

		z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
		x = y = be64_to_cpu(z);
		ir.ir_start = x;
		ir.ir_length = GFS2_INUM_QUANTUM;
		x += GFS2_INUM_QUANTUM;
		/* Wraparound of the 64-bit counter means corruption */
		if (x < y)
			gfs2_consist_inode(m_ip);
		z = cpu_to_be64(x);
		gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
		*(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z;

		brelse(m_bh);
	}

	/* Take one number from the local range and write it back */
	*formal_ino = ir.ir_start++;
	ir.ir_length--;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	gfs2_inum_range_out(&ir, bh->b_data + sizeof(struct gfs2_dinode));

out_brelse:
	brelse(bh);
out_end_trans:
	mutex_unlock(&sdp->sd_inum_mutex);
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}
  465. static int pick_formal_ino(struct gfs2_sbd *sdp, u64 *inum)
  466. {
  467. int error;
  468. error = pick_formal_ino_1(sdp, inum);
  469. if (error <= 0)
  470. return error;
  471. error = pick_formal_ino_2(sdp, inum);
  472. return error;
  473. }
  474. /**
  475. * create_ok - OK to create a new on-disk inode here?
  476. * @dip: Directory in which dinode is to be created
  477. * @name: Name of new dinode
  478. * @mode:
  479. *
  480. * Returns: errno
  481. */
  482. static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
  483. unsigned int mode)
  484. {
  485. int error;
  486. error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
  487. if (error)
  488. return error;
  489. /* Don't create entries in an unlinked directory */
  490. if (!dip->i_inode.i_nlink)
  491. return -EPERM;
  492. error = gfs2_dir_check(&dip->i_inode, name, NULL);
  493. switch (error) {
  494. case -ENOENT:
  495. error = 0;
  496. break;
  497. case 0:
  498. return -EEXIST;
  499. default:
  500. return error;
  501. }
  502. if (dip->i_di.di_entries == (u32)-1)
  503. return -EFBIG;
  504. if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
  505. return -EMLINK;
  506. return 0;
  507. }
/*
 * munge_mode_uid_gid - Derive a new inode's mode/uid/gid from its parent
 * @dip: the parent directory
 * @mode: in/out file mode, possibly adjusted for setuid/setgid dirs
 * @uid: out, owner uid for the new inode
 * @gid: out, owner gid for the new inode
 *
 * Implements the "suiddir" mount option (inherit the directory owner's
 * uid) and BSD-style setgid directory semantics (inherit the gid).
 */
static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
			       unsigned int *uid, unsigned int *gid)
{
	if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
	    (dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) {
		if (S_ISDIR(*mode))
			*mode |= S_ISUID;	/* subdirs inherit suiddir behaviour */
		else if (dip->i_inode.i_uid != current->fsuid)
			*mode &= ~07111;	/* strip setuid/setgid/exec bits */
		*uid = dip->i_inode.i_uid;
	} else
		*uid = current->fsuid;

	if (dip->i_inode.i_mode & S_ISGID) {
		if (S_ISDIR(*mode))
			*mode |= S_ISGID;	/* setgid propagates to subdirs */
		*gid = dip->i_inode.i_gid;
	} else
		*gid = current->fsgid;
}
/*
 * alloc_dinode - Reserve and allocate a disk block for a new dinode
 * @dip: the directory the new inode is created in (used for allocation
 *       locality and reservation state)
 * @no_addr: out, the allocated dinode block address
 * @generation: out, the generation number for the new dinode
 *
 * Returns: errno
 */
static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	int error;

	gfs2_alloc_get(dip);

	/* Reserve space for exactly one dinode block */
	dip->i_alloc.al_requested = RES_DINODE;
	error = gfs2_inplace_reserve(dip);
	if (error)
		goto out;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS, 0);
	if (error)
		goto out_ipreserv;

	*no_addr = gfs2_alloc_di(dip, generation);

	gfs2_trans_end(sdp);

out_ipreserv:
	gfs2_inplace_release(dip);
out:
	gfs2_alloc_put(dip);
	return error;
}
/**
 * init_dinode - Fill in a new dinode structure
 * @dip: the directory this inode is being created in
 * @gl: The glock covering the new inode
 * @inum: the inode number
 * @mode: the file permissions
 * @uid: owner uid for the new dinode
 * @gid: owner gid for the new dinode
 * @generation: generation number for the new dinode
 * @dev: device number (only meaningful for device special files)
 *
 * Writes a fully-initialised on-disk dinode into a fresh metadata
 * buffer, adding it to the current transaction.
 */
static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
			const struct gfs2_inum_host *inum, unsigned int mode,
			unsigned int uid, unsigned int gid,
			const u64 *generation, dev_t dev)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_dinode *di;
	struct buffer_head *dibh;
	struct timespec tv = CURRENT_TIME;

	/* Fresh metadata buffer for the dinode block, journaled */
	dibh = gfs2_meta_new(gl, inum->no_addr);
	gfs2_trans_add_bh(gl, dibh, 1);
	gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
	di = (struct gfs2_dinode *)dibh->b_data;

	di->di_num.no_formal_ino = cpu_to_be64(inum->no_formal_ino);
	di->di_num.no_addr = cpu_to_be64(inum->no_addr);
	di->di_mode = cpu_to_be32(mode);
	di->di_uid = cpu_to_be32(uid);
	di->di_gid = cpu_to_be32(gid);
	di->di_nlink = 0;	/* link_dinode() sets the first link later */
	di->di_size = 0;
	di->di_blocks = cpu_to_be64(1);	/* just the dinode block itself */
	di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(tv.tv_sec);
	di->di_major = cpu_to_be32(MAJOR(dev));
	di->di_minor = cpu_to_be32(MINOR(dev));
	/* Start block allocation goals at the dinode's own address */
	di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
	di->di_generation = cpu_to_be64(*generation);
	di->di_flags = 0;

	if (S_ISREG(mode)) {
		/* Inherit jdata/directio from the parent or mount tunables */
		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
		    gfs2_tune_get(sdp, gt_new_files_jdata))
			di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_DIRECTIO) ||
		    gfs2_tune_get(sdp, gt_new_files_directio))
			di->di_flags |= cpu_to_be32(GFS2_DIF_DIRECTIO);
	} else if (S_ISDIR(mode)) {
		/* Subdirectories propagate the inherit flags themselves */
		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
					    GFS2_DIF_INHERIT_DIRECTIO);
		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
					    GFS2_DIF_INHERIT_JDATA);
	}

	di->__pad1 = 0;
	di->di_payload_format = cpu_to_be32(S_ISDIR(mode) ? GFS2_FORMAT_DE : 0);
	di->di_height = 0;
	di->__pad2 = 0;
	di->__pad3 = 0;
	di->di_depth = 0;
	di->di_entries = 0;
	memset(&di->__pad4, 0, sizeof(di->__pad4));
	di->di_eattr = 0;
	di->di_atime_nsec = cpu_to_be32(tv.tv_nsec);
	di->di_mtime_nsec = cpu_to_be32(tv.tv_nsec);
	di->di_ctime_nsec = cpu_to_be32(tv.tv_nsec);
	memset(&di->di_reserved, 0, sizeof(di->di_reserved));

	brelse(dibh);
}
/*
 * make_dinode - Create the on-disk dinode for a new inode
 * @dip: the parent directory
 * @gl: the glock covering the new inode
 * @mode: the file mode (adjusted by munge_mode_uid_gid)
 * @inum: the new inode's numbers
 * @generation: generation number for the new dinode
 * @dev: device number for special files
 *
 * Charges quota for the new inode and writes the initial dinode inside
 * a transaction. Returns: errno.
 */
static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
		       unsigned int mode, const struct gfs2_inum_host *inum,
		       const u64 *generation, dev_t dev)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	unsigned int uid, gid;
	int error;

	/* Apply suiddir/setgid inheritance rules before quota is charged */
	munge_mode_uid_gid(dip, &mode, &uid, &gid);
	gfs2_alloc_get(dip);

	error = gfs2_quota_lock(dip, uid, gid);
	if (error)
		goto out;

	error = gfs2_quota_check(dip, uid, gid);
	if (error)
		goto out_quota;

	error = gfs2_trans_begin(sdp, RES_DINODE + RES_QUOTA, 0);
	if (error)
		goto out_quota;

	init_dinode(dip, gl, inum, mode, uid, gid, generation, dev);
	gfs2_quota_change(dip, +1, uid, gid);	/* one more inode charged */
	gfs2_trans_end(sdp);

out_quota:
	gfs2_quota_unlock(dip);
out:
	gfs2_alloc_put(dip);
	return error;
}
/*
 * link_dinode - Add a directory entry pointing at a new inode
 * @dip: the directory to add the entry to
 * @name: the name of the new entry
 * @ip: the freshly created inode
 *
 * Reserves directory space if adding the entry needs new blocks, then
 * adds the entry and sets the new inode's first link inside a single
 * transaction. Returns: errno.
 */
static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
		       struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_alloc *al;
	int alloc_required;
	struct buffer_head *dibh;
	int error;

	al = gfs2_alloc_get(dip);

	error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto fail;

	/* Negative return is an error; positive means new blocks needed */
	error = alloc_required = gfs2_diradd_alloc_required(&dip->i_inode, name);
	if (alloc_required < 0)
		goto fail;
	if (alloc_required) {
		error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
		if (error)
			goto fail_quota_locks;

		al->al_requested = sdp->sd_max_dirres;

		error = gfs2_inplace_reserve(dip);
		if (error)
			goto fail_quota_locks;

		/* Larger transaction: new dir blocks + rgrp + statfs/quota */
		error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
					 al->al_rgd->rd_length +
					 2 * RES_DINODE +
					 RES_STATFS + RES_QUOTA, 0);
		if (error)
			goto fail_ipreserv;
	} else {
		error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0);
		if (error)
			goto fail_quota_locks;
	}

	error = gfs2_dir_add(&dip->i_inode, name, ip, IF2DT(ip->i_inode.i_mode));
	if (error)
		goto fail_end_trans;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto fail_end_trans;
	/* The new inode gains its first link; write the dinode back */
	ip->i_inode.i_nlink = 1;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
	return 0;

fail_end_trans:
	gfs2_trans_end(sdp);

fail_ipreserv:
	if (dip->i_alloc.al_rgd)
		gfs2_inplace_release(dip);

fail_quota_locks:
	gfs2_quota_unlock(dip);

fail:
	gfs2_alloc_put(dip);
	return error;
}
  696. static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip)
  697. {
  698. int err;
  699. size_t len;
  700. void *value;
  701. char *name;
  702. struct gfs2_ea_request er;
  703. err = security_inode_init_security(&ip->i_inode, &dip->i_inode,
  704. &name, &value, &len);
  705. if (err) {
  706. if (err == -EOPNOTSUPP)
  707. return 0;
  708. return err;
  709. }
  710. memset(&er, 0, sizeof(struct gfs2_ea_request));
  711. er.er_type = GFS2_EATYPE_SECURITY;
  712. er.er_name = name;
  713. er.er_data = value;
  714. er.er_name_len = strlen(name);
  715. er.er_data_len = len;
  716. err = gfs2_ea_set_i(ip, &er);
  717. kfree(value);
  718. kfree(name);
  719. return err;
  720. }
  721. /**
  722. * gfs2_createi - Create a new inode
  723. * @ghs: An array of two holders
  724. * @name: The name of the new file
  725. * @mode: the permissions on the new inode
  726. *
  727. * @ghs[0] is an initialized holder for the directory
  728. * @ghs[1] is the holder for the inode lock
  729. *
  730. * If the return value is not NULL, the glocks on both the directory and the new
  731. * file are held. A transaction has been started and an inplace reservation
  732. * is held, as well.
  733. *
  734. * Returns: An inode
  735. */
  736. struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
  737. unsigned int mode, dev_t dev)
  738. {
  739. struct inode *inode = NULL;
  740. struct gfs2_inode *dip = ghs->gh_gl->gl_object;
  741. struct inode *dir = &dip->i_inode;
  742. struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
  743. struct gfs2_inum_host inum = { .no_addr = 0, .no_formal_ino = 0 };
  744. int error;
  745. u64 generation;
  746. if (!name->len || name->len > GFS2_FNAMESIZE)
  747. return ERR_PTR(-ENAMETOOLONG);
  748. gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
  749. error = gfs2_glock_nq(ghs);
  750. if (error)
  751. goto fail;
  752. error = create_ok(dip, name, mode);
  753. if (error)
  754. goto fail_gunlock;
  755. error = pick_formal_ino(sdp, &inum.no_formal_ino);
  756. if (error)
  757. goto fail_gunlock;
  758. error = alloc_dinode(dip, &inum.no_addr, &generation);
  759. if (error)
  760. goto fail_gunlock;
  761. error = gfs2_glock_nq_num(sdp, inum.no_addr, &gfs2_inode_glops,
  762. LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
  763. if (error)
  764. goto fail_gunlock;
  765. error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev);
  766. if (error)
  767. goto fail_gunlock2;
  768. inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode),
  769. inum.no_addr,
  770. inum.no_formal_ino);
  771. if (IS_ERR(inode))
  772. goto fail_gunlock2;
  773. error = gfs2_inode_refresh(GFS2_I(inode));
  774. if (error)
  775. goto fail_gunlock2;
  776. error = gfs2_acl_create(dip, GFS2_I(inode));
  777. if (error)
  778. goto fail_gunlock2;
  779. error = gfs2_security_init(dip, GFS2_I(inode));
  780. if (error)
  781. goto fail_gunlock2;
  782. error = link_dinode(dip, name, GFS2_I(inode));
  783. if (error)
  784. goto fail_gunlock2;
  785. if (!inode)
  786. return ERR_PTR(-ENOMEM);
  787. return inode;
  788. fail_gunlock2:
  789. gfs2_glock_dq_uninit(ghs + 1);
  790. if (inode)
  791. iput(inode);
  792. fail_gunlock:
  793. gfs2_glock_dq(ghs);
  794. fail:
  795. return ERR_PTR(error);
  796. }
  797. /**
  798. * gfs2_rmdiri - Remove a directory
  799. * @dip: The parent directory of the directory to be removed
  800. * @name: The name of the directory to be removed
  801. * @ip: The GFS2 inode of the directory to be removed
  802. *
  803. * Assumes Glocks on dip and ip are held
  804. *
  805. * Returns: errno
  806. */
  807. int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
  808. struct gfs2_inode *ip)
  809. {
  810. struct qstr dotname;
  811. int error;
  812. if (ip->i_di.di_entries != 2) {
  813. if (gfs2_consist_inode(ip))
  814. gfs2_dinode_print(ip);
  815. return -EIO;
  816. }
  817. error = gfs2_dir_del(dip, name);
  818. if (error)
  819. return error;
  820. error = gfs2_change_nlink(dip, -1);
  821. if (error)
  822. return error;
  823. gfs2_str2qstr(&dotname, ".");
  824. error = gfs2_dir_del(ip, &dotname);
  825. if (error)
  826. return error;
  827. gfs2_str2qstr(&dotname, "..");
  828. error = gfs2_dir_del(ip, &dotname);
  829. if (error)
  830. return error;
  831. /* It looks odd, but it really should be done twice */
  832. error = gfs2_change_nlink(ip, -1);
  833. if (error)
  834. return error;
  835. error = gfs2_change_nlink(ip, -1);
  836. if (error)
  837. return error;
  838. return error;
  839. }
  840. /*
* gfs2_unlink_ok - check to see that an inode is still in a directory
  842. * @dip: the directory
  843. * @name: the name of the file
  844. * @ip: the inode
  845. *
  846. * Assumes that the lock on (at least) @dip is held.
  847. *
  848. * Returns: 0 if the parent/child relationship is correct, errno if it isn't
  849. */
  850. int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
  851. const struct gfs2_inode *ip)
  852. {
  853. int error;
  854. if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
  855. return -EPERM;
  856. if ((dip->i_inode.i_mode & S_ISVTX) &&
  857. dip->i_inode.i_uid != current->fsuid &&
  858. ip->i_inode.i_uid != current->fsuid && !capable(CAP_FOWNER))
  859. return -EPERM;
  860. if (IS_APPEND(&dip->i_inode))
  861. return -EPERM;
  862. error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
  863. if (error)
  864. return error;
  865. error = gfs2_dir_check(&dip->i_inode, name, ip);
  866. if (error)
  867. return error;
  868. return 0;
  869. }
  870. /*
  871. * gfs2_ok_to_move - check if it's ok to move a directory to another directory
  872. * @this: move this
  873. * @to: to here
  874. *
  875. * Follow @to back to the root and make sure we don't encounter @this
  876. * Assumes we already hold the rename lock.
  877. *
  878. * Returns: errno
  879. */
  880. int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
  881. {
  882. struct inode *dir = &to->i_inode;
  883. struct super_block *sb = dir->i_sb;
  884. struct inode *tmp;
  885. struct qstr dotdot;
  886. int error = 0;
  887. gfs2_str2qstr(&dotdot, "..");
  888. igrab(dir);
  889. for (;;) {
  890. if (dir == &this->i_inode) {
  891. error = -EINVAL;
  892. break;
  893. }
  894. if (dir == sb->s_root->d_inode) {
  895. error = 0;
  896. break;
  897. }
  898. tmp = gfs2_lookupi(dir, &dotdot, 1, NULL);
  899. if (IS_ERR(tmp)) {
  900. error = PTR_ERR(tmp);
  901. break;
  902. }
  903. iput(dir);
  904. dir = tmp;
  905. }
  906. iput(dir);
  907. return error;
  908. }
  909. /**
  910. * gfs2_readlinki - return the contents of a symlink
  911. * @ip: the symlink's inode
  912. * @buf: a pointer to the buffer to be filled
  913. * @len: a pointer to the length of @buf
  914. *
  915. * If @buf is too small, a piece of memory is kmalloc()ed and needs
  916. * to be freed by the caller.
  917. *
  918. * Returns: errno
  919. */
  920. int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
  921. {
  922. struct gfs2_holder i_gh;
  923. struct buffer_head *dibh;
  924. unsigned int x;
  925. int error;
  926. gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
  927. error = gfs2_glock_nq_atime(&i_gh);
  928. if (error) {
  929. gfs2_holder_uninit(&i_gh);
  930. return error;
  931. }
  932. if (!ip->i_di.di_size) {
  933. gfs2_consist_inode(ip);
  934. error = -EIO;
  935. goto out;
  936. }
  937. error = gfs2_meta_inode_buffer(ip, &dibh);
  938. if (error)
  939. goto out;
  940. x = ip->i_di.di_size + 1;
  941. if (x > *len) {
  942. *buf = kmalloc(x, GFP_KERNEL);
  943. if (!*buf) {
  944. error = -ENOMEM;
  945. goto out_brelse;
  946. }
  947. }
  948. memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
  949. *len = x;
  950. out_brelse:
  951. brelse(dibh);
  952. out:
  953. gfs2_glock_dq_uninit(&i_gh);
  954. return error;
  955. }
  956. /**
  957. * gfs2_glock_nq_atime - Acquire a hold on an inode's glock, and
  958. * conditionally update the inode's atime
  959. * @gh: the holder to acquire
  960. *
  961. * Tests atime (access time) for gfs2_read, gfs2_readdir and gfs2_mmap
  962. * Update if the difference between the current time and the inode's current
  963. * atime is greater than an interval specified at mount.
  964. *
  965. * Returns: errno
  966. */
int gfs2_glock_nq_atime(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	s64 quantum = gfs2_tune_get(sdp, gt_atime_quantum);
	unsigned int state;
	int flags;
	int error;
	struct timespec tv = CURRENT_TIME;

	/* The holder must be marked for atime handling, synchronous, and
	   must be protecting an inode glock. */
	if (gfs2_assert_warn(sdp, gh->gh_flags & GL_ATIME) ||
	    gfs2_assert_warn(sdp, !(gh->gh_flags & GL_ASYNC)) ||
	    gfs2_assert_warn(sdp, gl->gl_ops == &gfs2_inode_glops))
		return -EINVAL;

	/* Remember the caller's requested state/flags so we can reacquire
	   in that original mode after a possible exclusive upgrade. */
	state = gh->gh_state;
	flags = gh->gh_flags;
	error = gfs2_glock_nq(gh);
	if (error)
		return error;

	/* No atime updates wanted (noatime mount or read-only fs):
	   the lock is held, nothing more to do. */
	if (test_bit(SDF_NOATIME, &sdp->sd_flags) ||
	    (sdp->sd_vfs->s_flags & MS_RDONLY))
		return 0;

	/* Only update atime if it is older than the mount's quantum. */
	if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) {
		/* Drop the (possibly shared) lock and retake it in
		   exclusive mode so we may write the dinode. LM_FLAG_ANY
		   is stripped so we really get EXCLUSIVE. */
		gfs2_glock_dq(gh);
		gfs2_holder_reinit(LM_ST_EXCLUSIVE, gh->gh_flags & ~LM_FLAG_ANY,
				   gh);
		error = gfs2_glock_nq(gh);
		if (error)
			return error;

		/* Verify that atime hasn't been updated while we were
		   trying to get exclusive lock. */
		tv = CURRENT_TIME;
		if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) {
			struct buffer_head *dibh;
			struct gfs2_dinode *di;

			error = gfs2_trans_begin(sdp, RES_DINODE, 0);
			if (error == -EROFS)
				/* fs went read-only: skip the update but
				   report success — the lock is held. */
				return 0;
			if (error)
				goto fail;

			error = gfs2_meta_inode_buffer(ip, &dibh);
			if (error)
				goto fail_end_trans;

			/* Update both the in-core and on-disk atime. */
			ip->i_inode.i_atime = tv;
			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
			di = (struct gfs2_dinode *)dibh->b_data;
			di->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
			di->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
			brelse(dibh);
			gfs2_trans_end(sdp);
		}

		/* If someone else has asked for the glock,
		   unlock and let them have it. Then reacquire
		   in the original state. */
		if (gfs2_glock_is_blocking(gl)) {
			gfs2_glock_dq(gh);
			gfs2_holder_reinit(state, flags, gh);
			return gfs2_glock_nq(gh);
		}
	}

	return 0;

fail_end_trans:
	gfs2_trans_end(sdp);
fail:
	/* Drop the lock on failure so the caller doesn't hold it. */
	gfs2_glock_dq(gh);
	return error;
}
  1034. static int
  1035. __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
  1036. {
  1037. struct buffer_head *dibh;
  1038. int error;
  1039. error = gfs2_meta_inode_buffer(ip, &dibh);
  1040. if (!error) {
  1041. error = inode_setattr(&ip->i_inode, attr);
  1042. gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
  1043. gfs2_trans_add_bh(ip->i_gl, dibh, 1);
  1044. gfs2_dinode_out(ip, dibh->b_data);
  1045. brelse(dibh);
  1046. }
  1047. return error;
  1048. }
  1049. /**
* gfs2_setattr_simple - change the attributes of an inode
* @ip: the inode whose attributes are to be changed
* @attr: the new attribute values to apply
  1053. *
  1054. * Called with a reference on the vnode.
  1055. *
  1056. * Returns: errno
  1057. */
  1058. int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
  1059. {
  1060. int error;
  1061. if (current->journal_info)
  1062. return __gfs2_setattr_simple(ip, attr);
  1063. error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE, 0);
  1064. if (error)
  1065. return error;
  1066. error = __gfs2_setattr_simple(ip, attr);
  1067. gfs2_trans_end(GFS2_SB(&ip->i_inode));
  1068. return error;
  1069. }
  1070. void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
  1071. {
  1072. const struct gfs2_dinode_host *di = &ip->i_di;
  1073. struct gfs2_dinode *str = buf;
  1074. str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
  1075. str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
  1076. str->di_header.__pad0 = 0;
  1077. str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
  1078. str->di_header.__pad1 = 0;
  1079. str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
  1080. str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
  1081. str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
  1082. str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
  1083. str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
  1084. str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
  1085. str->di_size = cpu_to_be64(di->di_size);
  1086. str->di_blocks = cpu_to_be64(di->di_blocks);
  1087. str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
  1088. str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
  1089. str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);
  1090. str->di_goal_meta = cpu_to_be64(di->di_goal_meta);
  1091. str->di_goal_data = cpu_to_be64(di->di_goal_data);
  1092. str->di_generation = cpu_to_be64(di->di_generation);
  1093. str->di_flags = cpu_to_be32(di->di_flags);
  1094. str->di_height = cpu_to_be16(di->di_height);
  1095. str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
  1096. !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ?
  1097. GFS2_FORMAT_DE : 0);
  1098. str->di_depth = cpu_to_be16(di->di_depth);
  1099. str->di_entries = cpu_to_be32(di->di_entries);
  1100. str->di_eattr = cpu_to_be64(di->di_eattr);
  1101. str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
  1102. str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
  1103. str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
  1104. }
  1105. void gfs2_dinode_print(const struct gfs2_inode *ip)
  1106. {
  1107. const struct gfs2_dinode_host *di = &ip->i_di;
  1108. printk(KERN_INFO " no_formal_ino = %llu\n",
  1109. (unsigned long long)ip->i_no_formal_ino);
  1110. printk(KERN_INFO " no_addr = %llu\n",
  1111. (unsigned long long)ip->i_no_addr);
  1112. printk(KERN_INFO " di_size = %llu\n", (unsigned long long)di->di_size);
  1113. printk(KERN_INFO " di_blocks = %llu\n",
  1114. (unsigned long long)di->di_blocks);
  1115. printk(KERN_INFO " di_goal_meta = %llu\n",
  1116. (unsigned long long)di->di_goal_meta);
  1117. printk(KERN_INFO " di_goal_data = %llu\n",
  1118. (unsigned long long)di->di_goal_data);
  1119. printk(KERN_INFO " di_flags = 0x%.8X\n", di->di_flags);
  1120. printk(KERN_INFO " di_height = %u\n", di->di_height);
  1121. printk(KERN_INFO " di_depth = %u\n", di->di_depth);
  1122. printk(KERN_INFO " di_entries = %u\n", di->di_entries);
  1123. printk(KERN_INFO " di_eattr = %llu\n",
  1124. (unsigned long long)di->di_eattr);
  1125. }