inode.c

/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/posix_acl.h>
#include <linux/sort.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/lm_interface.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "acl.h"
#include "bmap.h"
#include "dir.h"
#include "eattr.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "ops_inode.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

struct gfs2_inum_range_host {
	u64 ir_start;
	u64 ir_length;
};

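/*
 * iget_test and iget_set are the match and initialisation callbacks used
 * with ilookup5() and iget5_locked(): the test matches an inode on its
 * disk block address (and the GIF_USER flag), the set callback records
 * that address in a freshly allocated inode.
 */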
static int iget_test(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	u64 *no_addr = opaque;

	if (ip->i_no_addr == *no_addr && test_bit(GIF_USER, &ip->i_flags))
		return 1;

	return 0;
}

static int iget_set(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	u64 *no_addr = opaque;

	inode->i_ino = (unsigned long)*no_addr;
	ip->i_no_addr = *no_addr;
	set_bit(GIF_USER, &ip->i_flags);
	return 0;
}

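/*
 * gfs2_ilookup() finds an existing in-core inode by block address;
 * gfs2_iget() does the same but allocates a new, locked inode if none
 * is cached yet.
 */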
struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
{
	unsigned long hash = (unsigned long)no_addr;
	return ilookup5(sb, hash, iget_test, &no_addr);
}

static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
{
	unsigned long hash = (unsigned long)no_addr;
	return iget5_locked(sb, hash, iget_test, iget_set, &no_addr);
}

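/*
 * The "skip" variants below behave like gfs2_iget(), except that an inode
 * which is currently being freed (I_FREEING/I_CLEAR/I_WILL_FREE) is not
 * reused: iget_skip_test refuses the match and records that fact in
 * gfs2_skip_data, and iget_skip_set then declines to initialise the new
 * inode.
 */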
struct gfs2_skip_data {
	u64 no_addr;
	int skipped;
};

static int iget_skip_test(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_skip_data *data = opaque;

	if (ip->i_no_addr == data->no_addr && test_bit(GIF_USER, &ip->i_flags)) {
		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
			data->skipped = 1;
			return 0;
		}
		return 1;
	}
	return 0;
}

static int iget_skip_set(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_skip_data *data = opaque;

	if (data->skipped)
		return 1;
	inode->i_ino = (unsigned long)(data->no_addr);
	ip->i_no_addr = data->no_addr;
	set_bit(GIF_USER, &ip->i_flags);
	return 0;
}

static struct inode *gfs2_iget_skip(struct super_block *sb,
				    u64 no_addr)
{
	struct gfs2_skip_data data;
	unsigned long hash = (unsigned long)no_addr;

	data.no_addr = no_addr;
	data.skipped = 0;
	return iget5_locked(sb, hash, iget_skip_test, iget_skip_set, &data);
}

/**
 * GFS2 lookup code fills in vfs inode contents based on info obtained
 * from directory entry inside gfs2_inode_lookup(). This has caused issues
 * with the NFS code path since its get_dentry routine doesn't have the
 * relevant directory entry when gfs2_inode_lookup() is invoked. Part of
 * the code inside gfs2_inode_lookup() needs to be moved around.
 *
 * Clean up I_LOCK and I_NEW as well.
 **/

void gfs2_set_iop(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	umode_t mode = inode->i_mode;

	if (S_ISREG(mode)) {
		inode->i_op = &gfs2_file_iops;
		if (sdp->sd_args.ar_localflocks)
			inode->i_fop = &gfs2_file_fops_nolock;
		else
			inode->i_fop = &gfs2_file_fops;
	} else if (S_ISDIR(mode)) {
		inode->i_op = &gfs2_dir_iops;
		if (sdp->sd_args.ar_localflocks)
			inode->i_fop = &gfs2_dir_fops_nolock;
		else
			inode->i_fop = &gfs2_dir_fops;
	} else if (S_ISLNK(mode)) {
		inode->i_op = &gfs2_symlink_iops;
	} else {
		inode->i_op = &gfs2_file_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	}

	unlock_new_inode(inode);
}

/**
 * gfs2_inode_lookup - Lookup an inode
 * @sb: The super block
 * @type: The type of the inode
 * @no_addr: The inode number
 * @no_formal_ino: The inode's formal inode number
 * @skip_freeing: set this to avoid returning an inode that is currently
 *                being freed.
 *
 * Returns: A VFS inode, or an error
 */

struct inode *gfs2_inode_lookup(struct super_block *sb,
				unsigned int type,
				u64 no_addr,
				u64 no_formal_ino, int skip_freeing)
{
	struct inode *inode;
	struct gfs2_inode *ip;
	struct gfs2_glock *io_gl;
	int error;

	if (skip_freeing)
		inode = gfs2_iget_skip(sb, no_addr);
	else
		inode = gfs2_iget(sb, no_addr);
	ip = GFS2_I(inode);

	if (!inode)
		return ERR_PTR(-ENOBUFS);

	if (inode->i_state & I_NEW) {
		struct gfs2_sbd *sdp = GFS2_SB(inode);
		ip->i_no_formal_ino = no_formal_ino;

		error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
		if (unlikely(error))
			goto fail;
		ip->i_gl->gl_object = ip;

		error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
		if (unlikely(error))
			goto fail_put;

		set_bit(GIF_INVALID, &ip->i_flags);
		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
		if (unlikely(error))
			goto fail_iopen;
		ip->i_iopen_gh.gh_gl->gl_object = ip;

		gfs2_glock_put(io_gl);

		if ((type == DT_UNKNOWN) && (no_formal_ino == 0))
			goto gfs2_nfsbypass;

		inode->i_mode = DT2IF(type);

		/*
		 * We must read the inode in order to work out its type in
		 * this case. Note that this doesn't happen often as we normally
		 * know the type beforehand. This code path only occurs during
		 * unlinked inode recovery (where it is safe to do this glock,
		 * which is not true in the general case).
		 */
		if (type == DT_UNKNOWN) {
			struct gfs2_holder gh;
			error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
			if (unlikely(error))
				goto fail_glock;
			/* Inode is now uptodate */
			gfs2_glock_dq_uninit(&gh);
		}

		gfs2_set_iop(inode);
	}

gfs2_nfsbypass:
	return inode;

fail_glock:
	gfs2_glock_dq(&ip->i_iopen_gh);
fail_iopen:
	gfs2_glock_put(io_gl);
fail_put:
	ip->i_gl->gl_object = NULL;
	gfs2_glock_put(ip->i_gl);
fail:
	iget_failed(inode);
	return ERR_PTR(error);
}

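/*
 * gfs2_dinode_in - Copy an on-disk dinode into the in-core inode,
 * converting fields from big-endian and sanity checking the metadata
 * height and directory depth.
 */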
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	struct gfs2_dinode_host *di = &ip->i_di;
	const struct gfs2_dinode *str = buf;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;

	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	};

	ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
	ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
	/*
	 * We will need to review setting the nlink count here in the
	 * light of the forthcoming ro bind mount work. This is a reminder
	 * to do that.
	 */
	ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
	di->di_size = be64_to_cpu(str->di_size);
	i_size_write(&ip->i_inode, di->di_size);
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	ip->i_inode.i_atime.tv_sec = be64_to_cpu(str->di_atime);
	ip->i_inode.i_atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	di->di_generation = be64_to_cpu(str->di_generation);

	di->di_flags = be32_to_cpu(str->di_flags);
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	di->di_entries = be32_to_cpu(str->di_entries);

	di->di_eattr = be64_to_cpu(str->di_eattr);
	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	if (gfs2_consist_inode(ip))
		gfs2_dinode_print(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh, GFS2_METATYPE_DI)) {
		brelse(dibh);
		return -EIO;
	}

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

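/**
 * gfs2_dinode_dealloc - Free the on-disk block holding an unlinked dinode
 * @ip: The GFS2 inode
 *
 * The inode must be down to a single block (the dinode itself) before it
 * can be deallocated here.
 *
 * Returns: errno
 */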
int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al;
	struct gfs2_rgrpd *rgd;
	int error;

	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	al = gfs2_alloc_get(ip);
	if (!al)
		return -ENOMEM;

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
	if (error)
		goto out_qs;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_rindex_relse;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
				   &al->al_rgd_gh);
	if (error)
		goto out_rindex_relse;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, 1);
	if (error)
		goto out_rg_gunlock;

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GLF_LFLUSH, &ip->i_gl->gl_flags);

	gfs2_free_di(rgd, ip);

	gfs2_trans_end(sdp);
	clear_bit(GLF_STICKY, &ip->i_gl->gl_flags);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&al->al_rgd_gh);
out_rindex_relse:
	gfs2_glock_dq_uninit(&al->al_ri_gh);
out_qs:
	gfs2_quota_unhold(ip);
out:
	gfs2_alloc_put(ip);
	return error;
}

/**
 * gfs2_change_nlink - Change nlink count on inode
 * @ip: The GFS2 inode
 * @diff: The change in the nlink count required
 *
 * Returns: errno
 */

int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
{
	struct buffer_head *dibh;
	u32 nlink;
	int error;

	BUG_ON(diff != 1 && diff != -1);
	nlink = ip->i_inode.i_nlink + diff;

	/* If we are reducing the nlink count, but the new value ends up being
	   bigger than the old one, we must have underflowed. */
	if (diff < 0 && nlink > ip->i_inode.i_nlink) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	if (diff > 0)
		inc_nlink(&ip->i_inode);
	else
		drop_nlink(&ip->i_inode);

	ip->i_inode.i_ctime = CURRENT_TIME;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
	mark_inode_dirty(&ip->i_inode);

	if (ip->i_inode.i_nlink == 0)
		gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */

	return error;
}

struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
{
	struct qstr qstr;
	struct inode *inode;

	gfs2_str2qstr(&qstr, name);
	inode = gfs2_lookupi(dip, &qstr, 1, NULL);
	/* gfs2_lookupi has inconsistent callers: vfs
	 * related routines expect NULL for no entry found,
	 * gfs2_lookup_simple callers expect ENOENT
	 * and do not check for NULL.
	 */
	if (inode == NULL)
		return ERR_PTR(-ENOENT);
	else
		return inode;
}

/**
 * gfs2_lookupi - Look up a filename in a directory and return its inode
 * @dir: The directory to search
 * @name: The name of the inode to look for
 * @is_root: If 1, ignore the caller's permissions
 * @nd: The nameidata passed by the VFS (may be NULL)
 *
 * This can be called via the VFS filldir function when NFS is doing
 * a readdirplus and the inode which it's intending to stat isn't
 * already in cache. In this case we must not take the directory glock
 * again, since the readdir call will have already taken that lock.
 *
 * Returns: errno
 */

struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
			   int is_root, struct nameidata *nd)
{
	struct super_block *sb = dir->i_sb;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error = 0;
	struct inode *inode = NULL;
	int unlock = 0;

	if (!name->len || name->len > GFS2_FNAMESIZE)
		return ERR_PTR(-ENAMETOOLONG);

	if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) ||
	    (name->len == 2 && memcmp(name->name, "..", 2) == 0 &&
	     dir == sb->s_root->d_inode)) {
		igrab(dir);
		return dir;
	}

	if (gfs2_glock_is_locked_by_me(dip->i_gl) == NULL) {
		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
		if (error)
			return ERR_PTR(error);
		unlock = 1;
	}

	if (!is_root) {
		error = permission(dir, MAY_EXEC, NULL);
		if (error)
			goto out;
	}

	inode = gfs2_dir_search(dir, name);
	if (IS_ERR(inode))
		error = PTR_ERR(inode);
out:
	if (unlock)
		gfs2_glock_dq_uninit(&d_gh);
	if (error == -ENOENT)
		return NULL;
	return inode ? inode : ERR_PTR(error);
}

static void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf)
{
	const struct gfs2_inum_range *str = buf;

	ir->ir_start = be64_to_cpu(str->ir_start);
	ir->ir_length = be64_to_cpu(str->ir_length);
}

static void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf)
{
	struct gfs2_inum_range *str = buf;

	str->ir_start = cpu_to_be64(ir->ir_start);
	str->ir_length = cpu_to_be64(ir->ir_length);
}

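/*
 * Formal inode numbers are handed out from a range stored in sd_ir_inode.
 * pick_formal_ino_1() takes the next number from that range; when the
 * range is exhausted, pick_formal_ino_2() refills it with
 * GFS2_INUM_QUANTUM numbers from the counter held in sd_inum_inode.
 */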
static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
	struct buffer_head *bh;
	struct gfs2_inum_range_host ir;
	int error;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;
	mutex_lock(&sdp->sd_inum_mutex);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error) {
		mutex_unlock(&sdp->sd_inum_mutex);
		gfs2_trans_end(sdp);
		return error;
	}

	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

	if (ir.ir_length) {
		*formal_ino = ir.ir_start++;
		ir.ir_length--;
		gfs2_trans_add_bh(ip->i_gl, bh, 1);
		gfs2_inum_range_out(&ir,
				    bh->b_data + sizeof(struct gfs2_dinode));
		brelse(bh);
		mutex_unlock(&sdp->sd_inum_mutex);
		gfs2_trans_end(sdp);
		return 0;
	}

	brelse(bh);

	mutex_unlock(&sdp->sd_inum_mutex);
	gfs2_trans_end(sdp);

	return 1;
}

static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode);
	struct gfs2_holder gh;
	struct buffer_head *bh;
	struct gfs2_inum_range_host ir;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out;
	mutex_lock(&sdp->sd_inum_mutex);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_end_trans;

	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

	if (!ir.ir_length) {
		struct buffer_head *m_bh;
		u64 x, y;
		__be64 z;

		error = gfs2_meta_inode_buffer(m_ip, &m_bh);
		if (error)
			goto out_brelse;

		z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
		x = y = be64_to_cpu(z);
		ir.ir_start = x;
		ir.ir_length = GFS2_INUM_QUANTUM;
		x += GFS2_INUM_QUANTUM;
		if (x < y)
			gfs2_consist_inode(m_ip);
		z = cpu_to_be64(x);
		gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
		*(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z;

		brelse(m_bh);
	}

	*formal_ino = ir.ir_start++;
	ir.ir_length--;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	gfs2_inum_range_out(&ir, bh->b_data + sizeof(struct gfs2_dinode));

out_brelse:
	brelse(bh);
out_end_trans:
	mutex_unlock(&sdp->sd_inum_mutex);
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}

static int pick_formal_ino(struct gfs2_sbd *sdp, u64 *inum)
{
	int error;

	error = pick_formal_ino_1(sdp, inum);
	if (error <= 0)
		return error;

	error = pick_formal_ino_2(sdp, inum);

	return error;
}

/**
 * create_ok - OK to create a new on-disk inode here?
 * @dip: Directory in which dinode is to be created
 * @name: Name of new dinode
 * @mode:
 *
 * Returns: errno
 */

static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
		     unsigned int mode)
{
	int error;

	error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
	if (error)
		return error;

	/* Don't create entries in an unlinked directory */
	if (!dip->i_inode.i_nlink)
		return -EPERM;

	error = gfs2_dir_check(&dip->i_inode, name, NULL);
	switch (error) {
	case -ENOENT:
		error = 0;
		break;
	case 0:
		return -EEXIST;
	default:
		return error;
	}

	if (dip->i_di.di_entries == (u32)-1)
		return -EFBIG;
	if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
		return -EMLINK;

	return 0;
}

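/*
 * munge_mode_uid_gid - Choose the mode, uid and gid for a new inode,
 * applying the parent directory's setuid/setgid semantics (and the
 * "suiddir" mount option) where relevant.
 */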
static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
			       unsigned int *uid, unsigned int *gid)
{
	if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
	    (dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) {
		if (S_ISDIR(*mode))
			*mode |= S_ISUID;
		else if (dip->i_inode.i_uid != current->fsuid)
			*mode &= ~07111;
		*uid = dip->i_inode.i_uid;
	} else
		*uid = current->fsuid;

	if (dip->i_inode.i_mode & S_ISGID) {
		if (S_ISDIR(*mode))
			*mode |= S_ISGID;
		*gid = dip->i_inode.i_gid;
	} else
		*gid = current->fsgid;
}

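/*
 * alloc_dinode - Reserve and allocate the disk block that will hold a new
 * dinode, returning its block address and generation number.
 */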
static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	int error;

	if (gfs2_alloc_get(dip) == NULL)
		return -ENOMEM;

	dip->i_alloc->al_requested = RES_DINODE;
	error = gfs2_inplace_reserve(dip);
	if (error)
		goto out;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS, 0);
	if (error)
		goto out_ipreserv;

	*no_addr = gfs2_alloc_di(dip, generation);

	gfs2_trans_end(sdp);

out_ipreserv:
	gfs2_inplace_release(dip);
out:
	gfs2_alloc_put(dip);
	return error;
}

/**
 * init_dinode - Fill in a new dinode structure
 * @dip: the directory this inode is being created in
 * @gl: The glock covering the new inode
 * @inum: the inode number
 * @mode: the file permissions
 * @uid:
 * @gid:
 *
 */

static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
			const struct gfs2_inum_host *inum, unsigned int mode,
			unsigned int uid, unsigned int gid,
			const u64 *generation, dev_t dev, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_dinode *di;
	struct buffer_head *dibh;
	struct timespec tv = CURRENT_TIME;

	dibh = gfs2_meta_new(gl, inum->no_addr);
	gfs2_trans_add_bh(gl, dibh, 1);
	gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
	di = (struct gfs2_dinode *)dibh->b_data;

	di->di_num.no_formal_ino = cpu_to_be64(inum->no_formal_ino);
	di->di_num.no_addr = cpu_to_be64(inum->no_addr);
	di->di_mode = cpu_to_be32(mode);
	di->di_uid = cpu_to_be32(uid);
	di->di_gid = cpu_to_be32(gid);
	di->di_nlink = 0;
	di->di_size = 0;
	di->di_blocks = cpu_to_be64(1);
	di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(tv.tv_sec);
	di->di_major = cpu_to_be32(MAJOR(dev));
	di->di_minor = cpu_to_be32(MINOR(dev));
	di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
	di->di_generation = cpu_to_be64(*generation);
	di->di_flags = 0;

	if (S_ISREG(mode)) {
		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
		    gfs2_tune_get(sdp, gt_new_files_jdata))
			di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_DIRECTIO) ||
		    gfs2_tune_get(sdp, gt_new_files_directio))
			di->di_flags |= cpu_to_be32(GFS2_DIF_DIRECTIO);
	} else if (S_ISDIR(mode)) {
		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
					    GFS2_DIF_INHERIT_DIRECTIO);
		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
					    GFS2_DIF_INHERIT_JDATA);
	}

	di->__pad1 = 0;
	di->di_payload_format = cpu_to_be32(S_ISDIR(mode) ? GFS2_FORMAT_DE : 0);
	di->di_height = 0;
	di->__pad2 = 0;
	di->__pad3 = 0;
	di->di_depth = 0;
	di->di_entries = 0;
	memset(&di->__pad4, 0, sizeof(di->__pad4));
	di->di_eattr = 0;
	di->di_atime_nsec = cpu_to_be32(tv.tv_nsec);
	di->di_mtime_nsec = cpu_to_be32(tv.tv_nsec);
	di->di_ctime_nsec = cpu_to_be32(tv.tv_nsec);
	memset(&di->di_reserved, 0, sizeof(di->di_reserved));

	set_buffer_uptodate(dibh);

	*bhp = dibh;
}

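/*
 * make_dinode - Check quota, then build the new on-disk dinode via
 * init_dinode() inside its own transaction, charging one block to the
 * new owner's quota.
 */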
static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
		       unsigned int mode, const struct gfs2_inum_host *inum,
		       const u64 *generation, dev_t dev, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	unsigned int uid, gid;
	int error;

	munge_mode_uid_gid(dip, &mode, &uid, &gid);
	if (!gfs2_alloc_get(dip))
		return -ENOMEM;

	error = gfs2_quota_lock(dip, uid, gid);
	if (error)
		goto out;

	error = gfs2_quota_check(dip, uid, gid);
	if (error)
		goto out_quota;

	error = gfs2_trans_begin(sdp, RES_DINODE + RES_QUOTA, 0);
	if (error)
		goto out_quota;

	init_dinode(dip, gl, inum, mode, uid, gid, generation, dev, bhp);
	gfs2_quota_change(dip, +1, uid, gid);
	gfs2_trans_end(sdp);

out_quota:
	gfs2_quota_unlock(dip);
out:
	gfs2_alloc_put(dip);
	return error;
}

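/*
 * link_dinode - Add a directory entry for the freshly created inode and
 * set its link count to 1, reserving extra blocks first if the directory
 * needs to grow to hold the new entry.
 */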
static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
		       struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_alloc *al;
	int alloc_required;
	struct buffer_head *dibh;
	int error;

	al = gfs2_alloc_get(dip);
	if (!al)
		return -ENOMEM;

	error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto fail;

	error = alloc_required = gfs2_diradd_alloc_required(&dip->i_inode, name);
	if (alloc_required < 0)
		goto fail_quota_locks;
	if (alloc_required) {
		error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
		if (error)
			goto fail_quota_locks;

		al->al_requested = sdp->sd_max_dirres;

		error = gfs2_inplace_reserve(dip);
		if (error)
			goto fail_quota_locks;

		error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
					 al->al_rgd->rd_length +
					 2 * RES_DINODE +
					 RES_STATFS + RES_QUOTA, 0);
		if (error)
			goto fail_ipreserv;
	} else {
		error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0);
		if (error)
			goto fail_quota_locks;
	}

	error = gfs2_dir_add(&dip->i_inode, name, ip, IF2DT(ip->i_inode.i_mode));
	if (error)
		goto fail_end_trans;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto fail_end_trans;
	ip->i_inode.i_nlink = 1;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
	return 0;

fail_end_trans:
	gfs2_trans_end(sdp);

fail_ipreserv:
	if (dip->i_alloc->al_rgd)
		gfs2_inplace_release(dip);

fail_quota_locks:
	gfs2_quota_unlock(dip);

fail:
	gfs2_alloc_put(dip);
	return error;
}

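/*
 * gfs2_security_init - Ask the LSM for an initial security label for the
 * new inode and, if one is provided, store it as a "security" class
 * extended attribute.
 */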
static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip)
{
	int err;
	size_t len;
	void *value;
	char *name;
	struct gfs2_ea_request er;

	err = security_inode_init_security(&ip->i_inode, &dip->i_inode,
					   &name, &value, &len);

	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	memset(&er, 0, sizeof(struct gfs2_ea_request));

	er.er_type = GFS2_EATYPE_SECURITY;
	er.er_name = name;
	er.er_data = value;
	er.er_name_len = strlen(name);
	er.er_data_len = len;

	err = gfs2_ea_set_i(ip, &er);

	kfree(value);
	kfree(name);

	return err;
}

/**
 * gfs2_createi - Create a new inode
 * @ghs: An array of two holders
 * @name: The name of the new file
 * @mode: the permissions on the new inode
 *
 * @ghs[0] is an initialized holder for the directory
 * @ghs[1] is the holder for the inode lock
 *
 * If the return value is not an error, the glocks on both the directory and
 * the new file are held. A transaction has been started and an inplace
 * reservation is held, as well.
 *
 * Returns: An inode
 */

struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
			   unsigned int mode, dev_t dev)
{
	struct inode *inode = NULL;
	struct gfs2_inode *dip = ghs->gh_gl->gl_object;
	struct inode *dir = &dip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_inum_host inum = { .no_addr = 0, .no_formal_ino = 0 };
	int error;
	u64 generation;
	struct buffer_head *bh = NULL;

	if (!name->len || name->len > GFS2_FNAMESIZE)
		return ERR_PTR(-ENAMETOOLONG);

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
	error = gfs2_glock_nq(ghs);
	if (error)
		goto fail;

	error = create_ok(dip, name, mode);
	if (error)
		goto fail_gunlock;

	error = pick_formal_ino(sdp, &inum.no_formal_ino);
	if (error)
		goto fail_gunlock;

	error = alloc_dinode(dip, &inum.no_addr, &generation);
	if (error)
		goto fail_gunlock;

	error = gfs2_glock_nq_num(sdp, inum.no_addr, &gfs2_inode_glops,
				  LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
	if (error)
		goto fail_gunlock;

	error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev, &bh);
	if (error)
		goto fail_gunlock2;

	inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode),
				  inum.no_addr,
				  inum.no_formal_ino, 0);
	if (IS_ERR(inode))
		goto fail_gunlock2;

	error = gfs2_inode_refresh(GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	error = gfs2_acl_create(dip, GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	error = gfs2_security_init(dip, GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	error = link_dinode(dip, name, GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	if (bh)
		brelse(bh);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	return inode;

fail_gunlock2:
	gfs2_glock_dq_uninit(ghs + 1);
	if (inode)
		iput(inode);
fail_gunlock:
	gfs2_glock_dq(ghs);
fail:
	if (bh)
		brelse(bh);
	return ERR_PTR(error);
}

/**
 * gfs2_rmdiri - Remove a directory
 * @dip: The parent directory of the directory to be removed
 * @name: The name of the directory to be removed
 * @ip: The GFS2 inode of the directory to be removed
 *
 * Assumes Glocks on dip and ip are held
 *
 * Returns: errno
 */

int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
		struct gfs2_inode *ip)
{
	struct qstr dotname;
	int error;

	if (ip->i_di.di_entries != 2) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	error = gfs2_dir_del(dip, name);
	if (error)
		return error;

	error = gfs2_change_nlink(dip, -1);
	if (error)
		return error;

	gfs2_str2qstr(&dotname, ".");
	error = gfs2_dir_del(ip, &dotname);
	if (error)
		return error;

	gfs2_str2qstr(&dotname, "..");
	error = gfs2_dir_del(ip, &dotname);
	if (error)
		return error;

	/* It looks odd, but it really should be done twice */
	error = gfs2_change_nlink(ip, -1);
	if (error)
		return error;

	error = gfs2_change_nlink(ip, -1);
	if (error)
		return error;

	return error;
}

/*
 * gfs2_unlink_ok - check to see that an inode is still in a directory
 * @dip: the directory
 * @name: the name of the file
 * @ip: the inode
 *
 * Assumes that the lock on (at least) @dip is held.
 *
 * Returns: 0 if the parent/child relationship is correct, errno if it isn't
 */

int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
		   const struct gfs2_inode *ip)
{
	int error;

	if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
		return -EPERM;

	if ((dip->i_inode.i_mode & S_ISVTX) &&
	    dip->i_inode.i_uid != current->fsuid &&
	    ip->i_inode.i_uid != current->fsuid && !capable(CAP_FOWNER))
		return -EPERM;

	if (IS_APPEND(&dip->i_inode))
		return -EPERM;

	error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
	if (error)
		return error;

	error = gfs2_dir_check(&dip->i_inode, name, ip);
	if (error)
		return error;

	return 0;
}

/*
 * gfs2_ok_to_move - check if it's ok to move a directory to another directory
 * @this: move this
 * @to: to here
 *
 * Follow @to back to the root and make sure we don't encounter @this
 * Assumes we already hold the rename lock.
 *
 * Returns: errno
 */

int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
{
	struct inode *dir = &to->i_inode;
	struct super_block *sb = dir->i_sb;
	struct inode *tmp;
	struct qstr dotdot;
	int error = 0;

	gfs2_str2qstr(&dotdot, "..");

	igrab(dir);

	for (;;) {
		if (dir == &this->i_inode) {
			error = -EINVAL;
			break;
		}
		if (dir == sb->s_root->d_inode) {
			error = 0;
			break;
		}

		tmp = gfs2_lookupi(dir, &dotdot, 1, NULL);
		if (IS_ERR(tmp)) {
			error = PTR_ERR(tmp);
			break;
		}

		iput(dir);
		dir = tmp;
	}

	iput(dir);

	return error;
}

/**
 * gfs2_readlinki - return the contents of a symlink
 * @ip: the symlink's inode
 * @buf: a pointer to the buffer to be filled
 * @len: a pointer to the length of @buf
 *
 * If @buf is too small, a piece of memory is kmalloc()ed and needs
 * to be freed by the caller.
 *
 * Returns: errno
 */

int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
{
	struct gfs2_holder i_gh;
	struct buffer_head *dibh;
	unsigned int x;
	int error;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
	error = gfs2_glock_nq_atime(&i_gh);
	if (error) {
		gfs2_holder_uninit(&i_gh);
		return error;
	}

	if (!ip->i_di.di_size) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	x = ip->i_di.di_size + 1;
	if (x > *len) {
		*buf = kmalloc(x, GFP_NOFS);
		if (!*buf) {
			error = -ENOMEM;
			goto out_brelse;
		}
	}

	memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
	*len = x;

out_brelse:
	brelse(dibh);
out:
	gfs2_glock_dq_uninit(&i_gh);
	return error;
}

/**
 * gfs2_glock_nq_atime - Acquire a hold on an inode's glock, and
 *       conditionally update the inode's atime
 * @gh: the holder to acquire
 *
 * Tests atime (access time) for gfs2_read, gfs2_readdir and gfs2_mmap
 * Update if the difference between the current time and the inode's current
 * atime is greater than an interval specified at mount.
 *
 * Returns: errno
 */

int gfs2_glock_nq_atime(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	s64 quantum = gfs2_tune_get(sdp, gt_atime_quantum);
	unsigned int state;
	int flags;
	int error;
	struct timespec tv = CURRENT_TIME;

	if (gfs2_assert_warn(sdp, gh->gh_flags & GL_ATIME) ||
	    gfs2_assert_warn(sdp, !(gh->gh_flags & GL_ASYNC)) ||
	    gfs2_assert_warn(sdp, gl->gl_ops == &gfs2_inode_glops))
		return -EINVAL;

	state = gh->gh_state;
	flags = gh->gh_flags;

	error = gfs2_glock_nq(gh);
	if (error)
		return error;

	if (test_bit(SDF_NOATIME, &sdp->sd_flags) ||
	    (sdp->sd_vfs->s_flags & MS_RDONLY))
		return 0;

	if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) {
		gfs2_glock_dq(gh);
		gfs2_holder_reinit(LM_ST_EXCLUSIVE, gh->gh_flags & ~LM_FLAG_ANY,
				   gh);
		error = gfs2_glock_nq(gh);
		if (error)
			return error;

		/* Verify that atime hasn't been updated while we were
		   trying to get exclusive lock. */

		tv = CURRENT_TIME;
		if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) {
			struct buffer_head *dibh;
			struct gfs2_dinode *di;

			error = gfs2_trans_begin(sdp, RES_DINODE, 0);
			if (error == -EROFS)
				return 0;
			if (error)
				goto fail;

			error = gfs2_meta_inode_buffer(ip, &dibh);
			if (error)
				goto fail_end_trans;

			ip->i_inode.i_atime = tv;

			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
			di = (struct gfs2_dinode *)dibh->b_data;
			di->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
			di->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
			brelse(dibh);

			gfs2_trans_end(sdp);
		}

		/* If someone else has asked for the glock,
		   unlock and let them have it. Then reacquire
		   in the original state. */
		if (gfs2_glock_is_blocking(gl)) {
			gfs2_glock_dq(gh);
			gfs2_holder_reinit(state, flags, gh);
			return gfs2_glock_nq(gh);
		}
	}

	return 0;

fail_end_trans:
	gfs2_trans_end(sdp);
fail:
	gfs2_glock_dq(gh);
	return error;
}

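/*
 * __gfs2_setattr_simple - Apply an attribute change to the VFS inode and
 * write the result straight back into the dinode buffer.  Assumes the
 * caller already holds a transaction.
 */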
static int
__gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		error = inode_setattr(&ip->i_inode, attr);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}
	return error;
}

/**
 * gfs2_setattr_simple -
 * @ip:
 * @attr:
 *
 * Called with a reference on the vnode.
 *
 * Returns: errno
 */

int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
	int error;

	if (current->journal_info)
		return __gfs2_setattr_simple(ip, attr);

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE, 0);
	if (error)
		return error;

	error = __gfs2_setattr_simple(ip, attr);

	gfs2_trans_end(GFS2_SB(&ip->i_inode));

	return error;
}

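/*
 * gfs2_dinode_out - Write the in-core inode back into on-disk (big-endian)
 * dinode format at @buf.
 */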
void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	const struct gfs2_dinode_host *di = &ip->i_di;
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.__pad0 = 0;
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_header.__pad1 = 0;
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
	str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
	str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
	str->di_size = cpu_to_be64(di->di_size);
	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
	str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);

	str->di_goal_meta = cpu_to_be64(ip->i_goal);
	str->di_goal_data = cpu_to_be64(ip->i_goal);
	str->di_generation = cpu_to_be64(di->di_generation);

	str->di_flags = cpu_to_be32(di->di_flags);
	str->di_height = cpu_to_be16(ip->i_height);
	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
					     !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(di->di_entries);

	str->di_eattr = cpu_to_be64(di->di_eattr);
	str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
	str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
	str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
}

void gfs2_dinode_print(const struct gfs2_inode *ip)
{
	const struct gfs2_dinode_host *di = &ip->i_di;

	printk(KERN_INFO "  no_formal_ino = %llu\n",
	       (unsigned long long)ip->i_no_formal_ino);
	printk(KERN_INFO "  no_addr = %llu\n",
	       (unsigned long long)ip->i_no_addr);
	printk(KERN_INFO "  di_size = %llu\n", (unsigned long long)di->di_size);
	printk(KERN_INFO "  blocks = %llu\n",
	       (unsigned long long)gfs2_get_inode_blocks(&ip->i_inode));
	printk(KERN_INFO "  i_goal = %llu\n",
	       (unsigned long long)ip->i_goal);
	printk(KERN_INFO "  di_flags = 0x%.8X\n", di->di_flags);
	printk(KERN_INFO "  i_height = %u\n", ip->i_height);
	printk(KERN_INFO "  i_depth = %u\n", ip->i_depth);
	printk(KERN_INFO "  di_entries = %u\n", di->di_entries);
	printk(KERN_INFO "  di_eattr = %llu\n",
	       (unsigned long long)di->di_eattr);
}