/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/posix_acl.h>
#include <linux/sort.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/lm_interface.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "acl.h"
#include "bmap.h"
#include "dir.h"
#include "eattr.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "ops_inode.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

struct gfs2_inum_range_host {
	u64 ir_start;
	u64 ir_length;
};
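
/*
 * iget_test - inode hash comparison callback for ilookup5()/iget5_locked()
 *
 * Matches only inodes whose i_no_addr equals the requested block address
 * and which have been fully set up (i_private != NULL).
 */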
static int iget_test(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	u64 *no_addr = opaque;

	if (ip->i_no_addr == *no_addr &&
	    inode->i_private != NULL)
		return 1;

	return 0;
}
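
/*
 * iget_set - initialisation callback for iget5_locked()
 *
 * Records the requested block address in i_ino (truncated to unsigned long)
 * and in the GFS2-specific i_no_addr.
 */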
static int iget_set(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	u64 *no_addr = opaque;

	inode->i_ino = (unsigned long)*no_addr;
	ip->i_no_addr = *no_addr;
	return 0;
}

struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
{
	unsigned long hash = (unsigned long)no_addr;
	return ilookup5(sb, hash, iget_test, &no_addr);
}

static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
{
	unsigned long hash = (unsigned long)no_addr;
	return iget5_locked(sb, hash, iget_test, iget_set, &no_addr);
}

struct gfs2_skip_data {
	u64 no_addr;
	int skipped;
};
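
/*
 * iget_skip_test/iget_skip_set - variants of iget_test()/iget_set() used by
 * gfs2_iget_skip(). If the matching inode is currently being freed
 * (I_FREEING/I_CLEAR/I_WILL_FREE), the fact is recorded in gfs2_skip_data
 * and the inode is neither returned nor reinitialised.
 */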
static int iget_skip_test(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_skip_data *data = opaque;

	if (ip->i_no_addr == data->no_addr && inode->i_private != NULL) {
		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
			data->skipped = 1;
			return 0;
		}
		return 1;
	}
	return 0;
}

static int iget_skip_set(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_skip_data *data = opaque;

	if (data->skipped)
		return 1;
	inode->i_ino = (unsigned long)(data->no_addr);
	ip->i_no_addr = data->no_addr;
	return 0;
}

static struct inode *gfs2_iget_skip(struct super_block *sb,
				    u64 no_addr)
{
	struct gfs2_skip_data data;
	unsigned long hash = (unsigned long)no_addr;

	data.no_addr = no_addr;
	data.skipped = 0;
	return iget5_locked(sb, hash, iget_skip_test, iget_skip_set, &data);
}

/**
 * GFS2 lookup code fills in vfs inode contents based on info obtained
 * from directory entry inside gfs2_inode_lookup(). This has caused issues
 * with NFS code path since its get_dentry routine doesn't have the relevant
 * directory entry when gfs2_inode_lookup() is invoked. Part of the code
 * segment inside gfs2_inode_lookup code needs to get moved around.
 *
 * Clean up I_LOCK and I_NEW as well.
 **/

void gfs2_set_iop(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	umode_t mode = inode->i_mode;

	if (S_ISREG(mode)) {
		inode->i_op = &gfs2_file_iops;
		if (sdp->sd_args.ar_localflocks)
			inode->i_fop = &gfs2_file_fops_nolock;
		else
			inode->i_fop = &gfs2_file_fops;
	} else if (S_ISDIR(mode)) {
		inode->i_op = &gfs2_dir_iops;
		if (sdp->sd_args.ar_localflocks)
			inode->i_fop = &gfs2_dir_fops_nolock;
		else
			inode->i_fop = &gfs2_dir_fops;
	} else if (S_ISLNK(mode)) {
		inode->i_op = &gfs2_symlink_iops;
	} else {
		inode->i_op = &gfs2_dev_iops;
	}

	unlock_new_inode(inode);
}

/**
 * gfs2_inode_lookup - Lookup an inode
 * @sb: The super block
 * @type: The type of the inode
 * @no_addr: The inode number (disk block address)
 * @no_formal_ino: The inode's formal (NFS) inode number
 * @skip_freeing: set this to avoid returning an inode which is currently being freed
 *
 * Returns: A VFS inode, or an error
 */

struct inode *gfs2_inode_lookup(struct super_block *sb,
				unsigned int type,
				u64 no_addr,
				u64 no_formal_ino, int skip_freeing)
{
	struct inode *inode;
	struct gfs2_inode *ip;
	struct gfs2_glock *io_gl;
	int error;

	if (skip_freeing)
		inode = gfs2_iget_skip(sb, no_addr);
	else
		inode = gfs2_iget(sb, no_addr);
	ip = GFS2_I(inode);

	if (!inode)
		return ERR_PTR(-ENOBUFS);

	if (inode->i_state & I_NEW) {
		struct gfs2_sbd *sdp = GFS2_SB(inode);
		inode->i_private = ip;
		ip->i_no_formal_ino = no_formal_ino;

		error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
		if (unlikely(error))
			goto fail;
		ip->i_gl->gl_object = ip;

		error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
		if (unlikely(error))
			goto fail_put;

		set_bit(GIF_INVALID, &ip->i_flags);
		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
		if (unlikely(error))
			goto fail_iopen;
		ip->i_iopen_gh.gh_gl->gl_object = ip;

		gfs2_glock_put(io_gl);

		if ((type == DT_UNKNOWN) && (no_formal_ino == 0))
			goto gfs2_nfsbypass;

		inode->i_mode = DT2IF(type);

		/*
		 * We must read the inode in order to work out its type in
		 * this case. Note that this doesn't happen often as we normally
		 * know the type beforehand. This code path only occurs during
		 * unlinked inode recovery (where it is safe to do this glock,
		 * which is not true in the general case).
		 */
		if (type == DT_UNKNOWN) {
			struct gfs2_holder gh;
			error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
			if (unlikely(error))
				goto fail_glock;
			/* Inode is now uptodate */
			gfs2_glock_dq_uninit(&gh);
		}

		gfs2_set_iop(inode);
	}

gfs2_nfsbypass:
	return inode;

fail_glock:
	gfs2_glock_dq(&ip->i_iopen_gh);
fail_iopen:
	gfs2_glock_put(io_gl);
fail_put:
	ip->i_gl->gl_object = NULL;
	gfs2_glock_put(ip->i_gl);
fail:
	iget_failed(inode);
	return ERR_PTR(error);
}
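
/*
 * gfs2_dinode_in - copy an on-disk dinode into the incore inode
 * @ip: the incore GFS2 inode to fill in
 * @buf: the buffer containing the on-disk struct gfs2_dinode
 *
 * Byte-swaps the big-endian on-disk fields into ip->i_inode and ip->i_di,
 * with sanity checks on the block address, metadata height and directory
 * depth.
 *
 * Returns: 0 on success, -EIO if the dinode appears corrupt
 */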
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	struct gfs2_dinode_host *di = &ip->i_di;
	const struct gfs2_dinode *str = buf;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	};

	ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
	ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
	/*
	 * We will need to review setting the nlink count here in the
	 * light of the forthcoming ro bind mount work. This is a reminder
	 * to do that.
	 */
	ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
	di->di_size = be64_to_cpu(str->di_size);
	i_size_write(&ip->i_inode, di->di_size);
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	ip->i_inode.i_atime.tv_sec = be64_to_cpu(str->di_atime);
	ip->i_inode.i_atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	di->di_generation = be64_to_cpu(str->di_generation);

	di->di_flags = be32_to_cpu(str->di_flags);
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	di->di_entries = be32_to_cpu(str->di_entries);

	di->di_eattr = be64_to_cpu(str->di_eattr);
	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	if (gfs2_consist_inode(ip))
		gfs2_dinode_print(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh, GFS2_METATYPE_DI)) {
		brelse(dibh);
		return -EIO;
	}

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}
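
/*
 * gfs2_dinode_dealloc - free the on-disk dinode block of an inode
 * @ip: the inode, which must be down to its final (dinode) block
 *
 * Holds quota, rindex and resource group locks around the transaction
 * which frees the dinode block.
 *
 * Returns: errno
 */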
int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al;
	struct gfs2_rgrpd *rgd;
	int error;

	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
	if (error)
		goto out_qs;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_rindex_relse;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
				   &al->al_rgd_gh);
	if (error)
		goto out_rindex_relse;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, 1);
	if (error)
		goto out_rg_gunlock;

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GLF_LFLUSH, &ip->i_gl->gl_flags);

	gfs2_free_di(rgd, ip);

	gfs2_trans_end(sdp);
	clear_bit(GLF_STICKY, &ip->i_gl->gl_flags);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&al->al_rgd_gh);
out_rindex_relse:
	gfs2_glock_dq_uninit(&al->al_ri_gh);
out_qs:
	gfs2_quota_unhold(ip);
out:
	gfs2_alloc_put(ip);
	return error;
}

/**
 * gfs2_change_nlink - Change nlink count on inode
 * @ip: The GFS2 inode
 * @diff: The change in the nlink count required
 *
 * Returns: errno
 */

int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
{
	struct buffer_head *dibh;
	u32 nlink;
	int error;

	BUG_ON(diff != 1 && diff != -1);
	nlink = ip->i_inode.i_nlink + diff;

	/* If we are reducing the nlink count, but the new value ends up being
	   bigger than the old one, we must have underflowed. */
	if (diff < 0 && nlink > ip->i_inode.i_nlink) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	if (diff > 0)
		inc_nlink(&ip->i_inode);
	else
		drop_nlink(&ip->i_inode);

	ip->i_inode.i_ctime = CURRENT_TIME;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
	mark_inode_dirty(&ip->i_inode);

	if (ip->i_inode.i_nlink == 0)
		gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */

	return error;
}
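
/*
 * gfs2_lookup_simple - look up @name in directory @dip with is_root set
 * (no permission check), mapping the NULL "not found" result of
 * gfs2_lookupi() onto ERR_PTR(-ENOENT) for callers which only test
 * IS_ERR().
 */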
struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
{
	struct qstr qstr;
	struct inode *inode;
	gfs2_str2qstr(&qstr, name);
	inode = gfs2_lookupi(dip, &qstr, 1, NULL);
	/* gfs2_lookupi has inconsistent callers: vfs
	 * related routines expect NULL for no entry found,
	 * gfs2_lookup_simple callers expect ENOENT
	 * and do not check for NULL.
	 */
	if (inode == NULL)
		return ERR_PTR(-ENOENT);
	else
		return inode;
}

/**
 * gfs2_lookupi - Look up a filename in a directory and return its inode
 * @dir: The directory inode
 * @name: The name of the inode to look for
 * @is_root: If 1, ignore the caller's permissions
 * @nd: The nameidata from the VFS lookup (may be NULL)
 *
 * This can be called via the VFS filldir function when NFS is doing
 * a readdirplus and the inode which it is intending to stat isn't
 * already in cache. In this case we must not take the directory glock
 * again, since the readdir call will have already taken that lock.
 *
 * Returns: errno
 */

struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
			   int is_root, struct nameidata *nd)
{
	struct super_block *sb = dir->i_sb;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error = 0;
	struct inode *inode = NULL;
	int unlock = 0;

	if (!name->len || name->len > GFS2_FNAMESIZE)
		return ERR_PTR(-ENAMETOOLONG);

	if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) ||
	    (name->len == 2 && memcmp(name->name, "..", 2) == 0 &&
	     dir == sb->s_root->d_inode)) {
		igrab(dir);
		return dir;
	}

	if (gfs2_glock_is_locked_by_me(dip->i_gl) == 0) {
		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
		if (error)
			return ERR_PTR(error);
		unlock = 1;
	}

	if (!is_root) {
		error = permission(dir, MAY_EXEC, NULL);
		if (error)
			goto out;
	}

	inode = gfs2_dir_search(dir, name);
	if (IS_ERR(inode))
		error = PTR_ERR(inode);
out:
	if (unlock)
		gfs2_glock_dq_uninit(&d_gh);
	if (error == -ENOENT)
		return NULL;
	return inode ? inode : ERR_PTR(error);
}

static void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf)
{
	const struct gfs2_inum_range *str = buf;

	ir->ir_start = be64_to_cpu(str->ir_start);
	ir->ir_length = be64_to_cpu(str->ir_length);
}

static void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf)
{
	struct gfs2_inum_range *str = buf;

	str->ir_start = cpu_to_be64(ir->ir_start);
	str->ir_length = cpu_to_be64(ir->ir_length);
}
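
/*
 * pick_formal_ino_1 - try to take the next formal inode number from the
 * range stored in sd_ir_inode.
 *
 * Returns: 0 on success (with *formal_ino set), 1 if the cached range is
 * empty and a new range must be claimed, or a negative errno.
 */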
static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
	struct buffer_head *bh;
	struct gfs2_inum_range_host ir;
	int error;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;
	mutex_lock(&sdp->sd_inum_mutex);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error) {
		mutex_unlock(&sdp->sd_inum_mutex);
		gfs2_trans_end(sdp);
		return error;
	}

	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

	if (ir.ir_length) {
		*formal_ino = ir.ir_start++;
		ir.ir_length--;
		gfs2_trans_add_bh(ip->i_gl, bh, 1);
		gfs2_inum_range_out(&ir,
				    bh->b_data + sizeof(struct gfs2_dinode));
		brelse(bh);
		mutex_unlock(&sdp->sd_inum_mutex);
		gfs2_trans_end(sdp);
		return 0;
	}

	brelse(bh);

	mutex_unlock(&sdp->sd_inum_mutex);
	gfs2_trans_end(sdp);

	return 1;
}
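
/*
 * pick_formal_ino_2 - claim a fresh range of GFS2_INUM_QUANTUM formal
 * inode numbers from sd_inum_inode (under its exclusive glock) if the
 * range cached in sd_ir_inode is empty, then hand out the next number.
 *
 * Returns: errno
 */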
static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode);
	struct gfs2_holder gh;
	struct buffer_head *bh;
	struct gfs2_inum_range_host ir;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out;
	mutex_lock(&sdp->sd_inum_mutex);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_end_trans;

	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

	if (!ir.ir_length) {
		struct buffer_head *m_bh;
		u64 x, y;
		__be64 z;

		error = gfs2_meta_inode_buffer(m_ip, &m_bh);
		if (error)
			goto out_brelse;

		z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
		x = y = be64_to_cpu(z);
		ir.ir_start = x;
		ir.ir_length = GFS2_INUM_QUANTUM;
		x += GFS2_INUM_QUANTUM;
		if (x < y)
			gfs2_consist_inode(m_ip);
		z = cpu_to_be64(x);
		gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
		*(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z;

		brelse(m_bh);
	}

	*formal_ino = ir.ir_start++;
	ir.ir_length--;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	gfs2_inum_range_out(&ir, bh->b_data + sizeof(struct gfs2_dinode));

out_brelse:
	brelse(bh);
out_end_trans:
	mutex_unlock(&sdp->sd_inum_mutex);
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}

static int pick_formal_ino(struct gfs2_sbd *sdp, u64 *inum)
{
	int error;

	error = pick_formal_ino_1(sdp, inum);
	if (error <= 0)
		return error;

	error = pick_formal_ino_2(sdp, inum);

	return error;
}

/**
 * create_ok - OK to create a new on-disk inode here?
 * @dip: Directory in which dinode is to be created
 * @name: Name of new dinode
 * @mode:
 *
 * Returns: errno
 */

static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
		     unsigned int mode)
{
	int error;

	error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
	if (error)
		return error;

	/* Don't create entries in an unlinked directory */
	if (!dip->i_inode.i_nlink)
		return -EPERM;

	error = gfs2_dir_check(&dip->i_inode, name, NULL);
	switch (error) {
	case -ENOENT:
		error = 0;
		break;
	case 0:
		return -EEXIST;
	default:
		return error;
	}

	if (dip->i_di.di_entries == (u32)-1)
		return -EFBIG;
	if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
		return -EMLINK;

	return 0;
}
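
/*
 * munge_mode_uid_gid - choose mode, uid and gid for a new inode based on
 * the parent directory @dip, honouring the "suiddir" mount option and
 * setgid directory inheritance.
 */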
static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
			       unsigned int *uid, unsigned int *gid)
{
	if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
	    (dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) {
		if (S_ISDIR(*mode))
			*mode |= S_ISUID;
		else if (dip->i_inode.i_uid != current->fsuid)
			*mode &= ~07111;
		*uid = dip->i_inode.i_uid;
	} else
		*uid = current->fsuid;

	if (dip->i_inode.i_mode & S_ISGID) {
		if (S_ISDIR(*mode))
			*mode |= S_ISGID;
		*gid = dip->i_inode.i_gid;
	} else
		*gid = current->fsgid;
}
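
/*
 * alloc_dinode - reserve space in a resource group and allocate the disk
 * block for a new dinode, returning its block address in *no_addr and its
 * generation number in *generation.
 *
 * Returns: errno
 */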
static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	int error;

	if (gfs2_alloc_get(dip) == NULL)
		return -ENOMEM;

	dip->i_alloc->al_requested = RES_DINODE;
	error = gfs2_inplace_reserve(dip);
	if (error)
		goto out;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS, 0);
	if (error)
		goto out_ipreserv;

	*no_addr = gfs2_alloc_di(dip, generation);

	gfs2_trans_end(sdp);

out_ipreserv:
	gfs2_inplace_release(dip);
out:
	gfs2_alloc_put(dip);
	return error;
}

/**
 * init_dinode - Fill in a new dinode structure
 * @dip: the directory this inode is being created in
 * @gl: The glock covering the new inode
 * @inum: the inode number
 * @mode: the file permissions
 * @uid:
 * @gid:
 *
 */

static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
			const struct gfs2_inum_host *inum, unsigned int mode,
			unsigned int uid, unsigned int gid,
			const u64 *generation, dev_t dev, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_dinode *di;
	struct buffer_head *dibh;
	struct timespec tv = CURRENT_TIME;

	dibh = gfs2_meta_new(gl, inum->no_addr);
	gfs2_trans_add_bh(gl, dibh, 1);
	gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
	di = (struct gfs2_dinode *)dibh->b_data;

	di->di_num.no_formal_ino = cpu_to_be64(inum->no_formal_ino);
	di->di_num.no_addr = cpu_to_be64(inum->no_addr);
	di->di_mode = cpu_to_be32(mode);
	di->di_uid = cpu_to_be32(uid);
	di->di_gid = cpu_to_be32(gid);
	di->di_nlink = 0;
	di->di_size = 0;
	di->di_blocks = cpu_to_be64(1);
	di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(tv.tv_sec);
	di->di_major = cpu_to_be32(MAJOR(dev));
	di->di_minor = cpu_to_be32(MINOR(dev));
	di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
	di->di_generation = cpu_to_be64(*generation);
	di->di_flags = 0;

	if (S_ISREG(mode)) {
		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
		    gfs2_tune_get(sdp, gt_new_files_jdata))
			di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_DIRECTIO) ||
		    gfs2_tune_get(sdp, gt_new_files_directio))
			di->di_flags |= cpu_to_be32(GFS2_DIF_DIRECTIO);
	} else if (S_ISDIR(mode)) {
		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
					    GFS2_DIF_INHERIT_DIRECTIO);
		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
					    GFS2_DIF_INHERIT_JDATA);
	}

	di->__pad1 = 0;
	di->di_payload_format = cpu_to_be32(S_ISDIR(mode) ? GFS2_FORMAT_DE : 0);
	di->di_height = 0;
	di->__pad2 = 0;
	di->__pad3 = 0;
	di->di_depth = 0;
	di->di_entries = 0;
	memset(&di->__pad4, 0, sizeof(di->__pad4));
	di->di_eattr = 0;
	di->di_atime_nsec = cpu_to_be32(tv.tv_nsec);
	di->di_mtime_nsec = cpu_to_be32(tv.tv_nsec);
	di->di_ctime_nsec = cpu_to_be32(tv.tv_nsec);
	memset(&di->di_reserved, 0, sizeof(di->di_reserved));

	set_buffer_uptodate(dibh);

	*bhp = dibh;
}
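
/*
 * make_dinode - perform the quota checks and run the transaction which
 * writes the new on-disk dinode via init_dinode(), with ownership and mode
 * adjusted by munge_mode_uid_gid().
 *
 * Returns: errno
 */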
static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
		       unsigned int mode, const struct gfs2_inum_host *inum,
		       const u64 *generation, dev_t dev, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	unsigned int uid, gid;
	int error;

	munge_mode_uid_gid(dip, &mode, &uid, &gid);
	gfs2_alloc_get(dip);

	error = gfs2_quota_lock(dip, uid, gid);
	if (error)
		goto out;

	error = gfs2_quota_check(dip, uid, gid);
	if (error)
		goto out_quota;

	error = gfs2_trans_begin(sdp, RES_DINODE + RES_QUOTA, 0);
	if (error)
		goto out_quota;

	init_dinode(dip, gl, inum, mode, uid, gid, generation, dev, bhp);
	gfs2_quota_change(dip, +1, uid, gid);
	gfs2_trans_end(sdp);

out_quota:
	gfs2_quota_unlock(dip);
out:
	gfs2_alloc_put(dip);
	return error;
}
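
/*
 * link_dinode - add the directory entry for a newly created inode and set
 * its link count to 1, reserving extra directory space first when
 * gfs2_diradd_alloc_required() indicates the entry will not fit as-is.
 *
 * Returns: errno
 */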
static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
		       struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_alloc *al;
	int alloc_required;
	struct buffer_head *dibh;
	int error;

	al = gfs2_alloc_get(dip);

	error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto fail;

	error = alloc_required = gfs2_diradd_alloc_required(&dip->i_inode, name);
	if (alloc_required < 0)
		goto fail_quota_locks;
	if (alloc_required) {
		error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
		if (error)
			goto fail_quota_locks;

		al->al_requested = sdp->sd_max_dirres;

		error = gfs2_inplace_reserve(dip);
		if (error)
			goto fail_quota_locks;

		error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
					 al->al_rgd->rd_length +
					 2 * RES_DINODE +
					 RES_STATFS + RES_QUOTA, 0);
		if (error)
			goto fail_ipreserv;
	} else {
		error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0);
		if (error)
			goto fail_quota_locks;
	}

	error = gfs2_dir_add(&dip->i_inode, name, ip, IF2DT(ip->i_inode.i_mode));
	if (error)
		goto fail_end_trans;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto fail_end_trans;
	ip->i_inode.i_nlink = 1;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
	return 0;

fail_end_trans:
	gfs2_trans_end(sdp);

fail_ipreserv:
	if (dip->i_alloc->al_rgd)
		gfs2_inplace_release(dip);

fail_quota_locks:
	gfs2_quota_unlock(dip);

fail:
	gfs2_alloc_put(dip);
	return error;
}
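
/*
 * gfs2_security_init - request an initial security xattr for the new inode
 * from the security module and store it as a GFS2_EATYPE_SECURITY extended
 * attribute; -EOPNOTSUPP from the LSM is treated as success.
 *
 * Returns: errno
 */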
static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip)
{
	int err;
	size_t len;
	void *value;
	char *name;
	struct gfs2_ea_request er;

	err = security_inode_init_security(&ip->i_inode, &dip->i_inode,
					   &name, &value, &len);

	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	memset(&er, 0, sizeof(struct gfs2_ea_request));

	er.er_type = GFS2_EATYPE_SECURITY;
	er.er_name = name;
	er.er_data = value;
	er.er_name_len = strlen(name);
	er.er_data_len = len;

	err = gfs2_ea_set_i(ip, &er);

	kfree(value);
	kfree(name);

	return err;
}

/**
 * gfs2_createi - Create a new inode
 * @ghs: An array of two holders
 * @name: The name of the new file
 * @mode: the permissions on the new inode
 *
 * @ghs[0] is an initialized holder for the directory
 * @ghs[1] is the holder for the inode lock
 *
 * If the return value is not NULL, the glocks on both the directory and the new
 * file are held. A transaction has been started and an inplace reservation
 * is held, as well.
 *
 * Returns: An inode
 */

struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
			   unsigned int mode, dev_t dev)
{
	struct inode *inode = NULL;
	struct gfs2_inode *dip = ghs->gh_gl->gl_object;
	struct inode *dir = &dip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_inum_host inum = { .no_addr = 0, .no_formal_ino = 0 };
	int error;
	u64 generation;
	struct buffer_head *bh = NULL;

	if (!name->len || name->len > GFS2_FNAMESIZE)
		return ERR_PTR(-ENAMETOOLONG);

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
	error = gfs2_glock_nq(ghs);
	if (error)
		goto fail;

	error = create_ok(dip, name, mode);
	if (error)
		goto fail_gunlock;

	error = pick_formal_ino(sdp, &inum.no_formal_ino);
	if (error)
		goto fail_gunlock;

	error = alloc_dinode(dip, &inum.no_addr, &generation);
	if (error)
		goto fail_gunlock;

	error = gfs2_glock_nq_num(sdp, inum.no_addr, &gfs2_inode_glops,
				  LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
	if (error)
		goto fail_gunlock;

	error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev, &bh);
	if (error)
		goto fail_gunlock2;

	inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode),
				  inum.no_addr,
				  inum.no_formal_ino, 0);
	if (IS_ERR(inode))
		goto fail_gunlock2;

	error = gfs2_inode_refresh(GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	error = gfs2_acl_create(dip, GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	error = gfs2_security_init(dip, GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	error = link_dinode(dip, name, GFS2_I(inode));
	if (error)
		goto fail_gunlock2;

	if (bh)
		brelse(bh);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	return inode;

fail_gunlock2:
	gfs2_glock_dq_uninit(ghs + 1);
	if (inode)
		iput(inode);
fail_gunlock:
	gfs2_glock_dq(ghs);
fail:
	if (bh)
		brelse(bh);
	return ERR_PTR(error);
}

/**
 * gfs2_rmdiri - Remove a directory
 * @dip: The parent directory of the directory to be removed
 * @name: The name of the directory to be removed
 * @ip: The GFS2 inode of the directory to be removed
 *
 * Assumes Glocks on dip and ip are held
 *
 * Returns: errno
 */

int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
		struct gfs2_inode *ip)
{
	struct qstr dotname;
	int error;

	if (ip->i_di.di_entries != 2) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	error = gfs2_dir_del(dip, name);
	if (error)
		return error;

	error = gfs2_change_nlink(dip, -1);
	if (error)
		return error;

	gfs2_str2qstr(&dotname, ".");
	error = gfs2_dir_del(ip, &dotname);
	if (error)
		return error;

	gfs2_str2qstr(&dotname, "..");
	error = gfs2_dir_del(ip, &dotname);
	if (error)
		return error;

	/* It looks odd, but it really should be done twice */
	error = gfs2_change_nlink(ip, -1);
	if (error)
		return error;

	error = gfs2_change_nlink(ip, -1);
	if (error)
		return error;

	return error;
}

/*
 * gfs2_unlink_ok - check to see that an inode is still in a directory
 * @dip: the directory
 * @name: the name of the file
 * @ip: the inode
 *
 * Assumes that the lock on (at least) @dip is held.
 *
 * Returns: 0 if the parent/child relationship is correct, errno if it isn't
 */

int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
		   const struct gfs2_inode *ip)
{
	int error;

	if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
		return -EPERM;

	if ((dip->i_inode.i_mode & S_ISVTX) &&
	    dip->i_inode.i_uid != current->fsuid &&
	    ip->i_inode.i_uid != current->fsuid && !capable(CAP_FOWNER))
		return -EPERM;

	if (IS_APPEND(&dip->i_inode))
		return -EPERM;

	error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
	if (error)
		return error;

	error = gfs2_dir_check(&dip->i_inode, name, ip);
	if (error)
		return error;

	return 0;
}

/*
 * gfs2_ok_to_move - check if it's ok to move a directory to another directory
 * @this: move this
 * @to: to here
 *
 * Follow @to back to the root and make sure we don't encounter @this
 * Assumes we already hold the rename lock.
 *
 * Returns: errno
 */

int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
{
	struct inode *dir = &to->i_inode;
	struct super_block *sb = dir->i_sb;
	struct inode *tmp;
	struct qstr dotdot;
	int error = 0;

	gfs2_str2qstr(&dotdot, "..");

	igrab(dir);

	for (;;) {
		if (dir == &this->i_inode) {
			error = -EINVAL;
			break;
		}
		if (dir == sb->s_root->d_inode) {
			error = 0;
			break;
		}

		tmp = gfs2_lookupi(dir, &dotdot, 1, NULL);
		if (IS_ERR(tmp)) {
			error = PTR_ERR(tmp);
			break;
		}

		iput(dir);
		dir = tmp;
	}

	iput(dir);

	return error;
}

/**
 * gfs2_readlinki - return the contents of a symlink
 * @ip: the symlink's inode
 * @buf: a pointer to the buffer to be filled
 * @len: a pointer to the length of @buf
 *
 * If @buf is too small, a piece of memory is kmalloc()ed and needs
 * to be freed by the caller.
 *
 * Returns: errno
 */

int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
{
	struct gfs2_holder i_gh;
	struct buffer_head *dibh;
	unsigned int x;
	int error;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
	error = gfs2_glock_nq_atime(&i_gh);
	if (error) {
		gfs2_holder_uninit(&i_gh);
		return error;
	}

	if (!ip->i_di.di_size) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	x = ip->i_di.di_size + 1;
	if (x > *len) {
		*buf = kmalloc(x, GFP_KERNEL);
		if (!*buf) {
			error = -ENOMEM;
			goto out_brelse;
		}
	}

	memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
	*len = x;

out_brelse:
	brelse(dibh);
out:
	gfs2_glock_dq_uninit(&i_gh);
	return error;
}

/**
 * gfs2_glock_nq_atime - Acquire a hold on an inode's glock, and
 *                       conditionally update the inode's atime
 * @gh: the holder to acquire
 *
 * Tests atime (access time) for gfs2_read, gfs2_readdir and gfs2_mmap.
 * Update if the difference between the current time and the inode's current
 * atime is greater than an interval specified at mount.
 *
 * Returns: errno
 */

int gfs2_glock_nq_atime(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	s64 quantum = gfs2_tune_get(sdp, gt_atime_quantum);
	unsigned int state;
	int flags;
	int error;
	struct timespec tv = CURRENT_TIME;

	if (gfs2_assert_warn(sdp, gh->gh_flags & GL_ATIME) ||
	    gfs2_assert_warn(sdp, !(gh->gh_flags & GL_ASYNC)) ||
	    gfs2_assert_warn(sdp, gl->gl_ops == &gfs2_inode_glops))
		return -EINVAL;

	state = gh->gh_state;
	flags = gh->gh_flags;

	error = gfs2_glock_nq(gh);
	if (error)
		return error;

	if (test_bit(SDF_NOATIME, &sdp->sd_flags) ||
	    (sdp->sd_vfs->s_flags & MS_RDONLY))
		return 0;

	if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) {
		gfs2_glock_dq(gh);
		gfs2_holder_reinit(LM_ST_EXCLUSIVE, gh->gh_flags & ~LM_FLAG_ANY,
				   gh);
		error = gfs2_glock_nq(gh);
		if (error)
			return error;

		/* Verify that atime hasn't been updated while we were
		   trying to get exclusive lock. */

		tv = CURRENT_TIME;
		if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) {
			struct buffer_head *dibh;
			struct gfs2_dinode *di;

			error = gfs2_trans_begin(sdp, RES_DINODE, 0);
			if (error == -EROFS)
				return 0;
			if (error)
				goto fail;

			error = gfs2_meta_inode_buffer(ip, &dibh);
			if (error)
				goto fail_end_trans;

			ip->i_inode.i_atime = tv;

			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
			di = (struct gfs2_dinode *)dibh->b_data;
			di->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
			di->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
			brelse(dibh);

			gfs2_trans_end(sdp);
		}

		/* If someone else has asked for the glock,
		   unlock and let them have it. Then reacquire
		   in the original state. */
		if (gfs2_glock_is_blocking(gl)) {
			gfs2_glock_dq(gh);
			gfs2_holder_reinit(state, flags, gh);
			return gfs2_glock_nq(gh);
		}
	}

	return 0;

fail_end_trans:
	gfs2_trans_end(sdp);
fail:
	gfs2_glock_dq(gh);
	return error;
}

static int
__gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		error = inode_setattr(&ip->i_inode, attr);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}
	return error;
}

/**
 * gfs2_setattr_simple -
 * @ip:
 * @attr:
 *
 * Called with a reference on the vnode.
 *
 * Returns: errno
 */

int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
	int error;

	if (current->journal_info)
		return __gfs2_setattr_simple(ip, attr);

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE, 0);
	if (error)
		return error;

	error = __gfs2_setattr_simple(ip, attr);
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
	return error;
}
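
/*
 * gfs2_dinode_out - copy the incore inode back into an on-disk dinode
 * @ip: the incore GFS2 inode
 * @buf: the buffer to receive the on-disk struct gfs2_dinode
 *
 * The inverse of gfs2_dinode_in(): writes the incore fields back out in
 * big-endian on-disk format, including the metadata header.
 */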
void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	const struct gfs2_dinode_host *di = &ip->i_di;
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.__pad0 = 0;
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_header.__pad1 = 0;
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
	str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
	str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
	str->di_size = cpu_to_be64(di->di_size);
	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
	str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);

	str->di_goal_meta = cpu_to_be64(ip->i_goal);
	str->di_goal_data = cpu_to_be64(ip->i_goal);
	str->di_generation = cpu_to_be64(di->di_generation);

	str->di_flags = cpu_to_be32(di->di_flags);
	str->di_height = cpu_to_be16(ip->i_height);
	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
					     !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(di->di_entries);

	str->di_eattr = cpu_to_be64(di->di_eattr);
	str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
	str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
	str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
}

void gfs2_dinode_print(const struct gfs2_inode *ip)
{
	const struct gfs2_dinode_host *di = &ip->i_di;

	printk(KERN_INFO " no_formal_ino = %llu\n",
	       (unsigned long long)ip->i_no_formal_ino);
	printk(KERN_INFO " no_addr = %llu\n",
	       (unsigned long long)ip->i_no_addr);
	printk(KERN_INFO " di_size = %llu\n", (unsigned long long)di->di_size);
	printk(KERN_INFO " blocks = %llu\n",
	       (unsigned long long)gfs2_get_inode_blocks(&ip->i_inode));
	printk(KERN_INFO " i_goal = %llu\n",
	       (unsigned long long)ip->i_goal);
	printk(KERN_INFO " di_flags = 0x%.8X\n", di->di_flags);
	printk(KERN_INFO " i_height = %u\n", ip->i_height);
	printk(KERN_INFO " i_depth = %u\n", ip->i_depth);
	printk(KERN_INFO " di_entries = %u\n", di->di_entries);
	printk(KERN_INFO " di_eattr = %llu\n",
	       (unsigned long long)di->di_eattr);
}