inode.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/posix_acl.h>
#include <linux/sort.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/lm_interface.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "acl.h"
#include "bmap.h"
#include "dir.h"
#include "eattr.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "ops_inode.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

struct gfs2_inum_range_host {
        u64 ir_start;
        u64 ir_length;
};
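
/*
 * iget5_locked()/ilookup5() callbacks. The VFS inode hash value is only
 * the disk block address truncated to an unsigned long, so iget_test()
 * must compare the full 64-bit no_addr, and it only matches inodes whose
 * i_private has already been set up.
 */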
static int iget_test(struct inode *inode, void *opaque)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        u64 *no_addr = opaque;

        if (ip->i_no_addr == *no_addr &&
            inode->i_private != NULL)
                return 1;

        return 0;
}

static int iget_set(struct inode *inode, void *opaque)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        u64 *no_addr = opaque;

        inode->i_ino = (unsigned long)*no_addr;
        ip->i_no_addr = *no_addr;
        return 0;
}

struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
{
        unsigned long hash = (unsigned long)no_addr;
        return ilookup5(sb, hash, iget_test, &no_addr);
}

static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
{
        unsigned long hash = (unsigned long)no_addr;
        return iget5_locked(sb, hash, iget_test, iget_set, &no_addr);
}

struct gfs2_skip_data {
        u64 no_addr;
        int skipped;
};
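
/*
 * The _skip variants below behave like iget_test()/iget_set() except that
 * an inode which is currently being freed (I_FREEING/I_CLEAR/I_WILL_FREE)
 * is not matched; instead data->skipped is set so the caller can tell that
 * a dying inode was found and skipped.
 */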
static int iget_skip_test(struct inode *inode, void *opaque)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_skip_data *data = opaque;

        if (ip->i_no_addr == data->no_addr && inode->i_private != NULL) {
                if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
                        data->skipped = 1;
                        return 0;
                }
                return 1;
        }
        return 0;
}

static int iget_skip_set(struct inode *inode, void *opaque)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_skip_data *data = opaque;

        if (data->skipped)
                return 1;
        inode->i_ino = (unsigned long)(data->no_addr);
        ip->i_no_addr = data->no_addr;
        return 0;
}

static struct inode *gfs2_iget_skip(struct super_block *sb,
                                    u64 no_addr)
{
        struct gfs2_skip_data data;
        unsigned long hash = (unsigned long)no_addr;

        data.no_addr = no_addr;
        data.skipped = 0;
        return iget5_locked(sb, hash, iget_skip_test, iget_skip_set, &data);
}

/**
 * GFS2 lookup code fills in vfs inode contents based on info obtained
 * from directory entry inside gfs2_inode_lookup(). This has caused issues
 * with the NFS code path since its get_dentry routine doesn't have the
 * relevant directory entry when gfs2_inode_lookup() is invoked. Part of
 * the code inside gfs2_inode_lookup() needs to get moved around.
 *
 * Clean up I_LOCK and I_NEW as well.
 **/
void gfs2_set_iop(struct inode *inode)
{
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        umode_t mode = inode->i_mode;

        if (S_ISREG(mode)) {
                inode->i_op = &gfs2_file_iops;
                if (sdp->sd_args.ar_localflocks)
                        inode->i_fop = &gfs2_file_fops_nolock;
                else
                        inode->i_fop = &gfs2_file_fops;
        } else if (S_ISDIR(mode)) {
                inode->i_op = &gfs2_dir_iops;
                if (sdp->sd_args.ar_localflocks)
                        inode->i_fop = &gfs2_dir_fops_nolock;
                else
                        inode->i_fop = &gfs2_dir_fops;
        } else if (S_ISLNK(mode)) {
                inode->i_op = &gfs2_symlink_iops;
        } else {
                inode->i_op = &gfs2_dev_iops;
        }

        unlock_new_inode(inode);
}

/**
 * gfs2_inode_lookup - Lookup an inode
 * @sb: The super block
 * @type: The type of the inode
 * @no_addr: The inode number (disk block address)
 * @no_formal_ino: The inode's formal inode number
 * @skip_freeing: set this to skip (rather than return) an inode which is
 *                currently being freed
 *
 * Returns: A VFS inode, or an error
 */
struct inode *gfs2_inode_lookup(struct super_block *sb,
                                unsigned int type,
                                u64 no_addr,
                                u64 no_formal_ino, int skip_freeing)
{
        struct inode *inode;
        struct gfs2_inode *ip;
        struct gfs2_glock *io_gl;
        int error;

        if (skip_freeing)
                inode = gfs2_iget_skip(sb, no_addr);
        else
                inode = gfs2_iget(sb, no_addr);
        ip = GFS2_I(inode);

        if (!inode)
                return ERR_PTR(-ENOBUFS);

        if (inode->i_state & I_NEW) {
                struct gfs2_sbd *sdp = GFS2_SB(inode);
                inode->i_private = ip;
                ip->i_no_formal_ino = no_formal_ino;

                error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
                if (unlikely(error))
                        goto fail;
                ip->i_gl->gl_object = ip;

                error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
                if (unlikely(error))
                        goto fail_put;

                set_bit(GIF_INVALID, &ip->i_flags);
                error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
                if (unlikely(error))
                        goto fail_iopen;
                ip->i_iopen_gh.gh_gl->gl_object = ip;

                gfs2_glock_put(io_gl);

                if ((type == DT_UNKNOWN) && (no_formal_ino == 0))
                        goto gfs2_nfsbypass;

                inode->i_mode = DT2IF(type);

                /*
                 * We must read the inode in order to work out its type in
                 * this case. Note that this doesn't happen often as we normally
                 * know the type beforehand. This code path only occurs during
                 * unlinked inode recovery (where it is safe to do this glock,
                 * which is not true in the general case).
                 */
                if (type == DT_UNKNOWN) {
                        struct gfs2_holder gh;
                        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
                        if (unlikely(error))
                                goto fail_glock;
                        /* Inode is now uptodate */
                        gfs2_glock_dq_uninit(&gh);
                }

                gfs2_set_iop(inode);
        }

gfs2_nfsbypass:
        return inode;
fail_glock:
        gfs2_glock_dq(&ip->i_iopen_gh);
fail_iopen:
        gfs2_glock_put(io_gl);
fail_put:
        ip->i_gl->gl_object = NULL;
        gfs2_glock_put(ip->i_gl);
fail:
        iput(inode);
        return ERR_PTR(error);
}
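
/*
 * gfs2_dinode_in - Copy an on-disk dinode into the incore inode
 * @ip: The GFS2 inode
 * @buf: The buffer containing the on-disk dinode
 *
 * Returns: 0 on success, -EIO if the dinode's block address does not
 *          match the inode it is being read into.
 */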
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
        struct gfs2_dinode_host *di = &ip->i_di;
        const struct gfs2_dinode *str = buf;

        if (ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)) {
                if (gfs2_consist_inode(ip))
                        gfs2_dinode_print(ip);
                return -EIO;
        }

        ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
        ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
        ip->i_inode.i_rdev = 0;
        switch (ip->i_inode.i_mode & S_IFMT) {
        case S_IFBLK:
        case S_IFCHR:
                ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
                                           be32_to_cpu(str->di_minor));
                break;
        };

        ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
        ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
        /*
         * We will need to review setting the nlink count here in the
         * light of the forthcoming ro bind mount work. This is a reminder
         * to do that.
         */
        ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
        di->di_size = be64_to_cpu(str->di_size);
        i_size_write(&ip->i_inode, di->di_size);
        di->di_blocks = be64_to_cpu(str->di_blocks);
        gfs2_set_inode_blocks(&ip->i_inode);
        ip->i_inode.i_atime.tv_sec = be64_to_cpu(str->di_atime);
        ip->i_inode.i_atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
        ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
        ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
        ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
        ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

        di->di_goal_meta = be64_to_cpu(str->di_goal_meta);
        di->di_goal_data = be64_to_cpu(str->di_goal_data);
        di->di_generation = be64_to_cpu(str->di_generation);

        di->di_flags = be32_to_cpu(str->di_flags);
        gfs2_set_inode_flags(&ip->i_inode);
        di->di_height = be16_to_cpu(str->di_height);

        di->di_depth = be16_to_cpu(str->di_depth);
        di->di_entries = be32_to_cpu(str->di_entries);

        di->di_eattr = be64_to_cpu(str->di_eattr);
        if (S_ISREG(ip->i_inode.i_mode))
                gfs2_set_aops(&ip->i_inode);

        return 0;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */
int gfs2_inode_refresh(struct gfs2_inode *ip)
{
        struct buffer_head *dibh;
        int error;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh, GFS2_METATYPE_DI)) {
                brelse(dibh);
                return -EIO;
        }

        error = gfs2_dinode_in(ip, dibh->b_data);
        brelse(dibh);
        clear_bit(GIF_INVALID, &ip->i_flags);

        return error;
}
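
/*
 * gfs2_dinode_dealloc - Deallocate the on-disk dinode block itself
 * @ip: The GFS2 inode whose dinode block is to be freed
 *
 * Expects di_blocks to be exactly 1, i.e. only the dinode block itself
 * remains allocated to this inode.
 *
 * Returns: errno
 */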
int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_alloc *al;
        struct gfs2_rgrpd *rgd;
        int error;

        if (ip->i_di.di_blocks != 1) {
                if (gfs2_consist_inode(ip))
                        gfs2_dinode_print(ip);
                return -EIO;
        }

        al = gfs2_alloc_get(ip);

        error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
        if (error)
                goto out;

        error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
        if (error)
                goto out_qs;

        rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr);
        if (!rgd) {
                gfs2_consist_inode(ip);
                error = -EIO;
                goto out_rindex_relse;
        }

        error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
                                   &al->al_rgd_gh);
        if (error)
                goto out_rindex_relse;

        error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, 1);
        if (error)
                goto out_rg_gunlock;

        set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
        set_bit(GLF_LFLUSH, &ip->i_gl->gl_flags);

        gfs2_free_di(rgd, ip);

        gfs2_trans_end(sdp);
        clear_bit(GLF_STICKY, &ip->i_gl->gl_flags);

out_rg_gunlock:
        gfs2_glock_dq_uninit(&al->al_rgd_gh);
out_rindex_relse:
        gfs2_glock_dq_uninit(&al->al_ri_gh);
out_qs:
        gfs2_quota_unhold(ip);
out:
        gfs2_alloc_put(ip);
        return error;
}

/**
 * gfs2_change_nlink - Change nlink count on inode
 * @ip: The GFS2 inode
 * @diff: The change in the nlink count required
 *
 * Returns: errno
 */
int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
{
        struct buffer_head *dibh;
        u32 nlink;
        int error;

        BUG_ON(diff != 1 && diff != -1);
        nlink = ip->i_inode.i_nlink + diff;

        /* If we are reducing the nlink count, but the new value ends up being
           bigger than the old one, we must have underflowed. */
        if (diff < 0 && nlink > ip->i_inode.i_nlink) {
                if (gfs2_consist_inode(ip))
                        gfs2_dinode_print(ip);
                return -EIO;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        if (diff > 0)
                inc_nlink(&ip->i_inode);
        else
                drop_nlink(&ip->i_inode);

        ip->i_inode.i_ctime = CURRENT_TIME;

        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
        gfs2_dinode_out(ip, dibh->b_data);
        brelse(dibh);
        mark_inode_dirty(&ip->i_inode);

        if (ip->i_inode.i_nlink == 0)
                gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */

        return error;
}

struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
{
        struct qstr qstr;
        struct inode *inode;

        gfs2_str2qstr(&qstr, name);
        inode = gfs2_lookupi(dip, &qstr, 1, NULL);
        /* gfs2_lookupi has inconsistent callers: vfs
         * related routines expect NULL for no entry found,
         * gfs2_lookup_simple callers expect ENOENT
         * and do not check for NULL.
         */
        if (inode == NULL)
                return ERR_PTR(-ENOENT);
        else
                return inode;
}

/**
 * gfs2_lookupi - Look up a filename in a directory and return its inode
 * @dir: The directory inode
 * @name: The name of the inode to look for
 * @is_root: If 1, ignore the caller's permissions
 * @nd: The nameidata from the lookup (may be NULL)
 *
 * This can be called via the VFS filldir function when NFS is doing
 * a readdirplus and the inode which it's intending to stat isn't
 * already in cache. In this case we must not take the directory glock
 * again, since the readdir call will have already taken that lock.
 *
 * Returns: The inode, NULL if no entry was found, or an error pointer
 */
struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
                           int is_root, struct nameidata *nd)
{
        struct super_block *sb = dir->i_sb;
        struct gfs2_inode *dip = GFS2_I(dir);
        struct gfs2_holder d_gh;
        int error = 0;
        struct inode *inode = NULL;
        int unlock = 0;

        if (!name->len || name->len > GFS2_FNAMESIZE)
                return ERR_PTR(-ENAMETOOLONG);

        if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) ||
            (name->len == 2 && memcmp(name->name, "..", 2) == 0 &&
             dir == sb->s_root->d_inode)) {
                igrab(dir);
                return dir;
        }

        if (gfs2_glock_is_locked_by_me(dip->i_gl) == 0) {
                error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
                if (error)
                        return ERR_PTR(error);
                unlock = 1;
        }

        if (!is_root) {
                error = permission(dir, MAY_EXEC, NULL);
                if (error)
                        goto out;
        }

        inode = gfs2_dir_search(dir, name);
        if (IS_ERR(inode))
                error = PTR_ERR(inode);
out:
        if (unlock)
                gfs2_glock_dq_uninit(&d_gh);
        if (error == -ENOENT)
                return NULL;
        return inode ? inode : ERR_PTR(error);
}

static void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf)
{
        const struct gfs2_inum_range *str = buf;

        ir->ir_start = be64_to_cpu(str->ir_start);
        ir->ir_length = be64_to_cpu(str->ir_length);
}

static void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf)
{
        struct gfs2_inum_range *str = buf;

        str->ir_start = cpu_to_be64(ir->ir_start);
        str->ir_length = cpu_to_be64(ir->ir_length);
}
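
/*
 * pick_formal_ino_1 - Try to take the next formal inode number from the
 * reserved range held in sd_ir_inode, updating the on-disk range in place.
 *
 * Returns: 0 if a number was allocated, 1 if the reserved range is empty
 *          and pick_formal_ino_2() must be used to refill it, or an errno.
 */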
static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino)
{
        struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
        struct buffer_head *bh;
        struct gfs2_inum_range_host ir;
        int error;

        error = gfs2_trans_begin(sdp, RES_DINODE, 0);
        if (error)
                return error;
        mutex_lock(&sdp->sd_inum_mutex);

        error = gfs2_meta_inode_buffer(ip, &bh);
        if (error) {
                mutex_unlock(&sdp->sd_inum_mutex);
                gfs2_trans_end(sdp);
                return error;
        }

        gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

        if (ir.ir_length) {
                *formal_ino = ir.ir_start++;
                ir.ir_length--;
                gfs2_trans_add_bh(ip->i_gl, bh, 1);
                gfs2_inum_range_out(&ir,
                                    bh->b_data + sizeof(struct gfs2_dinode));
                brelse(bh);
                mutex_unlock(&sdp->sd_inum_mutex);
                gfs2_trans_end(sdp);
                return 0;
        }

        brelse(bh);

        mutex_unlock(&sdp->sd_inum_mutex);
        gfs2_trans_end(sdp);

        return 1;
}
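
/*
 * pick_formal_ino_2 - Refill the reserved inum range from the master inum
 * file (sd_inum_inode) under an exclusive glock, then allocate the next
 * formal inode number from it.
 *
 * Returns: errno
 */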
static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
{
        struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode);
        struct gfs2_holder gh;
        struct buffer_head *bh;
        struct gfs2_inum_range_host ir;
        int error;

        error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        if (error)
                return error;

        error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
        if (error)
                goto out;
        mutex_lock(&sdp->sd_inum_mutex);

        error = gfs2_meta_inode_buffer(ip, &bh);
        if (error)
                goto out_end_trans;

        gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

        if (!ir.ir_length) {
                struct buffer_head *m_bh;
                u64 x, y;
                __be64 z;

                error = gfs2_meta_inode_buffer(m_ip, &m_bh);
                if (error)
                        goto out_brelse;

                z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
                x = y = be64_to_cpu(z);
                ir.ir_start = x;
                ir.ir_length = GFS2_INUM_QUANTUM;
                x += GFS2_INUM_QUANTUM;
                if (x < y)
                        gfs2_consist_inode(m_ip);
                z = cpu_to_be64(x);
                gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
                *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z;

                brelse(m_bh);
        }

        *formal_ino = ir.ir_start++;
        ir.ir_length--;

        gfs2_trans_add_bh(ip->i_gl, bh, 1);
        gfs2_inum_range_out(&ir, bh->b_data + sizeof(struct gfs2_dinode));

out_brelse:
        brelse(bh);
out_end_trans:
        mutex_unlock(&sdp->sd_inum_mutex);
        gfs2_trans_end(sdp);
out:
        gfs2_glock_dq_uninit(&gh);
        return error;
}
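
/*
 * pick_formal_ino - Allocate the next formal inode number, trying the
 * cheap local path first and falling back to the cluster-wide refill.
 */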
static int pick_formal_ino(struct gfs2_sbd *sdp, u64 *inum)
{
        int error;

        error = pick_formal_ino_1(sdp, inum);
        if (error <= 0)
                return error;

        error = pick_formal_ino_2(sdp, inum);

        return error;
}

/**
 * create_ok - OK to create a new on-disk inode here?
 * @dip: Directory in which dinode is to be created
 * @name: Name of new dinode
 * @mode: The file type and permissions for the new dinode
 *
 * Returns: errno
 */
static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
                     unsigned int mode)
{
        int error;

        error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
        if (error)
                return error;

        /* Don't create entries in an unlinked directory */
        if (!dip->i_inode.i_nlink)
                return -EPERM;

        error = gfs2_dir_check(&dip->i_inode, name, NULL);
        switch (error) {
        case -ENOENT:
                error = 0;
                break;
        case 0:
                return -EEXIST;
        default:
                return error;
        }

        if (dip->i_di.di_entries == (u32)-1)
                return -EFBIG;
        if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
                return -EMLINK;

        return 0;
}
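
/*
 * munge_mode_uid_gid - Choose the mode, uid and gid for a new inode,
 * applying the "suiddir" mount option and setgid directory inheritance
 * rules based on the parent directory @dip.
 */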
static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
                               unsigned int *uid, unsigned int *gid)
{
        if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
            (dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) {
                if (S_ISDIR(*mode))
                        *mode |= S_ISUID;
                else if (dip->i_inode.i_uid != current->fsuid)
                        *mode &= ~07111;
                *uid = dip->i_inode.i_uid;
        } else
                *uid = current->fsuid;

        if (dip->i_inode.i_mode & S_ISGID) {
                if (S_ISDIR(*mode))
                        *mode |= S_ISGID;
                *gid = dip->i_inode.i_gid;
        } else
                *gid = current->fsgid;
}
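
/*
 * alloc_dinode - Reserve space and allocate a new dinode block
 * @dip: The directory the new inode will be created in
 * @no_addr: Returns the block address of the new dinode
 * @generation: Returns the generation number of the new dinode
 *
 * Returns: errno
 */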
static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation)
{
        struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
        int error;

        if (gfs2_alloc_get(dip) == NULL)
                return -ENOMEM;

        dip->i_alloc->al_requested = RES_DINODE;
        error = gfs2_inplace_reserve(dip);
        if (error)
                goto out;

        error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS, 0);
        if (error)
                goto out_ipreserv;

        *no_addr = gfs2_alloc_di(dip, generation);

        gfs2_trans_end(sdp);

out_ipreserv:
        gfs2_inplace_release(dip);
out:
        gfs2_alloc_put(dip);
        return error;
}

/**
 * init_dinode - Fill in a new dinode structure
 * @dip: the directory this inode is being created in
 * @gl: The glock covering the new inode
 * @inum: the inode number
 * @mode: the file permissions
 * @uid: The uid of the new inode
 * @gid: The gid of the new inode
 * @generation: The generation number of the new inode
 * @dev: The device number, for device special files
 * @bhp: Returns the buffer head containing the new dinode
 *
 */
static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
                        const struct gfs2_inum_host *inum, unsigned int mode,
                        unsigned int uid, unsigned int gid,
                        const u64 *generation, dev_t dev, struct buffer_head **bhp)
{
        struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
        struct gfs2_dinode *di;
        struct buffer_head *dibh;
        struct timespec tv = CURRENT_TIME;

        dibh = gfs2_meta_new(gl, inum->no_addr);
        gfs2_trans_add_bh(gl, dibh, 1);
        gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
        gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
        di = (struct gfs2_dinode *)dibh->b_data;

        di->di_num.no_formal_ino = cpu_to_be64(inum->no_formal_ino);
        di->di_num.no_addr = cpu_to_be64(inum->no_addr);
        di->di_mode = cpu_to_be32(mode);
        di->di_uid = cpu_to_be32(uid);
        di->di_gid = cpu_to_be32(gid);
        di->di_nlink = 0;
        di->di_size = 0;
        di->di_blocks = cpu_to_be64(1);
        di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(tv.tv_sec);
        di->di_major = cpu_to_be32(MAJOR(dev));
        di->di_minor = cpu_to_be32(MINOR(dev));
        di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
        di->di_generation = cpu_to_be64(*generation);
        di->di_flags = 0;

        if (S_ISREG(mode)) {
                if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
                    gfs2_tune_get(sdp, gt_new_files_jdata))
                        di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
                if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_DIRECTIO) ||
                    gfs2_tune_get(sdp, gt_new_files_directio))
                        di->di_flags |= cpu_to_be32(GFS2_DIF_DIRECTIO);
        } else if (S_ISDIR(mode)) {
                di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
                                            GFS2_DIF_INHERIT_DIRECTIO);
                di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
                                            GFS2_DIF_INHERIT_JDATA);
        }

        di->__pad1 = 0;
        di->di_payload_format = cpu_to_be32(S_ISDIR(mode) ? GFS2_FORMAT_DE : 0);
        di->di_height = 0;
        di->__pad2 = 0;
        di->__pad3 = 0;
        di->di_depth = 0;
        di->di_entries = 0;
        memset(&di->__pad4, 0, sizeof(di->__pad4));
        di->di_eattr = 0;
        di->di_atime_nsec = cpu_to_be32(tv.tv_nsec);
        di->di_mtime_nsec = cpu_to_be32(tv.tv_nsec);
        di->di_ctime_nsec = cpu_to_be32(tv.tv_nsec);
        memset(&di->di_reserved, 0, sizeof(di->di_reserved));

        set_buffer_uptodate(dibh);

        *bhp = dibh;
}
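
/*
 * make_dinode - Write a new dinode to disk
 * @dip: The directory the new inode is being created in
 * @gl: The glock covering the new inode
 * @mode: The mode of the new inode
 * @inum: The block address and formal inode number of the new inode
 * @generation: The generation number of the new inode
 * @dev: The device number, if any
 * @bhp: Returns the buffer head containing the new dinode
 *
 * Handles the uid/gid/mode munging and the quota accounting, then calls
 * init_dinode() inside a transaction.
 *
 * Returns: errno
 */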
static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
                       unsigned int mode, const struct gfs2_inum_host *inum,
                       const u64 *generation, dev_t dev, struct buffer_head **bhp)
{
        struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
        unsigned int uid, gid;
        int error;

        munge_mode_uid_gid(dip, &mode, &uid, &gid);
        gfs2_alloc_get(dip);

        error = gfs2_quota_lock(dip, uid, gid);
        if (error)
                goto out;

        error = gfs2_quota_check(dip, uid, gid);
        if (error)
                goto out_quota;

        error = gfs2_trans_begin(sdp, RES_DINODE + RES_QUOTA, 0);
        if (error)
                goto out_quota;

        init_dinode(dip, gl, inum, mode, uid, gid, generation, dev, bhp);
        gfs2_quota_change(dip, +1, uid, gid);
        gfs2_trans_end(sdp);

out_quota:
        gfs2_quota_unlock(dip);
out:
        gfs2_alloc_put(dip);
        return error;
}
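
/*
 * link_dinode - Add a directory entry for a freshly created inode and set
 * its initial link count to 1.
 * @dip: The directory
 * @name: The name of the new entry
 * @ip: The new inode
 *
 * Returns: errno
 */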
static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
                       struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
        struct gfs2_alloc *al;
        int alloc_required;
        struct buffer_head *dibh;
        int error;

        al = gfs2_alloc_get(dip);

        error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
        if (error)
                goto fail;

        error = alloc_required = gfs2_diradd_alloc_required(&dip->i_inode, name);
        if (alloc_required < 0)
                goto fail_quota_locks;
        if (alloc_required) {
                error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
                if (error)
                        goto fail_quota_locks;

                al->al_requested = sdp->sd_max_dirres;

                error = gfs2_inplace_reserve(dip);
                if (error)
                        goto fail_quota_locks;

                error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
                                         al->al_rgd->rd_length +
                                         2 * RES_DINODE +
                                         RES_STATFS + RES_QUOTA, 0);
                if (error)
                        goto fail_ipreserv;
        } else {
                error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0);
                if (error)
                        goto fail_quota_locks;
        }

        error = gfs2_dir_add(&dip->i_inode, name, ip, IF2DT(ip->i_inode.i_mode));
        if (error)
                goto fail_end_trans;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                goto fail_end_trans;
        ip->i_inode.i_nlink = 1;
        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
        gfs2_dinode_out(ip, dibh->b_data);
        brelse(dibh);
        return 0;

fail_end_trans:
        gfs2_trans_end(sdp);

fail_ipreserv:
        if (dip->i_alloc->al_rgd)
                gfs2_inplace_release(dip);

fail_quota_locks:
        gfs2_quota_unlock(dip);

fail:
        gfs2_alloc_put(dip);
        return error;
}
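
/*
 * gfs2_security_init - Set the initial security extended attribute on a
 * new inode, as provided by the LSM via security_inode_init_security().
 * A return of -EOPNOTSUPP from the LSM is not treated as an error.
 *
 * Returns: errno
 */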
static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip)
{
        int err;
        size_t len;
        void *value;
        char *name;
        struct gfs2_ea_request er;

        err = security_inode_init_security(&ip->i_inode, &dip->i_inode,
                                           &name, &value, &len);

        if (err) {
                if (err == -EOPNOTSUPP)
                        return 0;
                return err;
        }

        memset(&er, 0, sizeof(struct gfs2_ea_request));

        er.er_type = GFS2_EATYPE_SECURITY;
        er.er_name = name;
        er.er_data = value;
        er.er_name_len = strlen(name);
        er.er_data_len = len;

        err = gfs2_ea_set_i(ip, &er);

        kfree(value);
        kfree(name);

        return err;
}

/**
 * gfs2_createi - Create a new inode
 * @ghs: An array of two holders
 * @name: The name of the new file
 * @mode: the permissions on the new inode
 * @dev: The device number, for device special files
 *
 * @ghs[0] is an initialized holder for the directory
 * @ghs[1] is the holder for the inode lock
 *
 * If the return value is not an error pointer, the glocks on both the
 * directory and the new file are held. A transaction has been started
 * and an inplace reservation is held, as well.
 *
 * Returns: An inode, or an error pointer
 */
struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
                           unsigned int mode, dev_t dev)
{
        struct inode *inode = NULL;
        struct gfs2_inode *dip = ghs->gh_gl->gl_object;
        struct inode *dir = &dip->i_inode;
        struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
        struct gfs2_inum_host inum = { .no_addr = 0, .no_formal_ino = 0 };
        int error;
        u64 generation;
        struct buffer_head *bh = NULL;

        if (!name->len || name->len > GFS2_FNAMESIZE)
                return ERR_PTR(-ENAMETOOLONG);

        gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
        error = gfs2_glock_nq(ghs);
        if (error)
                goto fail;

        error = create_ok(dip, name, mode);
        if (error)
                goto fail_gunlock;

        error = pick_formal_ino(sdp, &inum.no_formal_ino);
        if (error)
                goto fail_gunlock;

        error = alloc_dinode(dip, &inum.no_addr, &generation);
        if (error)
                goto fail_gunlock;

        error = gfs2_glock_nq_num(sdp, inum.no_addr, &gfs2_inode_glops,
                                  LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
        if (error)
                goto fail_gunlock;

        error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev, &bh);
        if (error)
                goto fail_gunlock2;

        inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode),
                                  inum.no_addr,
                                  inum.no_formal_ino, 0);
        if (IS_ERR(inode))
                goto fail_gunlock2;

        error = gfs2_inode_refresh(GFS2_I(inode));
        if (error)
                goto fail_gunlock2;

        error = gfs2_acl_create(dip, GFS2_I(inode));
        if (error)
                goto fail_gunlock2;

        error = gfs2_security_init(dip, GFS2_I(inode));
        if (error)
                goto fail_gunlock2;

        error = link_dinode(dip, name, GFS2_I(inode));
        if (error)
                goto fail_gunlock2;

        if (bh)
                brelse(bh);
        if (!inode)
                return ERR_PTR(-ENOMEM);
        return inode;

fail_gunlock2:
        gfs2_glock_dq_uninit(ghs + 1);
        if (inode)
                iput(inode);
fail_gunlock:
        gfs2_glock_dq(ghs);
fail:
        if (bh)
                brelse(bh);
        return ERR_PTR(error);
}

/**
 * gfs2_rmdiri - Remove a directory
 * @dip: The parent directory of the directory to be removed
 * @name: The name of the directory to be removed
 * @ip: The GFS2 inode of the directory to be removed
 *
 * Assumes Glocks on dip and ip are held
 *
 * Returns: errno
 */
int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
                struct gfs2_inode *ip)
{
        struct qstr dotname;
        int error;

        if (ip->i_di.di_entries != 2) {
                if (gfs2_consist_inode(ip))
                        gfs2_dinode_print(ip);
                return -EIO;
        }

        error = gfs2_dir_del(dip, name);
        if (error)
                return error;

        error = gfs2_change_nlink(dip, -1);
        if (error)
                return error;

        gfs2_str2qstr(&dotname, ".");
        error = gfs2_dir_del(ip, &dotname);
        if (error)
                return error;

        gfs2_str2qstr(&dotname, "..");
        error = gfs2_dir_del(ip, &dotname);
        if (error)
                return error;

        /* It looks odd, but it really should be done twice */
        error = gfs2_change_nlink(ip, -1);
        if (error)
                return error;

        error = gfs2_change_nlink(ip, -1);
        if (error)
                return error;

        return error;
}

/*
 * gfs2_unlink_ok - check to see that an inode is still in a directory
 * @dip: the directory
 * @name: the name of the file
 * @ip: the inode
 *
 * Assumes that the lock on (at least) @dip is held.
 *
 * Returns: 0 if the parent/child relationship is correct, errno if it isn't
 */
int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
                   const struct gfs2_inode *ip)
{
        int error;

        if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
                return -EPERM;

        if ((dip->i_inode.i_mode & S_ISVTX) &&
            dip->i_inode.i_uid != current->fsuid &&
            ip->i_inode.i_uid != current->fsuid && !capable(CAP_FOWNER))
                return -EPERM;

        if (IS_APPEND(&dip->i_inode))
                return -EPERM;

        error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
        if (error)
                return error;

        error = gfs2_dir_check(&dip->i_inode, name, ip);
        if (error)
                return error;

        return 0;
}

/*
 * gfs2_ok_to_move - check if it's ok to move a directory to another directory
 * @this: move this
 * @to: to here
 *
 * Follow @to back to the root and make sure we don't encounter @this.
 * Assumes we already hold the rename lock.
 *
 * Returns: errno
 */
int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
{
        struct inode *dir = &to->i_inode;
        struct super_block *sb = dir->i_sb;
        struct inode *tmp;
        struct qstr dotdot;
        int error = 0;

        gfs2_str2qstr(&dotdot, "..");

        igrab(dir);

        for (;;) {
                if (dir == &this->i_inode) {
                        error = -EINVAL;
                        break;
                }
                if (dir == sb->s_root->d_inode) {
                        error = 0;
                        break;
                }

                tmp = gfs2_lookupi(dir, &dotdot, 1, NULL);
                if (IS_ERR(tmp)) {
                        error = PTR_ERR(tmp);
                        break;
                }

                iput(dir);
                dir = tmp;
        }

        iput(dir);

        return error;
}

/**
 * gfs2_readlinki - return the contents of a symlink
 * @ip: the symlink's inode
 * @buf: a pointer to the buffer to be filled
 * @len: a pointer to the length of @buf
 *
 * If @buf is too small, a piece of memory is kmalloc()ed and needs
 * to be freed by the caller.
 *
 * Returns: errno
 */
int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
{
        struct gfs2_holder i_gh;
        struct buffer_head *dibh;
        unsigned int x;
        int error;

        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
        error = gfs2_glock_nq_atime(&i_gh);
        if (error) {
                gfs2_holder_uninit(&i_gh);
                return error;
        }

        if (!ip->i_di.di_size) {
                gfs2_consist_inode(ip);
                error = -EIO;
                goto out;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                goto out;

        x = ip->i_di.di_size + 1;
        if (x > *len) {
                *buf = kmalloc(x, GFP_KERNEL);
                if (!*buf) {
                        error = -ENOMEM;
                        goto out_brelse;
                }
        }

        memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
        *len = x;

out_brelse:
        brelse(dibh);
out:
        gfs2_glock_dq_uninit(&i_gh);
        return error;
}

/**
 * gfs2_glock_nq_atime - Acquire a hold on an inode's glock, and
 *       conditionally update the inode's atime
 * @gh: the holder to acquire
 *
 * Tests atime (access time) for gfs2_read, gfs2_readdir and gfs2_mmap,
 * and updates it if the difference between the current time and the
 * inode's current atime is greater than an interval specified at mount.
 *
 * Returns: errno
 */
int gfs2_glock_nq_atime(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_inode *ip = gl->gl_object;
        s64 quantum = gfs2_tune_get(sdp, gt_atime_quantum);
        unsigned int state;
        int flags;
        int error;
        struct timespec tv = CURRENT_TIME;

        if (gfs2_assert_warn(sdp, gh->gh_flags & GL_ATIME) ||
            gfs2_assert_warn(sdp, !(gh->gh_flags & GL_ASYNC)) ||
            gfs2_assert_warn(sdp, gl->gl_ops == &gfs2_inode_glops))
                return -EINVAL;

        state = gh->gh_state;
        flags = gh->gh_flags;

        error = gfs2_glock_nq(gh);
        if (error)
                return error;

        if (test_bit(SDF_NOATIME, &sdp->sd_flags) ||
            (sdp->sd_vfs->s_flags & MS_RDONLY))
                return 0;

        if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) {
                gfs2_glock_dq(gh);
                gfs2_holder_reinit(LM_ST_EXCLUSIVE, gh->gh_flags & ~LM_FLAG_ANY,
                                   gh);
                error = gfs2_glock_nq(gh);
                if (error)
                        return error;

                /* Verify that atime hasn't been updated while we were
                   trying to get exclusive lock. */

                tv = CURRENT_TIME;
                if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) {
                        struct buffer_head *dibh;
                        struct gfs2_dinode *di;

                        error = gfs2_trans_begin(sdp, RES_DINODE, 0);
                        if (error == -EROFS)
                                return 0;
                        if (error)
                                goto fail;

                        error = gfs2_meta_inode_buffer(ip, &dibh);
                        if (error)
                                goto fail_end_trans;

                        ip->i_inode.i_atime = tv;

                        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                        di = (struct gfs2_dinode *)dibh->b_data;
                        di->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
                        di->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
                        brelse(dibh);

                        gfs2_trans_end(sdp);
                }

                /* If someone else has asked for the glock,
                   unlock and let them have it. Then reacquire
                   in the original state. */
                if (gfs2_glock_is_blocking(gl)) {
                        gfs2_glock_dq(gh);
                        gfs2_holder_reinit(state, flags, gh);
                        return gfs2_glock_nq(gh);
                }
        }

        return 0;

fail_end_trans:
        gfs2_trans_end(sdp);
fail:
        gfs2_glock_dq(gh);
        return error;
}
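
/*
 * __gfs2_setattr_simple - Apply an attribute change to the incore inode
 * and write the resulting dinode back through the journal.  Assumes the
 * caller already has a transaction open.
 */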
static int
__gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
        struct buffer_head *dibh;
        int error;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                error = inode_setattr(&ip->i_inode, attr);
                gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }
        return error;
}

/**
 * gfs2_setattr_simple - Change attributes on an inode
 * @ip: The GFS2 inode
 * @attr: The attributes to set
 *
 * Called with a reference on the vnode.
 *
 * Returns: errno
 */
int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
        int error;

        if (current->journal_info)
                return __gfs2_setattr_simple(ip, attr);

        error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE, 0);
        if (error)
                return error;

        error = __gfs2_setattr_simple(ip, attr);
        gfs2_trans_end(GFS2_SB(&ip->i_inode));
        return error;
}
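
/*
 * gfs2_dinode_out - Copy the incore inode into an on-disk dinode buffer,
 * converting all fields to big-endian on-disk format.  The inverse of
 * gfs2_dinode_in().
 */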
void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
        const struct gfs2_dinode_host *di = &ip->i_di;
        struct gfs2_dinode *str = buf;

        str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
        str->di_header.__pad0 = 0;
        str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
        str->di_header.__pad1 = 0;
        str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
        str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
        str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
        str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
        str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
        str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
        str->di_size = cpu_to_be64(di->di_size);
        str->di_blocks = cpu_to_be64(di->di_blocks);
        str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
        str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
        str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);

        str->di_goal_meta = cpu_to_be64(di->di_goal_meta);
        str->di_goal_data = cpu_to_be64(di->di_goal_data);
        str->di_generation = cpu_to_be64(di->di_generation);

        str->di_flags = cpu_to_be32(di->di_flags);
        str->di_height = cpu_to_be16(di->di_height);
        str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
                                             !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ?
                                             GFS2_FORMAT_DE : 0);
        str->di_depth = cpu_to_be16(di->di_depth);
        str->di_entries = cpu_to_be32(di->di_entries);

        str->di_eattr = cpu_to_be64(di->di_eattr);
        str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
        str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
        str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
}

void gfs2_dinode_print(const struct gfs2_inode *ip)
{
        const struct gfs2_dinode_host *di = &ip->i_di;

        printk(KERN_INFO " no_formal_ino = %llu\n",
               (unsigned long long)ip->i_no_formal_ino);
        printk(KERN_INFO " no_addr = %llu\n",
               (unsigned long long)ip->i_no_addr);
        printk(KERN_INFO " di_size = %llu\n", (unsigned long long)di->di_size);
        printk(KERN_INFO " di_blocks = %llu\n",
               (unsigned long long)di->di_blocks);
        printk(KERN_INFO " di_goal_meta = %llu\n",
               (unsigned long long)di->di_goal_meta);
        printk(KERN_INFO " di_goal_data = %llu\n",
               (unsigned long long)di->di_goal_data);
        printk(KERN_INFO " di_flags = 0x%.8X\n", di->di_flags);
        printk(KERN_INFO " di_height = %u\n", di->di_height);
        printk(KERN_INFO " di_depth = %u\n", di->di_depth);
        printk(KERN_INFO " di_entries = %u\n", di->di_entries);
        printk(KERN_INFO " di_eattr = %llu\n",
               (unsigned long long)di->di_eattr);
}