
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/posix_acl.h>
#include <linux/sort.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/lm_interface.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "acl.h"
#include "bmap.h"
#include "dir.h"
#include "eattr.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "ops_file.h"
#include "ops_inode.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
/**
 * gfs2_inode_attr_in - Copy attributes from the dinode into the VFS inode
 * @ip: The GFS2 inode (with embedded disk inode data)
 *
 */
void gfs2_inode_attr_in(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_dinode_host *di = &ip->i_di;

	inode->i_ino = ip->i_num.no_addr;
	inode->i_nlink = di->di_nlink;
	inode->i_uid = di->di_uid;
	inode->i_gid = di->di_gid;
	i_size_write(inode, di->di_size);
	inode->i_atime.tv_sec = di->di_atime;
	inode->i_mtime.tv_sec = di->di_mtime;
	inode->i_ctime.tv_sec = di->di_ctime;
	inode->i_atime.tv_nsec = 0;
	inode->i_mtime.tv_nsec = 0;
	inode->i_ctime.tv_nsec = 0;
	inode->i_blocks = di->di_blocks <<
		(GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);

	if (di->di_flags & GFS2_DIF_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;

	if (di->di_flags & GFS2_DIF_APPENDONLY)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
}
/**
 * gfs2_inode_attr_out - Copy attributes from VFS inode into the dinode
 * @ip: The GFS2 inode
 *
 * Only copy out the attributes that we want the VFS layer
 * to be able to modify.
 */
void gfs2_inode_attr_out(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_dinode_host *di = &ip->i_di;

	di->di_uid = inode->i_uid;
	di->di_gid = inode->i_gid;
	di->di_atime = inode->i_atime.tv_sec;
	di->di_mtime = inode->i_mtime.tv_sec;
	di->di_ctime = inode->i_ctime.tv_sec;
}

static int iget_test(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_inum_host *inum = opaque;

	if (ip && ip->i_num.no_addr == inum->no_addr)
		return 1;

	return 0;
}

static int iget_set(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_inum_host *inum = opaque;

	ip->i_num = *inum;
	return 0;
}

struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum_host *inum)
{
	return ilookup5(sb, (unsigned long)inum->no_formal_ino,
			iget_test, inum);
}

static struct inode *gfs2_iget(struct super_block *sb, struct gfs2_inum_host *inum)
{
	return iget5_locked(sb, (unsigned long)inum->no_formal_ino,
			    iget_test, iget_set, inum);
}
/**
 * gfs2_inode_lookup - Lookup an inode
 * @sb: The super block
 * @inum: The inode number
 * @type: The type of the inode
 *
 * Returns: A VFS inode, or an error
 */
struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum_host *inum, unsigned int type)
{
	struct inode *inode = gfs2_iget(sb, inum);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_glock *io_gl;
	int error;

	if (!inode)
		return ERR_PTR(-ENOBUFS);

	if (inode->i_state & I_NEW) {
		struct gfs2_sbd *sdp = GFS2_SB(inode);
		umode_t mode = DT2IF(type);
		inode->i_private = ip;
		inode->i_mode = mode;

		if (S_ISREG(mode)) {
			inode->i_op = &gfs2_file_iops;
			inode->i_fop = &gfs2_file_fops;
			inode->i_mapping->a_ops = &gfs2_file_aops;
		} else if (S_ISDIR(mode)) {
			inode->i_op = &gfs2_dir_iops;
			inode->i_fop = &gfs2_dir_fops;
		} else if (S_ISLNK(mode)) {
			inode->i_op = &gfs2_symlink_iops;
		} else {
			inode->i_op = &gfs2_dev_iops;
		}

		error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
		if (unlikely(error))
			goto fail;
		ip->i_gl->gl_object = ip;

		error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
		if (unlikely(error))
			goto fail_put;

		ip->i_vn = ip->i_gl->gl_vn - 1;
		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
		if (unlikely(error))
			goto fail_iopen;

		gfs2_glock_put(io_gl);
		unlock_new_inode(inode);
	}

	return inode;

fail_iopen:
	gfs2_glock_put(io_gl);
fail_put:
	ip->i_gl->gl_object = NULL;
	gfs2_glock_put(ip->i_gl);
fail:
	iput(inode);
	return ERR_PTR(error);
}
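
/*
 * Note (illustrative): gfs2_inode_lookup() does not read the dinode from
 * disk itself.  Setting ip->i_vn to gl_vn - 1 marks the in-core copy as
 * stale, so the first acquisition of the inode glock causes
 * gfs2_inode_refresh() to be run (from the glock operations) before the
 * cached fields are trusted.
 */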
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	struct gfs2_dinode_host *di = &ip->i_di;
	const struct gfs2_dinode *str = buf;

	if (ip->i_num.no_addr != be64_to_cpu(str->di_num.no_addr)) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}
	if (ip->i_num.no_formal_ino != be64_to_cpu(str->di_num.no_formal_ino))
		return -ESTALE;

	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	};

	di->di_uid = be32_to_cpu(str->di_uid);
	di->di_gid = be32_to_cpu(str->di_gid);
	di->di_nlink = be32_to_cpu(str->di_nlink);
	di->di_size = be64_to_cpu(str->di_size);
	di->di_blocks = be64_to_cpu(str->di_blocks);
	di->di_atime = be64_to_cpu(str->di_atime);
	di->di_mtime = be64_to_cpu(str->di_mtime);
	di->di_ctime = be64_to_cpu(str->di_ctime);

	di->di_goal_meta = be64_to_cpu(str->di_goal_meta);
	di->di_goal_data = be64_to_cpu(str->di_goal_data);
	di->di_generation = be64_to_cpu(str->di_generation);

	di->di_flags = be32_to_cpu(str->di_flags);
	di->di_payload_format = be32_to_cpu(str->di_payload_format);
	di->di_height = be16_to_cpu(str->di_height);

	di->di_depth = be16_to_cpu(str->di_depth);
	di->di_entries = be32_to_cpu(str->di_entries);

	di->di_eattr = be64_to_cpu(str->di_eattr);

	return 0;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */
int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh, GFS2_METATYPE_DI)) {
		brelse(dibh);
		return -EIO;
	}

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	ip->i_vn = ip->i_gl->gl_vn;

	return error;
}
int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al;
	struct gfs2_rgrpd *rgd;
	int error;

	if (ip->i_di.di_blocks != 1) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
	if (error)
		goto out_qs;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_num.no_addr);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_rindex_relse;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
				   &al->al_rgd_gh);
	if (error)
		goto out_rindex_relse;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, 1);
	if (error)
		goto out_rg_gunlock;

	gfs2_trans_add_gl(ip->i_gl);

	gfs2_free_di(rgd, ip);

	gfs2_trans_end(sdp);
	clear_bit(GLF_STICKY, &ip->i_gl->gl_flags);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&al->al_rgd_gh);
out_rindex_relse:
	gfs2_glock_dq_uninit(&al->al_ri_gh);
out_qs:
	gfs2_quota_unhold(ip);
out:
	gfs2_alloc_put(ip);
	return error;
}
/**
 * gfs2_change_nlink - Change nlink count on inode
 * @ip: The GFS2 inode
 * @diff: The change in the nlink count required
 *
 * Returns: errno
 */
int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
{
	struct gfs2_sbd *sdp = ip->i_inode.i_sb->s_fs_info;
	struct buffer_head *dibh;
	u32 nlink;
	int error;

	BUG_ON(ip->i_di.di_nlink != ip->i_inode.i_nlink);
	nlink = ip->i_di.di_nlink + diff;

	/* If we are reducing the nlink count, but the new value ends up being
	   bigger than the old one, we must have underflowed. */
	if (diff < 0 && nlink > ip->i_di.di_nlink) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	ip->i_di.di_nlink = nlink;
	ip->i_di.di_ctime = get_seconds();
	ip->i_inode.i_nlink = nlink;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
	mark_inode_dirty(&ip->i_inode);

	if (ip->i_di.di_nlink == 0) {
		struct gfs2_rgrpd *rgd;
		struct gfs2_holder ri_gh, rg_gh;

		error = gfs2_rindex_hold(sdp, &ri_gh);
		if (error)
			goto out;
		error = -EIO;
		rgd = gfs2_blk2rgrpd(sdp, ip->i_num.no_addr);
		if (!rgd)
			goto out_norgrp;
		error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
		if (error)
			goto out_norgrp;

		clear_nlink(&ip->i_inode);
		gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */
		gfs2_glock_dq_uninit(&rg_gh);
out_norgrp:
		gfs2_glock_dq_uninit(&ri_gh);
	}
out:
	return error;
}
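
/*
 * Note (illustrative): when the link count reaches zero the dinode is not
 * freed here.  gfs2_unlink_di() only marks it unlinked, under the resource
 * group glock, so the actual deallocation can happen later, once the last
 * holder of the inode (possibly on another node) lets go of it.
 */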
struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
{
	struct qstr qstr;
	gfs2_str2qstr(&qstr, name);
	return gfs2_lookupi(dip, &qstr, 1, NULL);
}
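
/*
 * Example (illustrative, not part of this file): gfs2_lookup_simple() is
 * intended for internal lookups of system inodes by name, where neither a
 * permission check nor a nameidata is wanted, roughly:
 *
 *	struct inode *inode = gfs2_lookup_simple(sdp->sd_master_dir, "rindex");
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *
 * (A NULL return means the name was not found; see gfs2_lookupi() below.)
 */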
/**
 * gfs2_lookupi - Look up a filename in a directory and return its inode
 * @dir: The directory to search in
 * @name: The name of the inode to look for
 * @is_root: If 1, ignore the caller's permissions
 * @nd: The nameidata from the VFS lookup (may be NULL)
 *
 * There will always be a vnode (Linux VFS inode) for the @dir inode unless
 * @is_root is true.
 *
 * Returns: The inode, NULL if the name does not exist, or an ERR_PTR
 */
struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
			   int is_root, struct nameidata *nd)
{
	struct super_block *sb = dir->i_sb;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	struct gfs2_inum_host inum;
	unsigned int type;
	int error = 0;
	struct inode *inode = NULL;

	if (!name->len || name->len > GFS2_FNAMESIZE)
		return ERR_PTR(-ENAMETOOLONG);

	if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) ||
	    (name->len == 2 && memcmp(name->name, "..", 2) == 0 &&
	     dir == sb->s_root->d_inode)) {
		igrab(dir);
		return dir;
	}

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	if (error)
		return ERR_PTR(error);

	if (!is_root) {
		error = permission(dir, MAY_EXEC, NULL);
		if (error)
			goto out;
	}

	error = gfs2_dir_search(dir, name, &inum, &type);
	if (error)
		goto out;

	inode = gfs2_inode_lookup(sb, &inum, type);

out:
	gfs2_glock_dq_uninit(&d_gh);
	if (error == -ENOENT)
		return NULL;
	return inode;
}
static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
	struct buffer_head *bh;
	struct gfs2_inum_range_host ir;
	int error;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;
	mutex_lock(&sdp->sd_inum_mutex);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error) {
		mutex_unlock(&sdp->sd_inum_mutex);
		gfs2_trans_end(sdp);
		return error;
	}

	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

	if (ir.ir_length) {
		*formal_ino = ir.ir_start++;
		ir.ir_length--;
		gfs2_trans_add_bh(ip->i_gl, bh, 1);
		gfs2_inum_range_out(&ir,
				    bh->b_data + sizeof(struct gfs2_dinode));
		brelse(bh);
		mutex_unlock(&sdp->sd_inum_mutex);
		gfs2_trans_end(sdp);
		return 0;
	}

	brelse(bh);

	mutex_unlock(&sdp->sd_inum_mutex);
	gfs2_trans_end(sdp);

	return 1;
}
static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode);
	struct gfs2_holder gh;
	struct buffer_head *bh;
	struct gfs2_inum_range_host ir;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out;
	mutex_lock(&sdp->sd_inum_mutex);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_end_trans;

	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

	if (!ir.ir_length) {
		struct buffer_head *m_bh;
		u64 x, y;
		__be64 z;

		error = gfs2_meta_inode_buffer(m_ip, &m_bh);
		if (error)
			goto out_brelse;

		z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
		x = y = be64_to_cpu(z);
		ir.ir_start = x;
		ir.ir_length = GFS2_INUM_QUANTUM;
		x += GFS2_INUM_QUANTUM;
		if (x < y)
			gfs2_consist_inode(m_ip);
		z = cpu_to_be64(x);
		gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
		*(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z;

		brelse(m_bh);
	}

	*formal_ino = ir.ir_start++;
	ir.ir_length--;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	gfs2_inum_range_out(&ir, bh->b_data + sizeof(struct gfs2_dinode));

out_brelse:
	brelse(bh);
out_end_trans:
	mutex_unlock(&sdp->sd_inum_mutex);
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}
static int pick_formal_ino(struct gfs2_sbd *sdp, u64 *inum)
{
	int error;

	error = pick_formal_ino_1(sdp, inum);
	if (error <= 0)
		return error;

	error = pick_formal_ino_2(sdp, inum);

	return error;
}
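
/*
 * Note (illustrative): formal inode numbers are handed out in two stages.
 * pick_formal_ino_1() consumes a number from this node's private range
 * (sd_ir_inode) under only the local sd_inum_mutex; when that range is
 * exhausted it returns 1, and pick_formal_ino_2() takes the cluster-wide
 * exclusive glock on sd_inum_inode, refills the per-node range with
 * GFS2_INUM_QUANTUM numbers, and allocates from the refilled range.
 */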
/**
 * create_ok - OK to create a new on-disk inode here?
 * @dip: Directory in which dinode is to be created
 * @name: Name of new dinode
 * @mode: The mode of the new dinode
 *
 * Returns: errno
 */
static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
		     unsigned int mode)
{
	int error;

	error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
	if (error)
		return error;

	/* Don't create entries in an unlinked directory */
	if (!dip->i_di.di_nlink)
		return -EPERM;

	error = gfs2_dir_search(&dip->i_inode, name, NULL, NULL);
	switch (error) {
	case -ENOENT:
		error = 0;
		break;
	case 0:
		return -EEXIST;
	default:
		return error;
	}

	if (dip->i_di.di_entries == (u32)-1)
		return -EFBIG;
	if (S_ISDIR(mode) && dip->i_di.di_nlink == (u32)-1)
		return -EMLINK;

	return 0;
}
static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
			       unsigned int *uid, unsigned int *gid)
{
	if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
	    (dip->i_inode.i_mode & S_ISUID) && dip->i_di.di_uid) {
		if (S_ISDIR(*mode))
			*mode |= S_ISUID;
		else if (dip->i_di.di_uid != current->fsuid)
			*mode &= ~07111;
		*uid = dip->i_di.di_uid;
	} else
		*uid = current->fsuid;

	if (dip->i_inode.i_mode & S_ISGID) {
		if (S_ISDIR(*mode))
			*mode |= S_ISGID;
		*gid = dip->i_di.di_gid;
	} else
		*gid = current->fsgid;
}
static int alloc_dinode(struct gfs2_inode *dip, struct gfs2_inum_host *inum,
			u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	int error;

	gfs2_alloc_get(dip);

	dip->i_alloc.al_requested = RES_DINODE;
	error = gfs2_inplace_reserve(dip);
	if (error)
		goto out;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS, 0);
	if (error)
		goto out_ipreserv;

	inum->no_addr = gfs2_alloc_di(dip, generation);

	gfs2_trans_end(sdp);

out_ipreserv:
	gfs2_inplace_release(dip);
out:
	gfs2_alloc_put(dip);
	return error;
}
/**
 * init_dinode - Fill in a new dinode structure
 * @dip: the directory this inode is being created in
 * @gl: The glock covering the new inode
 * @inum: the inode number
 * @mode: the file permissions
 * @uid: the uid of the new inode
 * @gid: the gid of the new inode
 * @generation: the generation number of the new inode
 * @dev: the device number, if this is a device node
 *
 */
static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
			const struct gfs2_inum_host *inum, unsigned int mode,
			unsigned int uid, unsigned int gid,
			const u64 *generation, dev_t dev)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_dinode *di;
	struct buffer_head *dibh;

	dibh = gfs2_meta_new(gl, inum->no_addr);
	gfs2_trans_add_bh(gl, dibh, 1);
	gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
	di = (struct gfs2_dinode *)dibh->b_data;

	di->di_num.no_formal_ino = cpu_to_be64(inum->no_formal_ino);
	di->di_num.no_addr = cpu_to_be64(inum->no_addr);
	di->di_mode = cpu_to_be32(mode);
	di->di_uid = cpu_to_be32(uid);
	di->di_gid = cpu_to_be32(gid);
	di->di_nlink = cpu_to_be32(0);
	di->di_size = cpu_to_be64(0);
	di->di_blocks = cpu_to_be64(1);
	di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(get_seconds());
	di->di_major = cpu_to_be32(MAJOR(dev));
	di->di_minor = cpu_to_be32(MINOR(dev));
	di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
	di->di_generation = cpu_to_be64(*generation);
	di->di_flags = cpu_to_be32(0);

	if (S_ISREG(mode)) {
		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
		    gfs2_tune_get(sdp, gt_new_files_jdata))
			di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_DIRECTIO) ||
		    gfs2_tune_get(sdp, gt_new_files_directio))
			di->di_flags |= cpu_to_be32(GFS2_DIF_DIRECTIO);
	} else if (S_ISDIR(mode)) {
		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
					    GFS2_DIF_INHERIT_DIRECTIO);
		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
					    GFS2_DIF_INHERIT_JDATA);
	}

	di->__pad1 = 0;
	di->di_payload_format = cpu_to_be32(0);
	di->di_height = cpu_to_be32(0);
	di->__pad2 = 0;
	di->__pad3 = 0;
	di->di_depth = cpu_to_be16(0);
	di->di_entries = cpu_to_be32(0);
	memset(&di->__pad4, 0, sizeof(di->__pad4));
	di->di_eattr = cpu_to_be64(0);
	memset(&di->di_reserved, 0, sizeof(di->di_reserved));

	brelse(dibh);
}
static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
		       unsigned int mode, const struct gfs2_inum_host *inum,
		       const u64 *generation, dev_t dev)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	unsigned int uid, gid;
	int error;

	munge_mode_uid_gid(dip, &mode, &uid, &gid);
	gfs2_alloc_get(dip);

	error = gfs2_quota_lock(dip, uid, gid);
	if (error)
		goto out;

	error = gfs2_quota_check(dip, uid, gid);
	if (error)
		goto out_quota;

	error = gfs2_trans_begin(sdp, RES_DINODE + RES_QUOTA, 0);
	if (error)
		goto out_quota;

	init_dinode(dip, gl, inum, mode, uid, gid, generation, dev);
	gfs2_quota_change(dip, +1, uid, gid);
	gfs2_trans_end(sdp);

out_quota:
	gfs2_quota_unlock(dip);
out:
	gfs2_alloc_put(dip);
	return error;
}
static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
		       struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_alloc *al;
	int alloc_required;
	struct buffer_head *dibh;
	int error;

	al = gfs2_alloc_get(dip);

	error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto fail;

	error = alloc_required = gfs2_diradd_alloc_required(&dip->i_inode, name);
	if (alloc_required < 0)
		goto fail;
	if (alloc_required) {
		error = gfs2_quota_check(dip, dip->i_di.di_uid,
					 dip->i_di.di_gid);
		if (error)
			goto fail_quota_locks;

		al->al_requested = sdp->sd_max_dirres;

		error = gfs2_inplace_reserve(dip);
		if (error)
			goto fail_quota_locks;

		error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
					 al->al_rgd->rd_ri.ri_length +
					 2 * RES_DINODE +
					 RES_STATFS + RES_QUOTA, 0);
		if (error)
			goto fail_ipreserv;
	} else {
		error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0);
		if (error)
			goto fail_quota_locks;
	}

	error = gfs2_dir_add(&dip->i_inode, name, &ip->i_num, IF2DT(ip->i_inode.i_mode));
	if (error)
		goto fail_end_trans;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto fail_end_trans;
	ip->i_di.di_nlink = 1;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
	return 0;

fail_end_trans:
	gfs2_trans_end(sdp);

fail_ipreserv:
	if (dip->i_alloc.al_rgd)
		gfs2_inplace_release(dip);

fail_quota_locks:
	gfs2_quota_unlock(dip);

fail:
	gfs2_alloc_put(dip);
	return error;
}
static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip)
{
	int err;
	size_t len;
	void *value;
	char *name;
	struct gfs2_ea_request er;

	err = security_inode_init_security(&ip->i_inode, &dip->i_inode,
					   &name, &value, &len);

	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	memset(&er, 0, sizeof(struct gfs2_ea_request));

	er.er_type = GFS2_EATYPE_SECURITY;
	er.er_name = name;
	er.er_data = value;
	er.er_name_len = strlen(name);
	er.er_data_len = len;

	err = gfs2_ea_set_i(ip, &er);

	kfree(value);
	kfree(name);

	return err;
}
/**
 * gfs2_createi - Create a new inode
 * @ghs: An array of two holders
 * @name: The name of the new file
 * @mode: the permissions on the new inode
 * @dev: the device number, if this is a device special file
 *
 * @ghs[0] is an initialized holder for the directory
 * @ghs[1] is the holder for the inode lock
 *
 * If the return value is not an error, the glocks on both the directory and
 * the new file are held. A transaction has been started and an inplace
 * reservation is held, as well.
 *
 * Returns: An inode
 */
struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
			   unsigned int mode, dev_t dev)
{
	struct inode *inode;
	struct gfs2_inode *dip = ghs->gh_gl->gl_object;
	struct inode *dir = &dip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_inum_host inum;
	int error;
	u64 generation;

	if (!name->len || name->len > GFS2_FNAMESIZE)
		return ERR_PTR(-ENAMETOOLONG);

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
	error = gfs2_glock_nq(ghs);
	if (error)
		goto fail;

	error = create_ok(dip, name, mode);
	if (error)
		goto fail_gunlock;

	error = pick_formal_ino(sdp, &inum.no_formal_ino);
	if (error)
		goto fail_gunlock;

	error = alloc_dinode(dip, &inum, &generation);
	if (error)
		goto fail_gunlock;

	if (inum.no_addr < dip->i_num.no_addr) {
		gfs2_glock_dq(ghs);

		error = gfs2_glock_nq_num(sdp, inum.no_addr,
					  &gfs2_inode_glops, LM_ST_EXCLUSIVE,
					  GL_SKIP, ghs + 1);
		if (error)
			return ERR_PTR(error);

		gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
		error = gfs2_glock_nq(ghs);
		if (error) {
			gfs2_glock_dq_uninit(ghs + 1);
			return ERR_PTR(error);
		}

		error = create_ok(dip, name, mode);
		if (error)
			goto fail_gunlock2;
	} else {
		error = gfs2_glock_nq_num(sdp, inum.no_addr,
					  &gfs2_inode_glops, LM_ST_EXCLUSIVE,
					  GL_SKIP, ghs + 1);
		if (error)
			goto fail_gunlock;
	}

	error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev);
	if (error)
		goto fail_gunlock2;

	inode = gfs2_inode_lookup(dir->i_sb, &inum, IF2DT(mode));
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		goto fail_gunlock2;
	}

	error = gfs2_inode_refresh(GFS2_I(inode));
	if (error)
		goto fail_iput;

	error = gfs2_acl_create(dip, GFS2_I(inode));
	if (error)
		goto fail_iput;

	error = gfs2_security_init(dip, GFS2_I(inode));
	if (error)
		goto fail_iput;

	error = link_dinode(dip, name, GFS2_I(inode));
	if (error)
		goto fail_iput;

	if (!inode)
		return ERR_PTR(-ENOMEM);
	return inode;

fail_iput:
	iput(inode);
fail_gunlock2:
	gfs2_glock_dq_uninit(ghs + 1);
fail_gunlock:
	gfs2_glock_dq(ghs);
fail:
	return ERR_PTR(error);
}
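
/*
 * Example (illustrative sketch of a caller, not part of this file): a
 * directory operation is expected to set up ghs[0] on the directory glock
 * and, on success, to end the transaction and drop the reservation, quota
 * locks and both glocks itself, roughly:
 *
 *	struct gfs2_holder ghs[2];
 *
 *	gfs2_holder_init(dip->i_gl, 0, 0, ghs);
 *	inode = gfs2_createi(ghs, &dentry->d_name, S_IFREG | mode, 0);
 *	if (!IS_ERR(inode)) {
 *		gfs2_trans_end(sdp);
 *		if (dip->i_alloc.al_rgd)
 *			gfs2_inplace_release(dip);
 *		gfs2_quota_unlock(dip);
 *		gfs2_alloc_put(dip);
 *		gfs2_glock_dq_uninit_m(2, ghs);
 *		d_instantiate(dentry, inode);
 *	}
 */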
/**
 * gfs2_rmdiri - Remove a directory
 * @dip: The parent directory of the directory to be removed
 * @name: The name of the directory to be removed
 * @ip: The GFS2 inode of the directory to be removed
 *
 * Assumes Glocks on dip and ip are held
 *
 * Returns: errno
 */
int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
		struct gfs2_inode *ip)
{
	struct qstr dotname;
	int error;

	if (ip->i_di.di_entries != 2) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(ip);
		return -EIO;
	}

	error = gfs2_dir_del(dip, name);
	if (error)
		return error;

	error = gfs2_change_nlink(dip, -1);
	if (error)
		return error;

	gfs2_str2qstr(&dotname, ".");
	error = gfs2_dir_del(ip, &dotname);
	if (error)
		return error;

	gfs2_str2qstr(&dotname, "..");
	error = gfs2_dir_del(ip, &dotname);
	if (error)
		return error;

	error = gfs2_change_nlink(ip, -2);
	if (error)
		return error;

	return error;
}
/*
 * gfs2_unlink_ok - check to see that an inode is still in a directory
 * @dip: the directory
 * @name: the name of the file
 * @ip: the inode
 *
 * Assumes that the lock on (at least) @dip is held.
 *
 * Returns: 0 if the parent/child relationship is correct, errno if it isn't
 */
int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
		   struct gfs2_inode *ip)
{
	struct gfs2_inum_host inum;
	unsigned int type;
	int error;

	if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
		return -EPERM;

	if ((dip->i_inode.i_mode & S_ISVTX) &&
	    dip->i_di.di_uid != current->fsuid &&
	    ip->i_di.di_uid != current->fsuid && !capable(CAP_FOWNER))
		return -EPERM;

	if (IS_APPEND(&dip->i_inode))
		return -EPERM;

	error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
	if (error)
		return error;

	error = gfs2_dir_search(&dip->i_inode, name, &inum, &type);
	if (error)
		return error;

	if (!gfs2_inum_equal(&inum, &ip->i_num))
		return -ENOENT;

	if (IF2DT(ip->i_inode.i_mode) != type) {
		gfs2_consist_inode(dip);
		return -EIO;
	}

	return 0;
}
/*
 * gfs2_ok_to_move - check if it's ok to move a directory to another directory
 * @this: move this
 * @to: to here
 *
 * Follow @to back to the root and make sure we don't encounter @this
 * Assumes we already hold the rename lock.
 *
 * Returns: errno
 */
int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
{
	struct inode *dir = &to->i_inode;
	struct super_block *sb = dir->i_sb;
	struct inode *tmp;
	struct qstr dotdot;
	int error = 0;

	gfs2_str2qstr(&dotdot, "..");

	igrab(dir);

	for (;;) {
		if (dir == &this->i_inode) {
			error = -EINVAL;
			break;
		}
		if (dir == sb->s_root->d_inode) {
			error = 0;
			break;
		}

		tmp = gfs2_lookupi(dir, &dotdot, 1, NULL);
		if (IS_ERR(tmp)) {
			error = PTR_ERR(tmp);
			break;
		}

		iput(dir);
		dir = tmp;
	}

	iput(dir);

	return error;
}
/**
 * gfs2_readlinki - return the contents of a symlink
 * @ip: the symlink's inode
 * @buf: a pointer to the buffer to be filled
 * @len: a pointer to the length of @buf
 *
 * If @buf is too small, a piece of memory is kmalloc()ed and needs
 * to be freed by the caller.
 *
 * Returns: errno
 */
int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
{
	struct gfs2_holder i_gh;
	struct buffer_head *dibh;
	unsigned int x;
	int error;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
	error = gfs2_glock_nq_atime(&i_gh);
	if (error) {
		gfs2_holder_uninit(&i_gh);
		return error;
	}

	if (!ip->i_di.di_size) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	x = ip->i_di.di_size + 1;
	if (x > *len) {
		*buf = kmalloc(x, GFP_KERNEL);
		if (!*buf) {
			error = -ENOMEM;
			goto out_brelse;
		}
	}

	memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
	*len = x;

out_brelse:
	brelse(dibh);
out:
	gfs2_glock_dq_uninit(&i_gh);
	return error;
}
/**
 * gfs2_glock_nq_atime - Acquire a hold on an inode's glock, and
 *       conditionally update the inode's atime
 * @gh: the holder to acquire
 *
 * Tests atime (access time) for gfs2_read, gfs2_readdir and gfs2_mmap
 * Update if the difference between the current time and the inode's current
 * atime is greater than an interval specified at mount.
 *
 * Returns: errno
 */
int gfs2_glock_nq_atime(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	s64 curtime, quantum = gfs2_tune_get(sdp, gt_atime_quantum);
	unsigned int state;
	int flags;
	int error;

	if (gfs2_assert_warn(sdp, gh->gh_flags & GL_ATIME) ||
	    gfs2_assert_warn(sdp, !(gh->gh_flags & GL_ASYNC)) ||
	    gfs2_assert_warn(sdp, gl->gl_ops == &gfs2_inode_glops))
		return -EINVAL;

	state = gh->gh_state;
	flags = gh->gh_flags;

	error = gfs2_glock_nq(gh);
	if (error)
		return error;

	if (test_bit(SDF_NOATIME, &sdp->sd_flags) ||
	    (sdp->sd_vfs->s_flags & MS_RDONLY))
		return 0;

	curtime = get_seconds();
	if (curtime - ip->i_di.di_atime >= quantum) {
		gfs2_glock_dq(gh);
		gfs2_holder_reinit(LM_ST_EXCLUSIVE, gh->gh_flags & ~LM_FLAG_ANY,
				   gh);
		error = gfs2_glock_nq(gh);
		if (error)
			return error;

		/* Verify that atime hasn't been updated while we were
		   trying to get exclusive lock. */

		curtime = get_seconds();
		if (curtime - ip->i_di.di_atime >= quantum) {
			struct buffer_head *dibh;
			struct gfs2_dinode *di;

			error = gfs2_trans_begin(sdp, RES_DINODE, 0);
			if (error == -EROFS)
				return 0;
			if (error)
				goto fail;

			error = gfs2_meta_inode_buffer(ip, &dibh);
			if (error)
				goto fail_end_trans;

			ip->i_di.di_atime = curtime;
			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
			di = (struct gfs2_dinode *)dibh->b_data;
			di->di_atime = cpu_to_be64(ip->i_di.di_atime);
			brelse(dibh);

			gfs2_trans_end(sdp);
		}

		/* If someone else has asked for the glock,
		   unlock and let them have it.  Then reacquire
		   in the original state. */
		if (gfs2_glock_is_blocking(gl)) {
			gfs2_glock_dq(gh);
			gfs2_holder_reinit(state, flags, gh);
			return gfs2_glock_nq(gh);
		}
	}

	return 0;

fail_end_trans:
	gfs2_trans_end(sdp);
fail:
	gfs2_glock_dq(gh);
	return error;
}
/**
 * glock_compare_atime - Compare two struct gfs2_glock structures for sort
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 * Returns: 1 if A > B
 *         -1 if A < B
 *          0 if A == B
 */
static int glock_compare_atime(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	if (gh_a->gh_state == LM_ST_SHARED && gh_b->gh_state == LM_ST_EXCLUSIVE)
		return 1;
	if (gh_a->gh_state == LM_ST_SHARED && (gh_b->gh_flags & GL_ATIME))
		return 1;

	return 0;
}
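
/*
 * Note (illustrative): the holders are ordered primarily by lock number so
 * that every caller acquires its glocks in the same global order, which
 * avoids ABBA deadlocks.  For holders on the same glock, a shared request
 * sorts after an exclusive or GL_ATIME request, so the stronger lock mode
 * is queued first instead of having to be upgraded afterwards.
 */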
/**
 * gfs2_glock_nq_m_atime - acquire multiple glocks where one may need an
 *      atime update
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */
int gfs2_glock_nq_m_atime(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder **p;
	unsigned int x;
	int error = 0;

	if (!num_gh)
		return 0;

	if (num_gh == 1) {
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		if (ghs->gh_flags & GL_ATIME)
			error = gfs2_glock_nq_atime(ghs);
		else
			error = gfs2_glock_nq(ghs);
		return error;
	}

	p = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare_atime, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		if (p[x]->gh_flags & GL_ATIME)
			error = gfs2_glock_nq_atime(p[x]);
		else
			error = gfs2_glock_nq(p[x]);

		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	kfree(p);
	return error;
}
static int __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		error = inode_setattr(&ip->i_inode, attr);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
		gfs2_inode_attr_out(ip);

		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}
	return error;
}
/**
 * gfs2_setattr_simple - Change attributes on an inode and write out its dinode
 * @ip: The GFS2 inode
 * @attr: The attributes to apply
 *
 * Called with a reference on the vnode.
 *
 * Returns: errno
 */
int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
	int error;

	if (current->journal_info)
		return __gfs2_setattr_simple(ip, attr);

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE, 0);
	if (error)
		return error;

	error = __gfs2_setattr_simple(ip, attr);

	gfs2_trans_end(GFS2_SB(&ip->i_inode));

	return error;
}