/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/posix_acl.h>
#include <linux/sort.h>
#include <linux/gfs2_ondisk.h>
#include <asm/semaphore.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "acl.h"
#include "bmap.h"
#include "dir.h"
#include "eattr.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "ops_file.h"
#include "ops_inode.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "unlinked.h"
#include "util.h"
/**
 * inode_attr_in - Copy attributes from the dinode into the VFS inode
 * @ip: The GFS2 inode (with embedded disk inode data)
 * @inode: The Linux VFS inode
 *
 */
static void inode_attr_in(struct gfs2_inode *ip, struct inode *inode)
{
	inode->i_ino = ip->i_num.no_formal_ino;

	switch (ip->i_di.di_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		inode->i_rdev = MKDEV(ip->i_di.di_major, ip->i_di.di_minor);
		break;
	default:
		inode->i_rdev = 0;
		break;
	}

	inode->i_mode = ip->i_di.di_mode;
	inode->i_nlink = ip->i_di.di_nlink;
	inode->i_uid = ip->i_di.di_uid;
	inode->i_gid = ip->i_di.di_gid;
	i_size_write(inode, ip->i_di.di_size);
	inode->i_atime.tv_sec = ip->i_di.di_atime;
	inode->i_mtime.tv_sec = ip->i_di.di_mtime;
	inode->i_ctime.tv_sec = ip->i_di.di_ctime;
	inode->i_atime.tv_nsec = 0;
	inode->i_mtime.tv_nsec = 0;
	inode->i_ctime.tv_nsec = 0;
	inode->i_blksize = PAGE_SIZE;
	inode->i_blocks = ip->i_di.di_blocks <<
		(ip->i_sbd->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);

	if (ip->i_di.di_flags & GFS2_DIF_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;

	if (ip->i_di.di_flags & GFS2_DIF_APPENDONLY)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
}

/**
 * gfs2_inode_attr_in - Copy attributes from the dinode into the VFS inode
 * @ip: The GFS2 inode (with embedded disk inode data)
 *
 */
void gfs2_inode_attr_in(struct gfs2_inode *ip)
{
	struct inode *inode;

	inode = gfs2_ip2v_lookup(ip);
	if (inode) {
		inode_attr_in(ip, inode);
		iput(inode);
	}
}

/**
 * gfs2_inode_attr_out - Copy attributes from VFS inode into the dinode
 * @ip: The GFS2 inode
 *
 * Only copy out the attributes that we want the VFS layer
 * to be able to modify.
 */
void gfs2_inode_attr_out(struct gfs2_inode *ip)
{
	struct inode *inode = ip->i_vnode;

	gfs2_assert_withdraw(ip->i_sbd,
		(ip->i_di.di_mode & S_IFMT) == (inode->i_mode & S_IFMT));
	ip->i_di.di_mode = inode->i_mode;
	ip->i_di.di_uid = inode->i_uid;
	ip->i_di.di_gid = inode->i_gid;
	ip->i_di.di_atime = inode->i_atime.tv_sec;
	ip->i_di.di_mtime = inode->i_mtime.tv_sec;
	ip->i_di.di_ctime = inode->i_ctime.tv_sec;
}

/**
 * gfs2_ip2v_lookup - Get the struct inode for a struct gfs2_inode
 * @ip: the struct gfs2_inode to get the struct inode for
 *
 * Returns: A VFS inode, or NULL if none
 */
struct inode *gfs2_ip2v_lookup(struct gfs2_inode *ip)
{
	struct inode *inode = NULL;

	gfs2_assert_warn(ip->i_sbd, test_bit(GIF_MIN_INIT, &ip->i_flags));

	spin_lock(&ip->i_spin);
	if (ip->i_vnode)
		inode = igrab(ip->i_vnode);
	spin_unlock(&ip->i_spin);

	return inode;
}

/**
 * gfs2_ip2v - Get/Create a struct inode for a struct gfs2_inode
 * @ip: the struct gfs2_inode to get the struct inode for
 *
 * Returns: A VFS inode, or NULL if no mem
 */
struct inode *gfs2_ip2v(struct gfs2_inode *ip)
{
	struct inode *inode, *tmp;

	inode = gfs2_ip2v_lookup(ip);
	if (inode)
		return inode;

	tmp = new_inode(ip->i_sbd->sd_vfs);
	if (!tmp)
		return NULL;

	inode_attr_in(ip, tmp);

	if (S_ISREG(ip->i_di.di_mode)) {
		tmp->i_op = &gfs2_file_iops;
		tmp->i_fop = &gfs2_file_fops;
		tmp->i_mapping->a_ops = &gfs2_file_aops;
	} else if (S_ISDIR(ip->i_di.di_mode)) {
		tmp->i_op = &gfs2_dir_iops;
		tmp->i_fop = &gfs2_dir_fops;
	} else if (S_ISLNK(ip->i_di.di_mode)) {
		tmp->i_op = &gfs2_symlink_iops;
	} else {
		tmp->i_op = &gfs2_dev_iops;
		init_special_inode(tmp, tmp->i_mode, tmp->i_rdev);
	}

	tmp->u.generic_ip = NULL;

	for (;;) {
		spin_lock(&ip->i_spin);
		if (!ip->i_vnode)
			break;
		inode = igrab(ip->i_vnode);
		spin_unlock(&ip->i_spin);

		if (inode) {
			iput(tmp);
			return inode;
		}
		yield();
	}

	inode = tmp;

	gfs2_inode_hold(ip);
	ip->i_vnode = inode;
	inode->u.generic_ip = ip;

	spin_unlock(&ip->i_spin);

	insert_inode_hash(inode);

	return inode;
}
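/* iget_test - ilookup5() match callback: compare a VFS inode's attached
   gfs2_inode against the wanted inum by on-disk block address */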
static int iget_test(struct inode *inode, void *opaque)
{
	struct gfs2_inode *ip = inode->u.generic_ip;
	struct gfs2_inum *inum = (struct gfs2_inum *)opaque;

	if (ip && ip->i_num.no_addr == inum->no_addr)
		return 1;

	return 0;
}
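/* gfs2_iget - look up an existing VFS inode for the given inum in the inode
   hash; returns the inode or NULL (does not create one) */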
struct inode *gfs2_iget(struct super_block *sb, struct gfs2_inum *inum)
{
	return ilookup5(sb, (unsigned long)inum->no_formal_ino,
			iget_test, inum);
}
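/* gfs2_inode_min_init - minimally initialise an incore inode (nlink and a
   mode derived from the directory entry type) if not already initialised */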
void gfs2_inode_min_init(struct gfs2_inode *ip, unsigned int type)
{
	spin_lock(&ip->i_spin);
	if (!test_and_set_bit(GIF_MIN_INIT, &ip->i_flags)) {
		ip->i_di.di_nlink = 1;
		ip->i_di.di_mode = DT2IF(type);
	}
	spin_unlock(&ip->i_spin);
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */
int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	if (gfs2_metatype_check(ip->i_sbd, dibh, GFS2_METATYPE_DI)) {
		brelse(dibh);
		return -EIO;
	}

	spin_lock(&ip->i_spin);
	gfs2_dinode_in(&ip->i_di, dibh->b_data);
	set_bit(GIF_MIN_INIT, &ip->i_flags);
	spin_unlock(&ip->i_spin);

	brelse(dibh);

	if (ip->i_num.no_addr != ip->i_di.di_num.no_addr) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(&ip->i_di);
		return -EIO;
	}
	if (ip->i_num.no_formal_ino != ip->i_di.di_num.no_formal_ino)
		return -ESTALE;

	ip->i_vn = ip->i_gl->gl_vn;

	return 0;
}

/**
 * inode_create - create a struct gfs2_inode
 * @i_gl: The glock covering the inode
 * @inum: The inode number
 * @io_gl: the iopen glock to acquire/hold (using holder in new gfs2_inode)
 * @io_state: the state the iopen glock should be acquired in
 * @ipp: pointer to put the returned inode in
 *
 * Returns: errno
 */
static int inode_create(struct gfs2_glock *i_gl, struct gfs2_inum *inum,
			struct gfs2_glock *io_gl, unsigned int io_state,
			struct gfs2_inode **ipp)
{
	struct gfs2_sbd *sdp = i_gl->gl_sbd;
	struct gfs2_inode *ip;
	int error = 0;

	ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
	if (!ip)
		return -ENOMEM;
	memset(ip, 0, sizeof(struct gfs2_inode));

	ip->i_num = *inum;

	atomic_set(&ip->i_count, 1);

	ip->i_vn = i_gl->gl_vn - 1;

	ip->i_gl = i_gl;
	ip->i_sbd = sdp;

	spin_lock_init(&ip->i_spin);
	init_rwsem(&ip->i_rw_mutex);

	ip->i_greedy = gfs2_tune_get(sdp, gt_greedy_default);

	error = gfs2_glock_nq_init(io_gl,
				   io_state, GL_LOCAL_EXCL | GL_EXACT,
				   &ip->i_iopen_gh);
	if (error)
		goto fail;
	ip->i_iopen_gh.gh_owner = NULL;

	spin_lock(&io_gl->gl_spin);
	gfs2_glock_hold(i_gl);
	io_gl->gl_object = i_gl;
	spin_unlock(&io_gl->gl_spin);

	gfs2_glock_hold(i_gl);
	i_gl->gl_object = ip;

	atomic_inc(&sdp->sd_inode_count);

	*ipp = ip;

	return 0;

fail:
	gfs2_meta_cache_flush(ip);
	kmem_cache_free(gfs2_inode_cachep, ip);
	*ipp = NULL;

	return error;
}

/**
 * gfs2_inode_get - Create or get a reference on an inode
 * @i_gl: The glock covering the inode
 * @inum: The inode number
 * @create:
 * @ipp: pointer to put the returned inode in
 *
 * Returns: errno
 */
int gfs2_inode_get(struct gfs2_glock *i_gl, struct gfs2_inum *inum, int create,
		   struct gfs2_inode **ipp)
{
	struct gfs2_sbd *sdp = i_gl->gl_sbd;
	struct gfs2_glock *io_gl;
	int error = 0;

	gfs2_glmutex_lock(i_gl);

	*ipp = i_gl->gl_object;
	if (*ipp) {
		error = -ESTALE;
		if ((*ipp)->i_num.no_formal_ino != inum->no_formal_ino)
			goto out;
		atomic_inc(&(*ipp)->i_count);
		error = 0;
		goto out;
	}

	if (!create)
		goto out;

	error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_iopen_glops,
			       CREATE, &io_gl);
	if (!error) {
		error = inode_create(i_gl, inum, io_gl, LM_ST_SHARED, ipp);
		gfs2_glock_put(io_gl);
	}

out:
	gfs2_glmutex_unlock(i_gl);

	return error;
}
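/* gfs2_inode_hold - take an additional reference on an incore GFS2 inode */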
void gfs2_inode_hold(struct gfs2_inode *ip)
{
	gfs2_assert(ip->i_sbd, atomic_read(&ip->i_count) > 0);
	atomic_inc(&ip->i_count);
}
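/* gfs2_inode_put - drop a reference on an incore GFS2 inode; the structure
   itself is freed separately by gfs2_inode_destroy() */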
void gfs2_inode_put(struct gfs2_inode *ip)
{
	gfs2_assert(ip->i_sbd, atomic_read(&ip->i_count) > 0);
	atomic_dec(&ip->i_count);
}
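/* gfs2_inode_destroy - detach an unreferenced incore inode from its inode and
   iopen glocks, release the iopen holder and free the structure */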
void gfs2_inode_destroy(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_glock *io_gl = ip->i_iopen_gh.gh_gl;
	struct gfs2_glock *i_gl = ip->i_gl;

	gfs2_assert_warn(sdp, !atomic_read(&ip->i_count));
	gfs2_assert(sdp, io_gl->gl_object == i_gl);

	spin_lock(&io_gl->gl_spin);
	io_gl->gl_object = NULL;
	gfs2_glock_put(i_gl);
	spin_unlock(&io_gl->gl_spin);

	gfs2_glock_dq_uninit(&ip->i_iopen_gh);

	gfs2_meta_cache_flush(ip);
	kmem_cache_free(gfs2_inode_cachep, ip);

	i_gl->gl_object = NULL;
	gfs2_glock_put(i_gl);

	atomic_dec(&sdp->sd_inode_count);
}
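/* dinode_dealloc - free the dinode block itself and remove the on-disk
   unlinked tag; the inode must already be down to its single dinode block */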
static int dinode_dealloc(struct gfs2_inode *ip, struct gfs2_unlinked *ul)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_alloc *al;
	struct gfs2_rgrpd *rgd;
	int error;

	if (ip->i_di.di_blocks != 1) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(&ip->i_di);
		return -EIO;
	}

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
	if (error)
		goto out_qs;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_num.no_addr);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_rindex_relse;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
				   &al->al_rgd_gh);
	if (error)
		goto out_rindex_relse;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_UNLINKED +
				 RES_STATFS + RES_QUOTA, 1);
	if (error)
		goto out_rg_gunlock;

	gfs2_trans_add_gl(ip->i_gl);

	gfs2_free_di(rgd, ip);

	error = gfs2_unlinked_ondisk_rm(sdp, ul);

	gfs2_trans_end(sdp);
	clear_bit(GLF_STICKY, &ip->i_gl->gl_flags);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&al->al_rgd_gh);
out_rindex_relse:
	gfs2_glock_dq_uninit(&al->al_ri_gh);
out_qs:
	gfs2_quota_unhold(ip);
out:
	gfs2_alloc_put(ip);

	return error;
}
/**
 * inode_dealloc - Deallocate all on-disk blocks for an inode (dinode)
 * @sdp: the filesystem
 * @ul: the unlinked-inode tag identifying the inode to deallocate
 * @io_gh: a holder for the iopen glock for this inode
 *
 * Returns: errno
 */
static int inode_dealloc(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul,
			 struct gfs2_holder *io_gh)
{
	struct gfs2_inode *ip;
	struct gfs2_holder i_gh;
	int error;

	error = gfs2_glock_nq_num(sdp,
				  ul->ul_ut.ut_inum.no_addr, &gfs2_inode_glops,
				  LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		return error;

	/* We reacquire the iopen lock here to avoid a race with the NFS server
	   calling gfs2_read_inode() with the inode number of an inode we're in
	   the process of deallocating.  And we can't keep our hold on the lock
	   from inode_dealloc_init() for deadlock reasons. */

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY, io_gh);
	error = gfs2_glock_nq(io_gh);
	switch (error) {
	case 0:
		break;
	case GLR_TRYFAILED:
		error = 1;
	default:
		goto out;
	}

	gfs2_assert_warn(sdp, !i_gh.gh_gl->gl_object);
	error = inode_create(i_gh.gh_gl, &ul->ul_ut.ut_inum, io_gh->gh_gl,
			     LM_ST_EXCLUSIVE, &ip);

	gfs2_glock_dq(io_gh);

	if (error)
		goto out;

	error = gfs2_inode_refresh(ip);
	if (error)
		goto out_iput;

	if (ip->i_di.di_nlink) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(&ip->i_di);
		error = -EIO;
		goto out_iput;
	}

	if (S_ISDIR(ip->i_di.di_mode) &&
	    (ip->i_di.di_flags & GFS2_DIF_EXHASH)) {
		error = gfs2_dir_exhash_dealloc(ip);
		if (error)
			goto out_iput;
	}

	if (ip->i_di.di_eattr) {
		error = gfs2_ea_dealloc(ip);
		if (error)
			goto out_iput;
	}

	if (!gfs2_is_stuffed(ip)) {
		error = gfs2_file_dealloc(ip);
		if (error)
			goto out_iput;
	}

	error = dinode_dealloc(ip, ul);
	if (error)
		goto out_iput;

out_iput:
	gfs2_glmutex_lock(i_gh.gh_gl);
	gfs2_inode_put(ip);
	gfs2_inode_destroy(ip);
	gfs2_glmutex_unlock(i_gh.gh_gl);

out:
	gfs2_glock_dq_uninit(&i_gh);

	return error;
}
/**
 * try_inode_dealloc - Try to deallocate an inode and all its blocks
 * @sdp: the filesystem
 *
 * Returns: 0 on success, -errno on error, 1 on busy (inode open)
 */
static int try_inode_dealloc(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	struct gfs2_holder io_gh;
	int error = 0;

	gfs2_try_toss_inode(sdp, &ul->ul_ut.ut_inum);

	error = gfs2_glock_nq_num(sdp,
				  ul->ul_ut.ut_inum.no_addr, &gfs2_iopen_glops,
				  LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB, &io_gh);
	switch (error) {
	case 0:
		break;
	case GLR_TRYFAILED:
		return 1;
	default:
		return error;
	}

	gfs2_glock_dq(&io_gh);
	error = inode_dealloc(sdp, ul, &io_gh);
	gfs2_holder_uninit(&io_gh);

	return error;
}
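/* inode_dealloc_uninit - free a dinode block that was allocated but never
   initialised, and drop its unlinked tag */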
static int inode_dealloc_uninit(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder ri_gh, rgd_gh;
	int error;

	error = gfs2_rindex_hold(sdp, &ri_gh);
	if (error)
		return error;

	rgd = gfs2_blk2rgrpd(sdp, ul->ul_ut.ut_inum.no_addr);
	if (!rgd) {
		gfs2_consist(sdp);
		error = -EIO;
		goto out;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rgd_gh);
	if (error)
		goto out;

	error = gfs2_trans_begin(sdp,
				 RES_RG_BIT + RES_UNLINKED + RES_STATFS,
				 0);
	if (error)
		goto out_gunlock;

	gfs2_free_uninit_di(rgd, ul->ul_ut.ut_inum.no_addr);
	gfs2_unlinked_ondisk_rm(sdp, ul);

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&rgd_gh);
out:
	gfs2_glock_dq_uninit(&ri_gh);

	return error;
}
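/* gfs2_inode_dealloc - dispatch deallocation of an unlinked inode: dinodes
   still flagged uninitialised are simply freed, fully created ones go
   through try_inode_dealloc() */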
int gfs2_inode_dealloc(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	if (ul->ul_ut.ut_flags & GFS2_UTF_UNINIT)
		return inode_dealloc_uninit(sdp, ul);
	else
		return try_inode_dealloc(sdp, ul);
}

/**
 * gfs2_change_nlink - Change nlink count on inode
 * @ip: The GFS2 inode
 * @diff: The change in the nlink count required
 *
 * Returns: errno
 */
int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
{
	struct buffer_head *dibh;
	uint32_t nlink;
	int error;

	nlink = ip->i_di.di_nlink + diff;

	/* If we are reducing the nlink count, but the new value ends up being
	   bigger than the old one, we must have underflowed. */
	if (diff < 0 && nlink > ip->i_di.di_nlink) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(&ip->i_di);
		return -EIO;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	ip->i_di.di_nlink = nlink;
	ip->i_di.di_ctime = get_seconds();

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(&ip->i_di, dibh->b_data);
	brelse(dibh);

	return 0;
}
/**
 * gfs2_lookupi - Look up a filename in a directory and return its inode
 * @dir: The VFS inode of the directory to search
 * @name: The name of the inode to look for
 * @is_root: If 1, ignore the caller's permissions
 * @inodep: Pointer in which the resulting VFS inode is returned
 *
 * There will always be a vnode (Linux VFS inode) for the @dir inode unless
 * @is_root is true.
 *
 * Returns: errno
 */
int gfs2_lookupi(struct inode *dir, struct qstr *name, int is_root,
		 struct inode **inodep)
{
	struct gfs2_inode *ipp;
	struct gfs2_inode *dip = dir->u.generic_ip;
	struct gfs2_sbd *sdp = dip->i_sbd;
	struct gfs2_holder d_gh;
	struct gfs2_inum inum;
	unsigned int type;
	struct gfs2_glock *gl;
	int error = 0;

	*inodep = NULL;

	if (!name->len || name->len > GFS2_FNAMESIZE)
		return -ENAMETOOLONG;

	if (gfs2_filecmp(name, ".", 1) ||
	    (gfs2_filecmp(name, "..", 2) && dir == sdp->sd_root_dir)) {
		gfs2_inode_hold(dip);
		ipp = dip;
		goto done;
	}

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	if (error)
		return error;

	if (!is_root) {
		error = gfs2_repermission(dip->i_vnode, MAY_EXEC, NULL);
		if (error)
			goto out;
	}

	error = gfs2_dir_search(dip, name, &inum, &type);
	if (error)
		goto out;

	error = gfs2_glock_get(sdp, inum.no_addr, &gfs2_inode_glops,
			       CREATE, &gl);
	if (error)
		goto out;

	error = gfs2_inode_get(gl, &inum, CREATE, &ipp);
	if (!error)
		gfs2_inode_min_init(ipp, type);

	gfs2_glock_put(gl);

out:
	gfs2_glock_dq_uninit(&d_gh);
done:
	if (error == 0) {
		*inodep = gfs2_ip2v(ipp);
		if (!*inodep)
			error = -ENOMEM;
		gfs2_inode_put(ipp);
	}

	return error;
}
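/* pick_formal_ino_1 - try to take the next formal inode number from this
   node's local inum range; returns 0 on success, 1 if the range is empty,
   or -errno */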
static int pick_formal_ino_1(struct gfs2_sbd *sdp, uint64_t *formal_ino)
{
	struct gfs2_inode *ip = sdp->sd_ir_inode->u.generic_ip;
	struct buffer_head *bh;
	struct gfs2_inum_range ir;
	int error;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;
	mutex_lock(&sdp->sd_inum_mutex);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error) {
		mutex_unlock(&sdp->sd_inum_mutex);
		gfs2_trans_end(sdp);
		return error;
	}

	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

	if (ir.ir_length) {
		*formal_ino = ir.ir_start++;
		ir.ir_length--;
		gfs2_trans_add_bh(ip->i_gl, bh, 1);
		gfs2_inum_range_out(&ir,
				    bh->b_data + sizeof(struct gfs2_dinode));
		brelse(bh);
		mutex_unlock(&sdp->sd_inum_mutex);
		gfs2_trans_end(sdp);
		return 0;
	}

	brelse(bh);

	mutex_unlock(&sdp->sd_inum_mutex);
	gfs2_trans_end(sdp);

	return 1;
}
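/* pick_formal_ino_2 - the local range was empty: lock the filesystem-wide
   inum inode (sd_inum_inode), carve off a new GFS2_INUM_QUANTUM-sized range
   and hand out the first number from it */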
static int pick_formal_ino_2(struct gfs2_sbd *sdp, uint64_t *formal_ino)
{
	struct gfs2_inode *ip = sdp->sd_ir_inode->u.generic_ip;
	struct gfs2_inode *m_ip = sdp->sd_inum_inode->u.generic_ip;
	struct gfs2_holder gh;
	struct buffer_head *bh;
	struct gfs2_inum_range ir;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out;
	mutex_lock(&sdp->sd_inum_mutex);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_end_trans;

	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

	if (!ir.ir_length) {
		struct buffer_head *m_bh;
		uint64_t x, y;

		error = gfs2_meta_inode_buffer(m_ip, &m_bh);
		if (error)
			goto out_brelse;

		x = *(uint64_t *)(m_bh->b_data + sizeof(struct gfs2_dinode));
		x = y = be64_to_cpu(x);
		ir.ir_start = x;
		ir.ir_length = GFS2_INUM_QUANTUM;
		x += GFS2_INUM_QUANTUM;
		if (x < y)
			gfs2_consist_inode(m_ip);
		x = cpu_to_be64(x);
		gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
		*(uint64_t *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = x;

		brelse(m_bh);
	}

	*formal_ino = ir.ir_start++;
	ir.ir_length--;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	gfs2_inum_range_out(&ir, bh->b_data + sizeof(struct gfs2_dinode));

out_brelse:
	brelse(bh);
out_end_trans:
	mutex_unlock(&sdp->sd_inum_mutex);
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}
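/* pick_formal_ino - allocate the next formal inode number, refilling from
   the shared pool when the local range is exhausted */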
static int pick_formal_ino(struct gfs2_sbd *sdp, uint64_t *inum)
{
	int error;

	error = pick_formal_ino_1(sdp, inum);
	if (error <= 0)
		return error;

	error = pick_formal_ino_2(sdp, inum);

	return error;
}

/**
 * create_ok - OK to create a new on-disk inode here?
 * @dip: Directory in which dinode is to be created
 * @name: Name of new dinode
 * @mode:
 *
 * Returns: errno
 */
static int create_ok(struct gfs2_inode *dip, struct qstr *name,
		     unsigned int mode)
{
	int error;

	error = gfs2_repermission(dip->i_vnode, MAY_WRITE | MAY_EXEC, NULL);
	if (error)
		return error;

	/* Don't create entries in an unlinked directory */
	if (!dip->i_di.di_nlink)
		return -EPERM;

	error = gfs2_dir_search(dip, name, NULL, NULL);
	switch (error) {
	case -ENOENT:
		error = 0;
		break;
	case 0:
		return -EEXIST;
	default:
		return error;
	}

	if (dip->i_di.di_entries == (uint32_t)-1)
		return -EFBIG;
	if (S_ISDIR(mode) && dip->i_di.di_nlink == (uint32_t)-1)
		return -EMLINK;

	return 0;
}
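/* munge_mode_uid_gid - derive the new inode's mode, uid and gid from the
   creating process and the parent directory (honouring the suiddir mount
   option and setgid directories) */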
static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
			       unsigned int *uid, unsigned int *gid)
{
	if (dip->i_sbd->sd_args.ar_suiddir &&
	    (dip->i_di.di_mode & S_ISUID) &&
	    dip->i_di.di_uid) {
		if (S_ISDIR(*mode))
			*mode |= S_ISUID;
		else if (dip->i_di.di_uid != current->fsuid)
			*mode &= ~07111;
		*uid = dip->i_di.di_uid;
	} else
		*uid = current->fsuid;

	if (dip->i_di.di_mode & S_ISGID) {
		if (S_ISDIR(*mode))
			*mode |= S_ISGID;
		*gid = dip->i_di.di_gid;
	} else
		*gid = current->fsgid;
}
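/* alloc_dinode - reserve and allocate the on-disk block for a new dinode and
   record it as an uninitialised unlinked inode */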
static int alloc_dinode(struct gfs2_inode *dip, struct gfs2_unlinked *ul)
{
	struct gfs2_sbd *sdp = dip->i_sbd;
	int error;

	gfs2_alloc_get(dip);

	dip->i_alloc.al_requested = RES_DINODE;
	error = gfs2_inplace_reserve(dip);
	if (error)
		goto out;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_UNLINKED +
				 RES_STATFS, 0);
	if (error)
		goto out_ipreserv;

	ul->ul_ut.ut_inum.no_addr = gfs2_alloc_di(dip);

	ul->ul_ut.ut_flags = GFS2_UTF_UNINIT;
	error = gfs2_unlinked_ondisk_add(sdp, ul);

	gfs2_trans_end(sdp);

out_ipreserv:
	gfs2_inplace_release(dip);
out:
	gfs2_alloc_put(dip);

	return error;
}

/**
 * init_dinode - Fill in a new dinode structure
 * @dip: the directory this inode is being created in
 * @gl: The glock covering the new inode
 * @inum: the inode number
 * @mode: the file permissions
 * @uid:
 * @gid:
 *
 */
static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
			struct gfs2_inum *inum, unsigned int mode,
			unsigned int uid, unsigned int gid)
{
	struct gfs2_sbd *sdp = dip->i_sbd;
	struct gfs2_dinode *di;
	struct buffer_head *dibh;

	dibh = gfs2_meta_new(gl, inum->no_addr);
	gfs2_trans_add_bh(gl, dibh, 1);
	gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
	di = (struct gfs2_dinode *)dibh->b_data;

	di->di_num.no_formal_ino = cpu_to_be64(inum->no_formal_ino);
	di->di_num.no_addr = cpu_to_be64(inum->no_addr);
	di->di_mode = cpu_to_be32(mode);
	di->di_uid = cpu_to_be32(uid);
	di->di_gid = cpu_to_be32(gid);
	di->di_nlink = cpu_to_be32(0);
	di->di_size = cpu_to_be64(0);
	di->di_blocks = cpu_to_be64(1);
	di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(get_seconds());
	di->di_major = di->di_minor = cpu_to_be32(0);
	di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
	di->__pad[0] = di->__pad[1] = 0;
	di->di_flags = cpu_to_be32(0);

	if (S_ISREG(mode)) {
		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
		    gfs2_tune_get(sdp, gt_new_files_jdata))
			di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_DIRECTIO) ||
		    gfs2_tune_get(sdp, gt_new_files_directio))
			di->di_flags |= cpu_to_be32(GFS2_DIF_DIRECTIO);
	} else if (S_ISDIR(mode)) {
		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
					    GFS2_DIF_INHERIT_DIRECTIO);
		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
					    GFS2_DIF_INHERIT_JDATA);
	}

	di->__pad1 = 0;
	di->di_height = cpu_to_be32(0);
	di->__pad2 = 0;
	di->__pad3 = 0;
	di->di_depth = cpu_to_be16(0);
	di->di_entries = cpu_to_be32(0);
	memset(&di->__pad4, 0, sizeof(di->__pad4));
	di->di_eattr = cpu_to_be64(0);
	memset(&di->di_reserved, 0, sizeof(di->di_reserved));

	brelse(dibh);
}
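/* make_dinode - check quota, then write the new dinode to disk and clear its
   GFS2_UTF_UNINIT unlinked flag within a single transaction */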
static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
		       unsigned int mode, struct gfs2_unlinked *ul)
{
	struct gfs2_sbd *sdp = dip->i_sbd;
	unsigned int uid, gid;
	int error;

	munge_mode_uid_gid(dip, &mode, &uid, &gid);

	gfs2_alloc_get(dip);

	error = gfs2_quota_lock(dip, uid, gid);
	if (error)
		goto out;

	error = gfs2_quota_check(dip, uid, gid);
	if (error)
		goto out_quota;

	error = gfs2_trans_begin(sdp, RES_DINODE + RES_UNLINKED +
				 RES_QUOTA, 0);
	if (error)
		goto out_quota;

	ul->ul_ut.ut_flags = 0;
	error = gfs2_unlinked_ondisk_munge(sdp, ul);

	init_dinode(dip, gl, &ul->ul_ut.ut_inum,
		    mode, uid, gid);

	gfs2_quota_change(dip, +1, uid, gid);

	gfs2_trans_end(sdp);

out_quota:
	gfs2_quota_unlock(dip);
out:
	gfs2_alloc_put(dip);

	return error;
}
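/* link_dinode - add the new inode's entry to the parent directory, set its
   link count to 1 and remove the unlinked tag, reserving extra blocks when
   the directory has to grow */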
static int link_dinode(struct gfs2_inode *dip, struct qstr *name,
		       struct gfs2_inode *ip, struct gfs2_unlinked *ul)
{
	struct gfs2_sbd *sdp = dip->i_sbd;
	struct gfs2_alloc *al;
	int alloc_required;
	struct buffer_head *dibh;
	int error;

	al = gfs2_alloc_get(dip);

	error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto fail;

	error = gfs2_diradd_alloc_required(dip, name, &alloc_required);
	if (error)
		goto fail_quota_locks;
	if (alloc_required) {
		error = gfs2_quota_check(dip, dip->i_di.di_uid,
					 dip->i_di.di_gid);
		if (error)
			goto fail_quota_locks;

		al->al_requested = sdp->sd_max_dirres;

		error = gfs2_inplace_reserve(dip);
		if (error)
			goto fail_quota_locks;

		error = gfs2_trans_begin(sdp,
					 sdp->sd_max_dirres +
					 al->al_rgd->rd_ri.ri_length +
					 2 * RES_DINODE + RES_UNLINKED +
					 RES_STATFS + RES_QUOTA, 0);
		if (error)
			goto fail_ipreserv;
	} else {
		error = gfs2_trans_begin(sdp,
					 RES_LEAF +
					 2 * RES_DINODE +
					 RES_UNLINKED, 0);
		if (error)
			goto fail_quota_locks;
	}

	error = gfs2_dir_add(dip, name, &ip->i_num, IF2DT(ip->i_di.di_mode));
	if (error)
		goto fail_end_trans;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto fail_end_trans;
	ip->i_di.di_nlink = 1;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(&ip->i_di, dibh->b_data);
	brelse(dibh);

	error = gfs2_unlinked_ondisk_rm(sdp, ul);
	if (error)
		goto fail_end_trans;

	return 0;

fail_end_trans:
	gfs2_trans_end(sdp);

fail_ipreserv:
	if (dip->i_alloc.al_rgd)
		gfs2_inplace_release(dip);

fail_quota_locks:
	gfs2_quota_unlock(dip);

fail:
	gfs2_alloc_put(dip);

	return error;
}
/**
 * gfs2_createi - Create a new inode
 * @ghs: An array of two holders
 * @name: The name of the new file
 * @mode: the permissions on the new inode
 *
 * @ghs[0] is an initialized holder for the directory
 * @ghs[1] is the holder for the inode lock
 *
 * If the return value is not an error, the glocks on both the directory and
 * the new file are held.  A transaction has been started and an inplace
 * reservation is held, as well.
 *
 * Returns: An inode, or ERR_PTR(error)
 */
struct inode *gfs2_createi(struct gfs2_holder *ghs, struct qstr *name,
			   unsigned int mode)
{
	struct inode *inode;
	struct gfs2_inode *dip = ghs->gh_gl->gl_object;
	struct gfs2_sbd *sdp = dip->i_sbd;
	struct gfs2_unlinked *ul;
	struct gfs2_inode *ip;
	int error;

	if (!name->len || name->len > GFS2_FNAMESIZE)
		return ERR_PTR(-ENAMETOOLONG);

	error = gfs2_unlinked_get(sdp, &ul);
	if (error)
		return ERR_PTR(error);

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
	error = gfs2_glock_nq(ghs);
	if (error)
		goto fail;

	error = create_ok(dip, name, mode);
	if (error)
		goto fail_gunlock;

	error = pick_formal_ino(sdp, &ul->ul_ut.ut_inum.no_formal_ino);
	if (error)
		goto fail_gunlock;

	error = alloc_dinode(dip, ul);
	if (error)
		goto fail_gunlock;

	if (ul->ul_ut.ut_inum.no_addr < dip->i_num.no_addr) {
		gfs2_glock_dq(ghs);

		error = gfs2_glock_nq_num(sdp,
					  ul->ul_ut.ut_inum.no_addr,
					  &gfs2_inode_glops,
					  LM_ST_EXCLUSIVE, GL_SKIP,
					  ghs + 1);
		if (error) {
			gfs2_unlinked_put(sdp, ul);
			return ERR_PTR(error);
		}

		gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
		error = gfs2_glock_nq(ghs);
		if (error) {
			gfs2_glock_dq_uninit(ghs + 1);
			gfs2_unlinked_put(sdp, ul);
			return ERR_PTR(error);
		}

		error = create_ok(dip, name, mode);
		if (error)
			goto fail_gunlock2;
	} else {
		error = gfs2_glock_nq_num(sdp,
					  ul->ul_ut.ut_inum.no_addr,
					  &gfs2_inode_glops,
					  LM_ST_EXCLUSIVE, GL_SKIP,
					  ghs + 1);
		if (error)
			goto fail_gunlock;
	}

	error = make_dinode(dip, ghs[1].gh_gl, mode, ul);
	if (error)
		goto fail_gunlock2;

	error = gfs2_inode_get(ghs[1].gh_gl, &ul->ul_ut.ut_inum, CREATE, &ip);
	if (error)
		goto fail_gunlock2;

	error = gfs2_inode_refresh(ip);
	if (error)
		goto fail_iput;

	error = gfs2_acl_create(dip, ip);
	if (error)
		goto fail_iput;

	error = link_dinode(dip, name, ip, ul);
	if (error)
		goto fail_iput;

	gfs2_unlinked_put(sdp, ul);

	inode = gfs2_ip2v(ip);
	gfs2_inode_put(ip);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	return inode;

fail_iput:
	gfs2_inode_put(ip);

fail_gunlock2:
	gfs2_glock_dq_uninit(ghs + 1);

fail_gunlock:
	gfs2_glock_dq(ghs);

fail:
	gfs2_unlinked_put(sdp, ul);

	return ERR_PTR(error);
}
/**
 * gfs2_unlinki - Unlink a file
 * @dip: The inode of the directory
 * @name: The name of the file to be unlinked
 * @ip: The inode of the file to be removed
 *
 * Assumes Glocks on both dip and ip are held.
 *
 * Returns: errno
 */
int gfs2_unlinki(struct gfs2_inode *dip, struct qstr *name,
		 struct gfs2_inode *ip, struct gfs2_unlinked *ul)
{
	struct gfs2_sbd *sdp = dip->i_sbd;
	int error;

	error = gfs2_dir_del(dip, name);
	if (error)
		return error;

	error = gfs2_change_nlink(ip, -1);
	if (error)
		return error;

	/* If this inode is being unlinked from the directory structure,
	   we need to mark that in the log so that it isn't lost during
	   a crash. */

	if (!ip->i_di.di_nlink) {
		ul->ul_ut.ut_inum = ip->i_num;
		error = gfs2_unlinked_ondisk_add(sdp, ul);
		if (!error)
			set_bit(GLF_STICKY, &ip->i_gl->gl_flags);
	}

	return error;
}

/**
 * gfs2_rmdiri - Remove a directory
 * @dip: The parent directory of the directory to be removed
 * @name: The name of the directory to be removed
 * @ip: The GFS2 inode of the directory to be removed
 *
 * Assumes Glocks on dip and ip are held
 *
 * Returns: errno
 */
int gfs2_rmdiri(struct gfs2_inode *dip, struct qstr *name,
		struct gfs2_inode *ip, struct gfs2_unlinked *ul)
{
	struct gfs2_sbd *sdp = dip->i_sbd;
	struct qstr dotname;
	int error;

	if (ip->i_di.di_entries != 2) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(&ip->i_di);
		return -EIO;
	}

	error = gfs2_dir_del(dip, name);
	if (error)
		return error;

	error = gfs2_change_nlink(dip, -1);
	if (error)
		return error;

	dotname.len = 1;
	dotname.name = ".";
	error = gfs2_dir_del(ip, &dotname);
	if (error)
		return error;

	dotname.len = 2;
	dotname.name = "..";
	error = gfs2_dir_del(ip, &dotname);
	if (error)
		return error;

	error = gfs2_change_nlink(ip, -2);
	if (error)
		return error;

	/* This inode is being unlinked from the directory structure and
	   we need to mark that in the log so that it isn't lost during
	   a crash. */

	ul->ul_ut.ut_inum = ip->i_num;
	error = gfs2_unlinked_ondisk_add(sdp, ul);
	if (!error)
		set_bit(GLF_STICKY, &ip->i_gl->gl_flags);

	return error;
}
/**
 * gfs2_unlink_ok - check to see that an inode is still in a directory
 * @dip: the directory
 * @name: the name of the file
 * @ip: the inode
 *
 * Assumes that the lock on (at least) @dip is held.
 *
 * Returns: 0 if the parent/child relationship is correct, errno if it isn't
 */
int gfs2_unlink_ok(struct gfs2_inode *dip, struct qstr *name,
		   struct gfs2_inode *ip)
{
	struct gfs2_inum inum;
	unsigned int type;
	int error;

	if (IS_IMMUTABLE(ip->i_vnode) || IS_APPEND(ip->i_vnode))
		return -EPERM;

	if ((dip->i_di.di_mode & S_ISVTX) &&
	    dip->i_di.di_uid != current->fsuid &&
	    ip->i_di.di_uid != current->fsuid &&
	    !capable(CAP_FOWNER))
		return -EPERM;

	if (IS_APPEND(dip->i_vnode))
		return -EPERM;

	error = gfs2_repermission(dip->i_vnode, MAY_WRITE | MAY_EXEC, NULL);
	if (error)
		return error;

	error = gfs2_dir_search(dip, name, &inum, &type);
	if (error)
		return error;

	if (!gfs2_inum_equal(&inum, &ip->i_num))
		return -ENOENT;

	if (IF2DT(ip->i_di.di_mode) != type) {
		gfs2_consist_inode(dip);
		return -EIO;
	}

	return 0;
}
/**
 * gfs2_ok_to_move - check if it's ok to move a directory to another directory
 * @this: move this
 * @to: to here
 *
 * Follow @to back to the root and make sure we don't encounter @this
 * Assumes we already hold the rename lock.
 *
 * Returns: errno
 */
int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
{
	struct gfs2_sbd *sdp = this->i_sbd;
	struct inode *dir = to->i_vnode;
	struct inode *tmp;
	struct qstr dotdot;
	int error = 0;

	memset(&dotdot, 0, sizeof(struct qstr));
	dotdot.name = "..";
	dotdot.len = 2;

	igrab(dir);

	for (;;) {
		if (dir == this->i_vnode) {
			error = -EINVAL;
			break;
		}
		if (dir == sdp->sd_root_dir) {
			error = 0;
			break;
		}

		error = gfs2_lookupi(dir, &dotdot, 1, &tmp);
		if (error)
			break;

		iput(dir);
		dir = tmp;
	}

	iput(dir);

	return error;
}

/**
 * gfs2_readlinki - return the contents of a symlink
 * @ip: the symlink's inode
 * @buf: a pointer to the buffer to be filled
 * @len: a pointer to the length of @buf
 *
 * If @buf is too small, a piece of memory is kmalloc()ed and needs
 * to be freed by the caller.
 *
 * Returns: errno
 */
int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
{
	struct gfs2_holder i_gh;
	struct buffer_head *dibh;
	unsigned int x;
	int error;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
	error = gfs2_glock_nq_atime(&i_gh);
	if (error) {
		gfs2_holder_uninit(&i_gh);
		return error;
	}

	if (!ip->i_di.di_size) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	x = ip->i_di.di_size + 1;
	if (x > *len) {
		*buf = kmalloc(x, GFP_KERNEL);
		if (!*buf) {
			error = -ENOMEM;
			goto out_brelse;
		}
	}

	memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
	*len = x;

out_brelse:
	brelse(dibh);
out:
	gfs2_glock_dq_uninit(&i_gh);

	return error;
}
/**
 * gfs2_glock_nq_atime - Acquire a hold on an inode's glock, and
 *       conditionally update the inode's atime
 * @gh: the holder to acquire
 *
 * Tests atime (access time) for gfs2_read, gfs2_readdir and gfs2_mmap
 * Update if the difference between the current time and the inode's current
 * atime is greater than an interval specified at mount.
 *
 * Returns: errno
 */
int gfs2_glock_nq_atime(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int64_t curtime, quantum = gfs2_tune_get(sdp, gt_atime_quantum);
	unsigned int state;
	int flags;
	int error;

	if (gfs2_assert_warn(sdp, gh->gh_flags & GL_ATIME) ||
	    gfs2_assert_warn(sdp, !(gh->gh_flags & GL_ASYNC)) ||
	    gfs2_assert_warn(sdp, gl->gl_ops == &gfs2_inode_glops))
		return -EINVAL;

	state = gh->gh_state;
	flags = gh->gh_flags;

	error = gfs2_glock_nq(gh);
	if (error)
		return error;

	if (test_bit(SDF_NOATIME, &sdp->sd_flags) ||
	    (sdp->sd_vfs->s_flags & MS_RDONLY))
		return 0;

	curtime = get_seconds();
	if (curtime - ip->i_di.di_atime >= quantum) {
		gfs2_glock_dq(gh);
		gfs2_holder_reinit(LM_ST_EXCLUSIVE,
				   gh->gh_flags & ~LM_FLAG_ANY,
				   gh);
		error = gfs2_glock_nq(gh);
		if (error)
			return error;

		/* Verify that atime hasn't been updated while we were
		   trying to get exclusive lock. */

		curtime = get_seconds();
		if (curtime - ip->i_di.di_atime >= quantum) {
			struct buffer_head *dibh;

			error = gfs2_trans_begin(sdp, RES_DINODE, 0);
			if (error == -EROFS)
				return 0;
			if (error)
				goto fail;

			error = gfs2_meta_inode_buffer(ip, &dibh);
			if (error)
				goto fail_end_trans;

			ip->i_di.di_atime = curtime;

			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
			gfs2_dinode_out(&ip->i_di, dibh->b_data);
			brelse(dibh);

			gfs2_trans_end(sdp);
		}

		/* If someone else has asked for the glock,
		   unlock and let them have it.  Then reacquire
		   in the original state. */
		if (gfs2_glock_is_blocking(gl)) {
			gfs2_glock_dq(gh);
			gfs2_holder_reinit(state, flags, gh);
			return gfs2_glock_nq(gh);
		}
	}

	return 0;

fail_end_trans:
	gfs2_trans_end(sdp);

fail:
	gfs2_glock_dq(gh);

	return error;
}
/**
 * glock_compare_atime - Compare two struct gfs2_glock structures for sort
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 * Returns: 1 if A > B
 *         -1 if A < B
 *          0 if A = B
 */
static int glock_compare_atime(const void *arg_a, const void *arg_b)
{
	struct gfs2_holder *gh_a = *(struct gfs2_holder **)arg_a;
	struct gfs2_holder *gh_b = *(struct gfs2_holder **)arg_b;
	struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	struct lm_lockname *b = &gh_b->gh_gl->gl_name;
	int ret = 0;

	if (a->ln_number > b->ln_number)
		ret = 1;
	else if (a->ln_number < b->ln_number)
		ret = -1;
	else {
		if (gh_a->gh_state == LM_ST_SHARED &&
		    gh_b->gh_state == LM_ST_EXCLUSIVE)
			ret = 1;
		else if (gh_a->gh_state == LM_ST_SHARED &&
			 (gh_b->gh_flags & GL_ATIME))
			ret = 1;
	}

	return ret;
}

/**
 * gfs2_glock_nq_m_atime - acquire multiple glocks where one may need an
 *      atime update
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */
int gfs2_glock_nq_m_atime(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder **p;
	unsigned int x;
	int error = 0;

	if (!num_gh)
		return 0;

	if (num_gh == 1) {
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		if (ghs->gh_flags & GL_ATIME)
			error = gfs2_glock_nq_atime(ghs);
		else
			error = gfs2_glock_nq(ghs);
		return error;
	}

	p = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare_atime, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		if (p[x]->gh_flags & GL_ATIME)
			error = gfs2_glock_nq_atime(p[x]);
		else
			error = gfs2_glock_nq(p[x]);

		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	kfree(p);

	return error;
}
/**
 * gfs2_try_toss_vnode - See if we can toss a vnode from memory
 * @ip: the inode
 *
 * Prunes the dcache aliases and drops the VFS inode so that it can be
 * evicted from memory.
 */
void gfs2_try_toss_vnode(struct gfs2_inode *ip)
{
	struct inode *inode;

	inode = gfs2_ip2v_lookup(ip);
	if (!inode)
		return;

	d_prune_aliases(inode);

	if (S_ISDIR(ip->i_di.di_mode)) {
		struct list_head *head = &inode->i_dentry;
		struct dentry *d = NULL;

		spin_lock(&dcache_lock);
		if (list_empty(head))
			spin_unlock(&dcache_lock);
		else {
			d = list_entry(head->next, struct dentry, d_alias);
			dget_locked(d);
			spin_unlock(&dcache_lock);

			if (have_submounts(d))
				dput(d);
			else {
				shrink_dcache_parent(d);
				dput(d);
				d_prune_aliases(inode);
			}
		}
	}

	inode->i_nlink = 0;
	iput(inode);
}
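/* __gfs2_setattr_simple - apply an iattr to the VFS inode and write the
   updated dinode back; the caller must already be inside a transaction */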
static int __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		error = inode_setattr(ip->i_vnode, attr);
		gfs2_assert_warn(ip->i_sbd, !error);
		gfs2_inode_attr_out(ip);

		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(&ip->i_di, dibh->b_data);
		brelse(dibh);
	}
	return error;
}

/**
 * gfs2_setattr_simple -
 * @ip: The GFS2 inode
 * @attr: The attributes to change
 *
 * Called with a reference on the vnode.
 *
 * Returns: errno
 */
int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
	int error;

	if (current->journal_info)
		return __gfs2_setattr_simple(ip, attr);

	error = gfs2_trans_begin(ip->i_sbd, RES_DINODE, 0);
	if (error)
		return error;

	error = __gfs2_setattr_simple(ip, attr);

	gfs2_trans_end(ip->i_sbd);

	return error;
}
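/* gfs2_repermission - wrapper around the VFS permission() check */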
int gfs2_repermission(struct inode *inode, int mask, struct nameidata *nd)
{
	return permission(inode, mask, nd);
}