xfs_iget.c

/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_acl.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_trace.h"
/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
        struct xfs_mount *mp,
        xfs_ino_t ino)
{
        struct xfs_inode *ip;

        /*
         * if this didn't occur in transactions, we could use
         * KM_MAYFAIL and return NULL here on ENOMEM. Set the
         * code up to do this anyway.
         */
        ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
        if (!ip)
                return NULL;
        if (inode_init_always(mp->m_super, VFS_I(ip))) {
                kmem_zone_free(xfs_inode_zone, ip);
                return NULL;
        }

        ASSERT(atomic_read(&ip->i_pincount) == 0);
        ASSERT(!spin_is_locked(&ip->i_flags_lock));
        ASSERT(!xfs_isiflocked(ip));
        ASSERT(ip->i_ino == 0);

        mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

        /* initialise the xfs inode */
        ip->i_ino = ino;
        ip->i_mount = mp;
        memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
        ip->i_afp = NULL;
        memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
        ip->i_flags = 0;
        ip->i_delayed_blks = 0;
        memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));

        return ip;
}
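
/*
 * RCU callback used by xfs_inode_free() to perform the final free of the
 * struct xfs_inode once the RCU grace period has expired.
 */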
STATIC void
xfs_inode_free_callback(
        struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        struct xfs_inode *ip = XFS_I(inode);

        kmem_zone_free(xfs_inode_zone, ip);
}
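
/*
 * Free an xfs_inode: tear down the data and attribute forks and the inode
 * log item, mark the inode as being reclaimed with an invalid inode number,
 * and hand it off to RCU for the final free.
 */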
void
xfs_inode_free(
        struct xfs_inode *ip)
{
        switch (ip->i_d.di_mode & S_IFMT) {
        case S_IFREG:
        case S_IFDIR:
        case S_IFLNK:
                xfs_idestroy_fork(ip, XFS_DATA_FORK);
                break;
        }

        if (ip->i_afp)
                xfs_idestroy_fork(ip, XFS_ATTR_FORK);

        if (ip->i_itemp) {
                ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
                xfs_inode_item_destroy(ip);
                ip->i_itemp = NULL;
        }

        /* asserts to verify all state is correct here */
        ASSERT(atomic_read(&ip->i_pincount) == 0);
        ASSERT(!spin_is_locked(&ip->i_flags_lock));
        ASSERT(!xfs_isiflocked(ip));

        /*
         * Because we use RCU freeing we need to ensure the inode always
         * appears to be reclaimed with an invalid inode number when in the
         * free state. The ip->i_flags_lock provides the barrier against lookup
         * races.
         */
        spin_lock(&ip->i_flags_lock);
        ip->i_flags = XFS_IRECLAIM;
        ip->i_ino = 0;
        spin_unlock(&ip->i_flags_lock);

        call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
        struct xfs_perag *pag,
        struct xfs_inode *ip,
        xfs_ino_t ino,
        int flags,
        int lock_flags) __releases(RCU)
{
        struct inode *inode = VFS_I(ip);
        struct xfs_mount *mp = ip->i_mount;
        int error;

        /*
         * check for re-use of an inode within an RCU grace period due to the
         * radix tree nodes not being updated yet. We monitor for this by
         * setting the inode number to zero before freeing the inode structure.
         * If the inode has been reallocated and set up, then the inode number
         * will not match, so check for that, too.
         */
        spin_lock(&ip->i_flags_lock);
        if (ip->i_ino != ino) {
                trace_xfs_iget_skip(ip);
                XFS_STATS_INC(xs_ig_frecycle);
                error = EAGAIN;
                goto out_error;
        }
        /*
         * If we are racing with another cache hit that is currently
         * instantiating this inode or currently recycling it out of
         * reclaimable state, wait for the initialisation to complete
         * before continuing.
         *
         * XXX(hch): eventually we should do something equivalent to
         * wait_on_inode to wait for these flags to be cleared
         * instead of polling for it.
         */
        if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
                trace_xfs_iget_skip(ip);
                XFS_STATS_INC(xs_ig_frecycle);
                error = EAGAIN;
                goto out_error;
        }
        /*
         * If lookup is racing with unlink return an error immediately.
         */
        if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
                error = ENOENT;
                goto out_error;
        }
        /*
         * If IRECLAIMABLE is set, we've torn down the VFS inode already.
         * Need to carefully get it back into usable state.
         */
        if (ip->i_flags & XFS_IRECLAIMABLE) {
                trace_xfs_iget_reclaim(ip);

                /*
                 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
                 * from stomping over us while we recycle the inode. We can't
                 * clear the radix tree reclaimable tag yet as it requires
                 * pag_ici_lock to be held exclusive.
                 */
                ip->i_flags |= XFS_IRECLAIM;

                spin_unlock(&ip->i_flags_lock);
                rcu_read_unlock();

                error = -inode_init_always(mp->m_super, inode);
                if (error) {
                        /*
                         * Re-initializing the inode failed, and we are in deep
                         * trouble. Try to re-add it to the reclaim list.
                         */
                        rcu_read_lock();
                        spin_lock(&ip->i_flags_lock);

                        ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
                        ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
                        trace_xfs_iget_reclaim_fail(ip);
                        goto out_error;
                }

                spin_lock(&pag->pag_ici_lock);
                spin_lock(&ip->i_flags_lock);

                /*
                 * Clear the per-lifetime state in the inode as we are now
                 * effectively a new inode and need to return to the initial
                 * state before reuse occurs.
                 */
                ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
                ip->i_flags |= XFS_INEW;
                __xfs_inode_clear_reclaim_tag(mp, pag, ip);
                inode->i_state = I_NEW;

                ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
                mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

                spin_unlock(&ip->i_flags_lock);
                spin_unlock(&pag->pag_ici_lock);
        } else {
                /* If the VFS inode is being torn down, pause and try again. */
                if (!igrab(inode)) {
                        trace_xfs_iget_skip(ip);
                        error = EAGAIN;
                        goto out_error;
                }

                /* We've got a live one. */
                spin_unlock(&ip->i_flags_lock);
                rcu_read_unlock();
                trace_xfs_iget_hit(ip);
        }
        if (lock_flags != 0)
                xfs_ilock(ip, lock_flags);

        xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
        XFS_STATS_INC(xs_ig_found);

        return 0;

out_error:
        spin_unlock(&ip->i_flags_lock);
        rcu_read_unlock();
        return error;
}
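
/*
 * The inode was not found in the cache: allocate a new in-core inode, read
 * its on-disk contents and insert it into the per-AG radix tree.
 */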
static int
xfs_iget_cache_miss(
        struct xfs_mount *mp,
        struct xfs_perag *pag,
        xfs_trans_t *tp,
        xfs_ino_t ino,
        struct xfs_inode **ipp,
        int flags,
        int lock_flags)
{
        struct xfs_inode *ip;
        int error;
        xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);
        int iflags;

        ip = xfs_inode_alloc(mp, ino);
        if (!ip)
                return ENOMEM;

        error = xfs_iread(mp, tp, ip, flags);
        if (error)
                goto out_destroy;

        trace_xfs_iget_miss(ip);

        if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
                error = ENOENT;
                goto out_destroy;
        }

        /*
         * Preload the radix tree so we can insert safely under the
         * write spinlock. Note that we cannot sleep inside the preload
         * region. Since we can be called from transaction context, don't
         * recurse into the file system.
         */
        if (radix_tree_preload(GFP_NOFS)) {
                error = EAGAIN;
                goto out_destroy;
        }
        /*
         * Because the inode hasn't been added to the radix-tree yet it can't
         * be found by another thread, so we can do the non-sleeping lock here.
         */
        if (lock_flags) {
                if (!xfs_ilock_nowait(ip, lock_flags))
                        BUG();
        }

        /*
         * These values must be set before inserting the inode into the radix
         * tree as the moment it is inserted a concurrent lookup (allowed by the
         * RCU locking mechanism) can find it and that lookup must see that this
         * is an inode currently under construction (i.e. that XFS_INEW is set).
         * The ip->i_flags_lock that protects the XFS_INEW flag forms the
         * memory barrier that ensures this detection works correctly at lookup
         * time.
         */
        iflags = XFS_INEW;
        if (flags & XFS_IGET_DONTCACHE)
                iflags |= XFS_IDONTCACHE;
        ip->i_udquot = ip->i_gdquot = NULL;
        xfs_iflags_set(ip, iflags);

        /* insert the new inode */
        spin_lock(&pag->pag_ici_lock);
        error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
        if (unlikely(error)) {
                WARN_ON(error != -EEXIST);
                XFS_STATS_INC(xs_ig_dup);
                error = EAGAIN;
                goto out_preload_end;
        }
        spin_unlock(&pag->pag_ici_lock);
        radix_tree_preload_end();

        *ipp = ip;
        return 0;

out_preload_end:
        spin_unlock(&pag->pag_ici_lock);
        radix_tree_preload_end();
        if (lock_flags)
                xfs_iunlock(ip, lock_flags);
out_destroy:
        __destroy_inode(VFS_I(ip));
        xfs_inode_free(ip);
        return error;
}
/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system. It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one. This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired. This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode. See the comment
 *               for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
        xfs_mount_t *mp,
        xfs_trans_t *tp,
        xfs_ino_t ino,
        uint flags,
        uint lock_flags,
        xfs_inode_t **ipp)
{
        xfs_inode_t *ip;
        int error;
        xfs_perag_t *pag;
        xfs_agino_t agino;

        /*
         * xfs_reclaim_inode() uses the ILOCK to ensure an inode
         * doesn't get freed while it's being referenced during a
         * radix tree traversal here. It assumes this function
         * acquires only the ILOCK (and therefore it has no need to
         * involve the IOLOCK in this synchronization).
         */
        ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
        /* reject inode numbers outside existing AGs */
        if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
                return EINVAL;

        /* get the perag structure and ensure that it's inode capable */
        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
        agino = XFS_INO_TO_AGINO(mp, ino);

again:
        error = 0;
        rcu_read_lock();
        ip = radix_tree_lookup(&pag->pag_ici_root, agino);

        if (ip) {
                error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
                if (error)
                        goto out_error_or_again;
        } else {
                rcu_read_unlock();
                XFS_STATS_INC(xs_ig_missed);

                error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
                                            flags, lock_flags);
                if (error)
                        goto out_error_or_again;
        }
        xfs_perag_put(pag);

        *ipp = ip;

        /*
         * If we have a real type for an on-disk inode, we can set ops(&unlock)
         * now. If it's a new inode being created, xfs_ialloc will handle it.
         */
        if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
                xfs_setup_inode(ip);
        return 0;

out_error_or_again:
        if (error == EAGAIN) {
                delay(1);
                goto again;
        }
        xfs_perag_put(pag);
        return error;
}
/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code. It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format. If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in. Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared(). This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
        xfs_inode_t *ip)
{
        uint lock_mode;

        if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
            ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
                lock_mode = XFS_ILOCK_EXCL;
        } else {
                lock_mode = XFS_ILOCK_SHARED;
        }

        xfs_ilock(ip, lock_mode);

        return lock_mode;
}
/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
        xfs_inode_t *ip,
        unsigned int lock_mode)
{
        xfs_iunlock(ip, lock_mode);
}
/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock. This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked. It can be:
 *              XFS_IOLOCK_SHARED,
 *              XFS_IOLOCK_EXCL,
 *              XFS_ILOCK_SHARED,
 *              XFS_ILOCK_EXCL,
 *              XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *              XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *              XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *              XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
        xfs_inode_t *ip,
        uint lock_flags)
{
        /*
         * You can't set both SHARED and EXCL for the same lock,
         * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
         * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
         */
        ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
               (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

        if (lock_flags & XFS_IOLOCK_EXCL)
                mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
        else if (lock_flags & XFS_IOLOCK_SHARED)
                mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

        if (lock_flags & XFS_ILOCK_EXCL)
                mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
        else if (lock_flags & XFS_ILOCK_SHARED)
                mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

        trace_xfs_ilock(ip, lock_flags, _RET_IP_);
}
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep. It returns 1 if it gets
 * the requested locks and 0 otherwise. If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked. See the comment for xfs_ilock() for a list
 *       of valid values.
 */
int
xfs_ilock_nowait(
        xfs_inode_t *ip,
        uint lock_flags)
{
        /*
         * You can't set both SHARED and EXCL for the same lock,
         * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
         * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
         */
        ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
               (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

        if (lock_flags & XFS_IOLOCK_EXCL) {
                if (!mrtryupdate(&ip->i_iolock))
                        goto out;
        } else if (lock_flags & XFS_IOLOCK_SHARED) {
                if (!mrtryaccess(&ip->i_iolock))
                        goto out;
        }
        if (lock_flags & XFS_ILOCK_EXCL) {
                if (!mrtryupdate(&ip->i_lock))
                        goto out_undo_iolock;
        } else if (lock_flags & XFS_ILOCK_SHARED) {
                if (!mrtryaccess(&ip->i_lock))
                        goto out_undo_iolock;
        }
        trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
        return 1;

out_undo_iolock:
        if (lock_flags & XFS_IOLOCK_EXCL)
                mrunlock_excl(&ip->i_iolock);
        else if (lock_flags & XFS_IOLOCK_SHARED)
                mrunlock_shared(&ip->i_iolock);
out:
        return 0;
}
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait(). The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be unlocked. See the comment for xfs_ilock() for a list
 *       of valid values for this parameter.
 */
void
xfs_iunlock(
        xfs_inode_t *ip,
        uint lock_flags)
{
        /*
         * You can't set both SHARED and EXCL for the same lock,
         * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
         * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
         */
        ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
               (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
        ASSERT(lock_flags != 0);

        if (lock_flags & XFS_IOLOCK_EXCL)
                mrunlock_excl(&ip->i_iolock);
        else if (lock_flags & XFS_IOLOCK_SHARED)
                mrunlock_shared(&ip->i_iolock);

        if (lock_flags & XFS_ILOCK_EXCL)
                mrunlock_excl(&ip->i_lock);
        else if (lock_flags & XFS_ILOCK_SHARED)
                mrunlock_shared(&ip->i_lock);

        trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}
/*
 * give up write locks. the i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
        xfs_inode_t *ip,
        uint lock_flags)
{
        ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

        if (lock_flags & XFS_ILOCK_EXCL)
                mrdemote(&ip->i_lock);
        if (lock_flags & XFS_IOLOCK_EXCL)
                mrdemote(&ip->i_iolock);

        trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}

#ifdef DEBUG
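/*
 * Debug-only check that the inode lock or IO lock is held in (at least) the
 * mode requested in lock_flags.
 */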
int
xfs_isilocked(
        xfs_inode_t *ip,
        uint lock_flags)
{
        if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
                if (!(lock_flags & XFS_ILOCK_SHARED))
                        return !!ip->i_lock.mr_writer;
                return rwsem_is_locked(&ip->i_lock.mr_lock);
        }

        if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
                if (!(lock_flags & XFS_IOLOCK_SHARED))
                        return !!ip->i_iolock.mr_writer;
                return rwsem_is_locked(&ip->i_iolock.mr_lock);
        }

        ASSERT(0);
        return 0;
}
#endif
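
/*
 * Slow path of xfs_iflock(): sleep on the flush lock wait queue until the
 * flush lock can be taken with xfs_iflock_nowait().
 */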
void
__xfs_iflock(
        struct xfs_inode *ip)
{
        wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
        DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);

        do {
                prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
                if (xfs_isiflocked(ip))
                        io_schedule();
        } while (!xfs_iflock_nowait(ip));

        finish_wait(wq, &wait.wait);
}