xfs_iget.c

/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"

/*
 * Check the validity of the inode we just found in the cache
 */
static int
xfs_iget_cache_hit(
	struct inode		*inode,
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*old_inode;
	int			error = 0;

	/*
	 * If INEW is set this inode is being set up.
	 * Pause and try again.
	 */
	if (xfs_iflags_test(ip, XFS_INEW)) {
		error = EAGAIN;
		XFS_STATS_INC(xs_ig_frecycle);
		goto out_error;
	}

	old_inode = ip->i_vnode;
	if (old_inode == NULL) {
		/*
		 * If IRECLAIM is set this inode is
		 * on its way out of the system,
		 * we need to pause and try again.
		 */
		if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
			error = EAGAIN;
			XFS_STATS_INC(xs_ig_frecycle);
			goto out_error;
		}
		ASSERT(xfs_iflags_test(ip, XFS_IRECLAIMABLE));

		/*
		 * If lookup is racing with unlink, then we
		 * should return an error immediately so we
		 * don't remove it from the reclaim list and
		 * potentially leak the inode.
		 */
		if ((ip->i_d.di_mode == 0) &&
		    !(flags & XFS_IGET_CREATE)) {
			error = ENOENT;
			goto out_error;
		}
		xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

		xfs_iflags_clear(ip, XFS_IRECLAIMABLE);
		read_unlock(&pag->pag_ici_lock);

		XFS_MOUNT_ILOCK(mp);
		list_del_init(&ip->i_reclaim);
		XFS_MOUNT_IUNLOCK(mp);

	} else if (inode != old_inode) {
		/* The inode is being torn down, pause and
		 * try again.
		 */
		if (old_inode->i_state & (I_FREEING | I_CLEAR)) {
			error = EAGAIN;
			XFS_STATS_INC(xs_ig_frecycle);
			goto out_error;
		}

		/* Chances are the other vnode (the one in the inode) is being torn
		 * down right now, and we landed on top of it. Question is, what do
		 * we do? Unhook the old inode and hook up the new one?
		 */
		cmn_err(CE_PANIC,
			"xfs_iget_core: ambiguous vns: vp/0x%p, invp/0x%p",
			old_inode, inode);
	} else {
		read_unlock(&pag->pag_ici_lock);
	}

	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out;
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE);
	xfs_itrace_exit_tag(ip, "xfs_iget.found");
	XFS_STATS_INC(xs_ig_found);
	return 0;

out_error:
	read_unlock(&pag->pag_ici_lock);
out:
	return error;
}

static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	xfs_daddr_t		bno,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct xfs_inode	*ip;
	int			error;
	unsigned long		first_index, mask;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);

	/*
	 * Read the disk inode attributes into a new inode structure and get
	 * a new vnode for it. This should also initialize i_ino and i_mount.
	 */
	error = xfs_iread(mp, tp, ino, &ip, bno,
			  (flags & XFS_IGET_BULKSTAT) ? XFS_IMAP_BULKSTAT : 0);
	if (error)
		return error;

	xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock.
	 */
	if (radix_tree_preload(GFP_KERNEL)) {
		error = EAGAIN;
		goto out_destroy;
	}

	if (lock_flags)
		xfs_ilock(ip, lock_flags);

	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
	first_index = agino & mask;
	write_lock(&pag->pag_ici_lock);

	/* insert the new inode */
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_unlock;
	}

	/* These values _must_ be set before releasing the radix tree lock! */
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, XFS_INEW);

	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	*ipp = ip;
	return 0;

out_unlock:
	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
out_destroy:
	xfs_idestroy(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, attach it to the provided
 * vnode.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and attach the provided vnode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *       for xfs_ilock() for a list of valid values.
 * bno -- the block number starting the buffer containing the inode,
 *       if known (as by bulkstat), else 0.
 */
STATIC int
xfs_iget_core(
	struct inode	*inode,
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/* the radix tree exists only in inode capable AGs */
	if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_get_perag(mp, ino);
	if (!pag->pagi_inodeok)
		return EINVAL;
	ASSERT(pag->pag_ici_init);
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(inode, pag, ip, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		read_unlock(&pag->pag_ici_lock);
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno,
					    flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_put_perag(mp, pag);

	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));

	xfs_iflags_set(ip, XFS_IMODIFIED);
	*ipp = ip;

	/*
	 * Set up the XFS inode with the Linux inode.
	 */
	ip->i_vnode = inode;
	inode->i_private = ip;

	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_put_perag(mp, pag);
	return error;
}

/*
 * The 'normal' internal xfs_iget; if needed it will
 * 'allocate', or 'get', the vnode.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno)
{
	struct inode	*inode;
	xfs_inode_t	*ip;
	int		error;

	XFS_STATS_INC(xs_ig_attempts);

retry:
	inode = iget_locked(mp->m_super, ino);
	if (!inode)
		/* If we got no inode we are out of memory */
		return ENOMEM;

	if (inode->i_state & I_NEW) {
		XFS_STATS_INC(vn_active);
		XFS_STATS_INC(vn_alloc);

		error = xfs_iget_core(inode, mp, tp, ino, flags,
				      lock_flags, ipp, bno);
		if (error) {
			make_bad_inode(inode);
			if (inode->i_state & I_NEW)
				unlock_new_inode(inode);
			iput(inode);
		}
		return error;
	}

	/*
	 * If the inode is not fully constructed due to
	 * filehandle mismatches wait for the inode to go
	 * away and try again.
	 *
	 * iget_locked will call __wait_on_freeing_inode
	 * to wait for the inode to go away.
	 */
	if (is_bad_inode(inode)) {
		iput(inode);
		delay(1);
		goto retry;
	}

	ip = XFS_I(inode);
	if (!ip) {
		iput(inode);
		delay(1);
		goto retry;
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	XFS_STATS_INC(xs_ig_found);
	*ipp = ip;
	return 0;
}

/*
 * Look for the inode corresponding to the given ino in the hash table.
 * If it is there and its i_transp pointer matches tp, return it.
 * Otherwise, return NULL.
 */
xfs_inode_t *
xfs_inode_incore(xfs_mount_t	*mp,
		 xfs_ino_t	ino,
		 xfs_trans_t	*tp)
{
	xfs_inode_t	*ip;
	xfs_perag_t	*pag;

	pag = xfs_get_perag(mp, ino);
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ino));
	read_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);

	/* the returned inode must match the transaction */
	if (ip && (ip->i_transp != tp))
		return NULL;
	return ip;
}

/*
 * Decrement reference count of an inode structure and unlock it.
 *
 * ip -- the inode being released
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 released.  See the comment on xfs_iunlock() for a list
 *	 of valid values.
 */
void
xfs_iput(xfs_inode_t	*ip,
	 uint		lock_flags)
{
	xfs_itrace_entry(ip);
	xfs_iunlock(ip, lock_flags);
	IRELE(ip);
}

/*
 * Special iput for brand-new inodes that are still locked
 */
void
xfs_iput_new(
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	struct inode	*inode = VFS_I(ip);

	xfs_itrace_entry(ip);

	if (ip->i_d.di_mode == 0) {
		ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
		make_bad_inode(inode);
	}
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
	IRELE(ip);
}

/*
 * This routine embodies the part of the reclaim code that pulls
 * the inode from the inode hash table and the mount structure's
 * inode list.
 * This should only be called from xfs_reclaim().
 */
void
xfs_ireclaim(xfs_inode_t *ip)
{
	/*
	 * Remove from old hash list and mount list.
	 */
	XFS_STATS_INC(xs_ig_reclaims);

	xfs_iextract(ip);

	/*
	 * Here we do a spurious inode lock in order to coordinate with inode
	 * cache radix tree lookups.  This is because the lookup can reference
	 * the inodes in the cache without taking references.  We make that OK
	 * here by ensuring that we wait until the inode is unlocked after the
	 * lookup before we go ahead and free it.  We get both the ilock and
	 * the iolock because the code may need to drop the ilock but will
	 * still hold the iolock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	/*
	 * Release dquots (and their references) if any. An inode may escape
	 * xfs_inactive and get here via vn_alloc->vn_reclaim path.
	 */
	XFS_QM_DQDETACH(ip->i_mount, ip);

	/*
	 * Pull our behavior descriptor from the vnode chain.
	 */
	if (ip->i_vnode) {
		ip->i_vnode->i_private = NULL;
		ip->i_vnode = NULL;
	}

	/*
	 * Free all memory associated with the inode.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_idestroy(ip);
}

/*
 * This routine removes an about-to-be-destroyed inode from
 * all of the lists in which it is located with the exception
 * of the behavior chain.
 */
void
xfs_iextract(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_perag_t	*pag = xfs_get_perag(mp, ip->i_ino);

	write_lock(&pag->pag_ici_lock);
	radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino));
	write_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);

	/* Deal with the deleted inodes list */
	XFS_MOUNT_ILOCK(mp);
	list_del_init(&ip->i_reclaim);
	mp->m_ireclaims++;
	XFS_MOUNT_IUNLOCK(mp);
}

/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}

/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}

/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

	xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
}

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
	return 1;

out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
out:
	return 0;
}

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY |
			       XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
	    !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
		/*
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		 */
		xfs_trans_unlocked_item(ip->i_mount,
					(xfs_log_item_t *)(ip->i_itemp));
	}
	xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
}

/*
 * give up write locks.  the i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);
}

#ifdef DEBUG
/*
 * Debug-only routine: without additional rw_semaphore APIs, we can
 * now only answer requests regarding whether we hold the lock for write
 * (reader state is outside our visibility; we only track writer state).
 *
 * Note: this means !xfs_isilocked would give false positives, so don't do that.
 */
int
xfs_isilocked(
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	if ((lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) ==
			XFS_ILOCK_EXCL) {
		if (!ip->i_lock.mr_writer)
			return 0;
	}

	if ((lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) ==
			XFS_IOLOCK_EXCL) {
		if (!ip->i_iolock.mr_writer)
			return 0;
	}

	return 1;
}
#endif