/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_mru_cache.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_utils.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_rw.h"
#include "xfs_quota.h"
#include "xfs_trace.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

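/*
 * Look up the next in-core inode at or after *first_index in this AG's
 * inode radix tree, optionally restricted to entries carrying the given
 * radix tree tag. Advances *first_index past the inode found; returns
 * NULL when the AG is exhausted. Caller must hold pag->pag_ici_lock.
 */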
STATIC xfs_inode_t *
xfs_inode_ag_lookup(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	uint32_t		*first_index,
	int			tag)
{
	int			nr_found;
	struct xfs_inode	*ip;

	/*
	 * use a gang lookup to find the next inode in the tree
	 * as the tree is sparse and a gang lookup walks to find
	 * the number of objects requested.
	 */
	if (tag == XFS_ICI_NO_TAG) {
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
				(void **)&ip, *first_index, 1);
	} else {
		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
				(void **)&ip, *first_index, 1, tag);
	}
	if (!nr_found)
		return NULL;

	/*
	 * Update the index for the next lookup. Catch overflows
	 * into the next AG range which can occur if we have inodes
	 * in the last block of the AG and we are currently
	 * pointing to the last inode.
	 */
	*first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
	if (*first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
		return NULL;
	return ip;
}

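/*
 * Walk all in-core inodes in one AG and run @execute on each. The
 * callback is responsible for dropping pag->pag_ici_lock. An EAGAIN
 * return from the callback restarts the whole walk after a short
 * delay; EFSCORRUPTED aborts it.
 */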
STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	xfs_agnumber_t		ag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags,
	int			tag,
	int			exclusive)
{
	struct xfs_perag	*pag = &mp->m_perag[ag];
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;

restart:
	skipped = 0;
	first_index = 0;
	do {
		int		error = 0;
		xfs_inode_t	*ip;

		if (exclusive)
			write_lock(&pag->pag_ici_lock);
		else
			read_lock(&pag->pag_ici_lock);
		ip = xfs_inode_ag_lookup(mp, pag, &first_index, tag);
		if (!ip) {
			if (exclusive)
				write_unlock(&pag->pag_ici_lock);
			else
				read_unlock(&pag->pag_ici_lock);
			break;
		}

		/* execute releases pag->pag_ici_lock */
		error = execute(ip, pag, flags);
		if (error == EAGAIN) {
			skipped++;
			continue;
		}
		if (error)
			last_error = error;

		/* bail out if the filesystem is corrupted. */
		if (error == EFSCORRUPTED)
			break;

	} while (1);

	if (skipped) {
		delay(1);
		goto restart;
	}

	xfs_put_perag(mp, pag);
	return last_error;
}

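/*
 * Run @execute on every in-core inode in the filesystem by walking each
 * initialised AG in turn. Returns the last error seen, stopping early
 * only on EFSCORRUPTED.
 */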
int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags,
	int			tag,
	int			exclusive)
{
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
		if (!mp->m_perag[ag].pag_ici_init)
			continue;
		error = xfs_inode_ag_walk(mp, ag, execute, flags, tag,
						exclusive);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
	}
	return XFS_ERROR(last_error);
}

/* must be called with pag_ici_lock held and releases it */
int
xfs_sync_inode_valid(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag)
{
	struct inode		*inode = VFS_I(ip);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		read_unlock(&pag->pag_ici_lock);
		return EFSCORRUPTED;
	}

	/* If we can't get a reference on the inode, it must be in reclaim. */
	if (!igrab(inode)) {
		read_unlock(&pag->pag_ici_lock);
		return ENOENT;
	}
	read_unlock(&pag->pag_ici_lock);

	if (is_bad_inode(inode) || xfs_iflags_test(ip, XFS_INEW)) {
		IRELE(ip);
		return ENOENT;
	}

	return 0;
}

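/*
 * Write back the dirty pagecache data of a single inode, optionally
 * waiting for the I/O to complete if SYNC_WAIT is set.
 */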
STATIC int
xfs_sync_inode_data(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	struct address_space	*mapping = inode->i_mapping;
	int			error = 0;

	error = xfs_sync_inode_valid(ip, pag);
	if (error)
		return error;

	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		goto out_wait;

	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
		if (flags & SYNC_TRYLOCK)
			goto out_wait;
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
				0 : XFS_B_ASYNC, FI_NONE);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

 out_wait:
	if (flags & SYNC_WAIT)
		xfs_ioend_wait(ip);
	IRELE(ip);
	return error;
}

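/*
 * Flush a single inode's in-core metadata out to its backing buffer if
 * it is dirty, either delwri or synchronously depending on SYNC_WAIT.
 */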
STATIC int
xfs_sync_inode_attr(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	int			error = 0;

	error = xfs_sync_inode_valid(ip, pag);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_inode_clean(ip))
		goto out_unlock;
	if (!xfs_iflock_nowait(ip)) {
		if (!(flags & SYNC_WAIT))
			goto out_unlock;
		xfs_iflock(ip);
	}

	if (xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto out_unlock;
	}

	error = xfs_iflush(ip, (flags & SYNC_WAIT) ?
			   XFS_IFLUSH_SYNC : XFS_IFLUSH_DELWRI);

 out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);
	return error;
}

/*
 * Write out pagecache data for the whole filesystem.
 */
int
xfs_sync_data(
	struct xfs_mount	*mp,
	int			flags)
{
	int			error;

	ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);

	error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags,
				      XFS_ICI_NO_TAG, 0);
	if (error)
		return XFS_ERROR(error);

	xfs_log_force(mp, 0,
		      (flags & SYNC_WAIT) ?
		       XFS_LOG_FORCE | XFS_LOG_SYNC :
		       XFS_LOG_FORCE);
	return 0;
}

/*
 * Write out inode metadata (attributes) for the whole filesystem.
 */
int
xfs_sync_attr(
	struct xfs_mount	*mp,
	int			flags)
{
	ASSERT((flags & ~SYNC_WAIT) == 0);

	return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags,
				     XFS_ICI_NO_TAG, 0);
}

STATIC int
xfs_commit_dummy_trans(
	struct xfs_mount	*mp,
	uint			flags)
{
	struct xfs_inode	*ip = mp->m_rootip;
	struct xfs_trans	*tp;
	int			error;
	int			log_flags = XFS_LOG_FORCE;

	if (flags & SYNC_WAIT)
		log_flags |= XFS_LOG_SYNC;

	/*
	 * Put a dummy transaction in the log to tell recovery
	 * that all others are OK.
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_trans_commit(tp, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/* the log force ensures this transaction is pushed to disk */
	xfs_log_force(mp, 0, log_flags);
	return error;
}

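/*
 * Write out the superblock, and cover the log with a dummy transaction
 * if it would otherwise be left uncovered while idle.
 */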
int
xfs_sync_fsdata(
	struct xfs_mount	*mp,
	int			flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf_log_item	*bip;
	int			error = 0;

	/*
	 * If this is xfssyncd() then only sync the superblock if we can
	 * lock it without sleeping and it is not pinned.
	 */
	if (flags & SYNC_TRYLOCK) {
		ASSERT(!(flags & SYNC_WAIT));

		bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
		if (!bp)
			goto out;

		bip = XFS_BUF_FSPRIVATE(bp, struct xfs_buf_log_item *);
		if (!bip || !xfs_buf_item_dirty(bip) || XFS_BUF_ISPINNED(bp))
			goto out_brelse;
	} else {
		bp = xfs_getsb(mp, 0);

		/*
		 * If the buffer is pinned then push on the log so we won't
		 * get stuck waiting in the write for someone, maybe
		 * ourselves, to flush the log.
		 *
		 * Even though we just pushed the log above, we did not have
		 * the superblock buffer locked at that point so it can
		 * become pinned in between there and here.
		 */
		if (XFS_BUF_ISPINNED(bp))
			xfs_log_force(mp, 0, XFS_LOG_FORCE);
	}

	if (flags & SYNC_WAIT)
		XFS_BUF_UNASYNC(bp);
	else
		XFS_BUF_ASYNC(bp);

	error = xfs_bwrite(mp, bp);
	if (error)
		return error;

	/*
	 * If this is a data integrity sync make sure all pending buffers
	 * are flushed out for the log coverage check below.
	 */
	if (flags & SYNC_WAIT)
		xfs_flush_buftarg(mp->m_ddev_targp, 1);

	if (xfs_log_need_covered(mp))
		error = xfs_commit_dummy_trans(mp, flags);

	return error;

 out_brelse:
	xfs_buf_relse(bp);
 out:
	return error;
}

/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. This first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes
 * to disk (this is the main difference between a sync and a quiesce).
 */

/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int			error;

	/* push non-blocking */
	xfs_sync_data(mp, 0);
	xfs_qm_sync(mp, SYNC_TRYLOCK);

	/* push and block till complete */
	xfs_sync_data(mp, SYNC_WAIT);
	xfs_qm_sync(mp, SYNC_WAIT);

	/* drop inode references pinned by filestreams */
	xfs_filestream_flush(mp);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp, SYNC_WAIT);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		XFS_bflush(mp->m_rtdev_targp);

	return error;
}

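/*
 * Flush and reclaim all inodes, then loop syncing inode metadata and
 * flushing the buftarg until everything is clean enough to write the
 * unmount record.
 */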
STATIC void
xfs_quiesce_fs(
	struct xfs_mount	*mp)
{
	int	count = 0, pincount;

	xfs_flush_buftarg(mp->m_ddev_targp, 0);
	xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC);

	/*
	 * This loop must run at least twice. The first instance of the loop
	 * will flush most metadata, but that will generate more metadata
	 * (typically directory updates), which must then be flushed and
	 * logged before we can write the unmount record.
	 */
	do {
		xfs_sync_attr(mp, SYNC_WAIT);
		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
		if (!pincount) {
			delay(50);
			count++;
		}
	} while (count < 2);
}

/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* flush inodes and push all remaining buffers out to disk */
	xfs_quiesce_fs(mp);

	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp, 1);
	if (error)
		xfs_fs_cmn_err(CE_WARN, mp,
				"xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_unmount_write(mp);
	xfs_unmountfs_writesb(mp);
}

/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
	struct xfs_mount	*mp,
	void			*data,
	void			(*syncer)(struct xfs_mount *, void *),
	struct completion	*completion)
{
	struct xfs_sync_work	*work;

	work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_mount = mp;
	work->w_completion = completion;
	spin_lock(&mp->m_sync_lock);
	list_add_tail(&work->w_list, &mp->m_sync_list);
	spin_unlock(&mp->m_sync_lock);
	wake_up_process(mp->m_sync_task);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inodes_work(
	struct xfs_mount *mp,
	void		*arg)
{
	struct inode	*inode = arg;

	xfs_sync_data(mp, SYNC_TRYLOCK);
	xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
	iput(inode);
}

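/*
 * Hand a flush of the filesystem this inode belongs to off to xfssyncd,
 * wait for it to complete, then force the log synchronously. This is
 * the blocking front end to xfs_flush_inodes_work() on the ENOSPC path.
 */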
void
xfs_flush_inodes(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);
	DECLARE_COMPLETION_ONSTACK(completion);

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work,
			     &completion);
	wait_for_completion(&completion);
	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}

/*
 * Every sync period we need to unpin all items, reclaim inodes, sync
 * quota and write out the superblock. We might need to cover the log
 * to indicate it is idle.
 */
STATIC void
xfs_sync_worker(
	struct xfs_mount *mp,
	void		*unused)
{
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
		xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
		/* dgc: errors ignored here */
		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
		error = xfs_sync_fsdata(mp, SYNC_TRYLOCK);
	}
	mp->m_sync_seq++;
	wake_up(&mp->m_wait_single_sync_task);
}

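/*
 * Main loop of the xfssyncd kernel thread: sleep for the sync interval,
 * then run all queued work items, re-adding the periodic sync work to
 * the queue whenever the timeout expires.
 */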
STATIC int
xfssyncd(
	void			*arg)
{
	struct xfs_mount	*mp = arg;
	long			timeleft;
	xfs_sync_work_t		*work, *n;
	LIST_HEAD		(tmp);

	set_freezable();
	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		timeleft = schedule_timeout_interruptible(timeleft);
		/* swsusp */
		try_to_freeze();
		if (kthread_should_stop() && list_empty(&mp->m_sync_list))
			break;

		spin_lock(&mp->m_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&mp->m_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&mp->m_sync_work.w_list);
			list_add_tail(&mp->m_sync_work.w_list,
					&mp->m_sync_list);
		}
		list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
			list_move(&work->w_list, &tmp);
		spin_unlock(&mp->m_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(mp, work->w_data);
			list_del(&work->w_list);
			if (work == &mp->m_sync_work)
				continue;
			if (work->w_completion)
				complete(work->w_completion);
			kmem_free(work);
		}
	}

	return 0;
}

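/*
 * Set up the default periodic sync work and start the xfssyncd thread
 * for this mount.
 */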
int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	mp->m_sync_work.w_syncer = xfs_sync_worker;
	mp->m_sync_work.w_mount = mp;
	mp->m_sync_work.w_completion = NULL;
	mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
	if (IS_ERR(mp->m_sync_task))
		return -PTR_ERR(mp->m_sync_task);
	return 0;
}

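/* Stop the xfssyncd thread for this mount. */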
void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	kthread_stop(mp->m_sync_task);
}

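/*
 * Tag an inode as reclaimable in the per-AG inode radix tree. Caller
 * must hold pag->pag_ici_lock (see xfs_inode_set_reclaim_tag()).
 */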
void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_perag_t	*pag = xfs_get_perag(mp, ip->i_ino);

	read_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	read_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);
}

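/*
 * Clear the reclaim tag for an inode in the per-AG radix tree. Caller
 * must hold pag->pag_ici_lock.
 */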
void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
}

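/*
 * Reclaim a single inode: flush it out if dirty, then free the in-core
 * inode. Called with pag->pag_ici_lock held exclusively (the iterator
 * runs the reclaim walk with exclusive == 1) and drops that lock.
 */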
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 */
	spin_lock(&ip->i_flags_lock);
	ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* ignore as it is already under reclaim */
		spin_unlock(&ip->i_flags_lock);
		write_unlock(&pag->pag_ici_lock);
		return 0;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	write_unlock(&pag->pag_ici_lock);

	/*
	 * If the inode is still dirty, then flush it out.  If the inode
	 * is not in the AIL, then it will be OK to flush it delwri as
	 * long as xfs_iflush() does not keep any references to the inode.
	 * We leave that decision up to xfs_iflush() since it has the
	 * knowledge of whether it's OK to simply do a delwri flush of
	 * the inode or whether we need to wait until the inode is
	 * pulled from the AIL.
	 * We get the flush lock regardless, though, just to make sure
	 * we don't free it while it is being flushed.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_iflock(ip);

	/*
	 * In the case of a forced shutdown we rely on xfs_iflush() to
	 * wait for the inode to be unpinned before returning an error.
	 */
	if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) {
		/* synchronize with xfs_iflush_done */
		xfs_iflock(ip);
		xfs_ifunlock(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_ireclaim(ip);
	return 0;
}

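/*
 * Reclaim every inode tagged XFS_ICI_RECLAIM_TAG, using @mode as the
 * inode flush mode.
 */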
int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	return xfs_inode_ag_iterator(mp, xfs_reclaim_inode, mode,
					XFS_ICI_RECLAIM_TAG, 1);
}