xfs_extent_busy.c

/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2010 David Chinner.
 * Copyright (c) 2011 Christoph Hellwig.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_inode.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
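
/*
 * Insert the busy extent [bno, bno + len) in AG agno into the per-AG busy
 * extent rbtree and add it to the list of busy extents attached to the
 * transaction.  If we cannot allocate the tracking structure, fall back to
 * a synchronous transaction so the blocks cannot be reused before the
 * transaction commits.
 */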
void
xfs_extent_busy_insert(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	unsigned int		flags)
{
	struct xfs_extent_busy	*new;
	struct xfs_extent_busy	*busyp;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent = NULL;

	new = kmem_zalloc(sizeof(struct xfs_extent_busy), KM_MAYFAIL);
	if (!new) {
		/*
		 * No memory!  Since it is now not possible to track the free
		 * block, make this a synchronous transaction to ensure that
		 * the block is not reused before this transaction commits.
		 */
		trace_xfs_extent_busy_enomem(tp->t_mountp, agno, bno, len);
		xfs_trans_set_sync(tp);
		return;
	}

	new->agno = agno;
	new->bno = bno;
	new->length = len;
	INIT_LIST_HEAD(&new->list);
	new->flags = flags;

	/* trace before insert to be able to see failed inserts */
	trace_xfs_extent_busy(tp->t_mountp, agno, bno, len);

	pag = xfs_perag_get(tp->t_mountp, new->agno);
	spin_lock(&pag->pagb_lock);
	rbp = &pag->pagb_tree.rb_node;
	while (*rbp) {
		parent = *rbp;
		busyp = rb_entry(parent, struct xfs_extent_busy, rb_node);

		if (new->bno < busyp->bno) {
			rbp = &(*rbp)->rb_left;
			ASSERT(new->bno + new->length <= busyp->bno);
		} else if (new->bno > busyp->bno) {
			rbp = &(*rbp)->rb_right;
			ASSERT(bno >= busyp->bno + busyp->length);
		} else {
			/* busy extents in the tree must never overlap */
			ASSERT(0);
		}
	}

	rb_link_node(&new->rb_node, parent, rbp);
	rb_insert_color(&new->rb_node, &pag->pagb_tree);

	list_add(&new->list, &tp->t_busy);
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}
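
/*
 * Illustrative usage sketch (an assumed caller, not part of this file):
 * extent-freeing code would mark the freed range busy from within the
 * freeing transaction, e.g.:
 *
 *	xfs_extent_busy_insert(tp, agno, bno, len, 0);
 *
 * passing XFS_EXTENT_BUSY_SKIP_DISCARD in @flags when the blocks should
 * not be sent to the device as a discard once the busy extent is cleared.
 */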
/*
 * Search for a busy extent within the range of the extent we are about to
 * allocate.  xfs_extent_busy_search() takes the busy extent tree lock
 * internally.  This function returns 0 for no overlapping busy extent,
 * -1 for an overlapping but not exact busy extent, and 1 for an exact
 * match.  This is done so that a non-zero return indicates an overlap that
 * will require a synchronous transaction, but it can still be used to
 * distinguish between a partial and an exact match.
 */
int
xfs_extent_busy_search(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	struct xfs_perag	*pag;
	struct rb_node		*rbp;
	struct xfs_extent_busy	*busyp;
	int			match = 0;

	pag = xfs_perag_get(mp, agno);
	spin_lock(&pag->pagb_lock);

	rbp = pag->pagb_tree.rb_node;

	/* find closest start bno overlap */
	while (rbp) {
		busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
		if (bno < busyp->bno) {
			/* may overlap, but exact start block is lower */
			if (bno + len > busyp->bno)
				match = -1;
			rbp = rbp->rb_left;
		} else if (bno > busyp->bno) {
			/* may overlap, but exact start block is higher */
			if (bno < busyp->bno + busyp->length)
				match = -1;
			rbp = rbp->rb_right;
		} else {
			/* bno matches busyp, length determines exact match */
			match = (busyp->length == len) ? 1 : -1;
			break;
		}
	}
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
	return match;
}
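
/*
 * Illustrative usage sketch (an assumed caller, not part of this file):
 * code that must not hand out a freed extent before the free is stable on
 * disk could check for a busy overlap and force the log, e.g.:
 *
 *	if (xfs_extent_busy_search(mp, agno, bno, len))
 *		xfs_log_force(mp, XFS_LOG_SYNC);
 *
 * xfs_log_force() with XFS_LOG_SYNC is the same mechanism this file uses
 * in xfs_extent_busy_update_extent() to clear busy extents.
 */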
/*
 * The found free extent [fbno, fend] overlaps part or all of the given busy
 * extent.  If the overlap covers the beginning, the end, or all of the busy
 * extent, the overlapping portion can be made unbusy and used for the
 * allocation.  We can't split a busy extent because we can't modify a
 * transaction/CIL context busy list, but we can update an entry's block
 * number or length.
 *
 * Returns true if the extent can safely be reused, or false if the search
 * needs to be restarted.
 */
STATIC bool
xfs_extent_busy_update_extent(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata)
{
	xfs_agblock_t		fend = fbno + flen;
	xfs_agblock_t		bbno = busyp->bno;
	xfs_agblock_t		bend = bbno + busyp->length;

	/*
	 * This extent is currently being discarded.  Give the thread
	 * performing the discard a chance to mark the extent unbusy
	 * and retry.
	 */
	if (busyp->flags & XFS_EXTENT_BUSY_DISCARDED) {
		spin_unlock(&pag->pagb_lock);
		delay(1);
		spin_lock(&pag->pagb_lock);
		return false;
	}

	/*
	 * If there is a busy extent overlapping a user allocation, we have
	 * no choice but to force the log and retry the search.
	 *
	 * Fortunately this does not happen during normal operation, but
	 * only if the filesystem is very low on space and has to dip into
	 * the AGFL for normal allocations.
	 */
	if (userdata)
		goto out_force_log;

	if (bbno < fbno && bend > fend) {
		/*
		 * Case 1:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +---------+
		 *        fbno   fend
		 */

		/*
		 * We would have to split the busy extent to be able to track
		 * it correctly, which we cannot do because we would have to
		 * modify the list of busy extents attached to the transaction
		 * or CIL context, which is immutable.
		 *
		 * Force out the log to clear the busy extent and retry the
		 * search.
		 */
		goto out_force_log;
	} else if (bbno >= fbno && bend <= fend) {
		/*
		 * Case 2:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *    +-----------------+
		 *    fbno           fend
		 *
		 * Case 3:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *    +--------------------------+
		 *    fbno                    fend
		 *
		 * Case 4:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +--------------------------+
		 *    fbno                    fend
		 *
		 * Case 5:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +-----------------------------------+
		 *    fbno                             fend
		 *
		 */

		/*
		 * The busy extent is fully covered by the extent we are
		 * allocating, and can simply be removed from the rbtree.
		 * However we cannot remove it from the immutable list
		 * tracking busy extents in the transaction or CIL context,
		 * so set the length to zero to mark it invalid.
		 *
		 * We also need to restart the busy extent search from the
		 * tree root, because erasing the node can rearrange the
		 * tree topology.
		 */
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
		busyp->length = 0;
		return false;
	} else if (fend < bend) {
		/*
		 * Case 6:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *    +---------+
		 *    fbno   fend
		 *
		 * Case 7:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +------------------+
		 *    fbno            fend
		 *
		 */
		busyp->bno = fend;
	} else if (bbno < fbno) {
		/*
		 * Case 8:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +-------------+
		 *        fbno       fend
		 *
		 * Case 9:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +----------------------+
		 *        fbno                fend
		 */
		busyp->length = fbno - busyp->bno;
	} else {
		ASSERT(0);
	}

	trace_xfs_extent_busy_reuse(mp, pag->pag_agno, fbno, flen);
	return true;

out_force_log:
	spin_unlock(&pag->pagb_lock);
	xfs_log_force(mp, XFS_LOG_SYNC);
	trace_xfs_extent_busy_force(mp, pag->pag_agno, fbno, flen);
	spin_lock(&pag->pagb_lock);
	return false;
}
/*
 * For a given extent [fbno, flen], make sure we can reuse it safely by
 * trimming or removing any busy extents that overlap it.  May force the
 * log if an overlapping busy extent cannot be updated in place.
 */
void
xfs_extent_busy_reuse(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata)
{
	struct xfs_perag	*pag;
	struct rb_node		*rbp;

	ASSERT(flen > 0);

	pag = xfs_perag_get(mp, agno);
	spin_lock(&pag->pagb_lock);
restart:
	rbp = pag->pagb_tree.rb_node;
	while (rbp) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fbno + flen <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		if (!xfs_extent_busy_update_extent(mp, pag, busyp, fbno, flen,
						  userdata))
			goto restart;
	}
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}
/*
 * For a given extent [fbno, flen], search the busy extent list to find a
 * subset of the extent that is not busy.  If *rlen is smaller than
 * args->minlen no suitable extent could be found, and the higher level
 * code needs to force out the log and retry the allocation.
 */
void
xfs_extent_busy_trim(
	struct xfs_alloc_arg	*args,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	xfs_agblock_t		*rbno,
	xfs_extlen_t		*rlen)
{
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	struct rb_node		*rbp;

	ASSERT(len > 0);

	spin_lock(&args->pag->pagb_lock);
restart:
	fbno = bno;
	flen = len;
	rbp = args->pag->pagb_tree.rb_node;
	while (rbp && flen >= args->minlen) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	fend = fbno + flen;
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fend <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		/*
		 * If this is a metadata allocation, try to reuse the busy
		 * extent instead of trimming the allocation.
		 */
		if (!args->userdata &&
		    !(busyp->flags & XFS_EXTENT_BUSY_DISCARDED)) {
			if (!xfs_extent_busy_update_extent(args->mp, args->pag,
							  busyp, fbno, flen,
							  false))
				goto restart;
			continue;
		}

		if (bbno <= fbno) {
			/* start overlap */

			/*
			 * Case 1:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +---------+
			 *        fbno   fend
			 *
			 * Case 2:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +-------------+
			 *    fbno       fend
			 *
			 * Case 3:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +-------------+
			 *        fbno       fend
			 *
			 * Case 4:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +-----------------+
			 *    fbno           fend
			 *
			 * No unbusy region in extent, return failure.
			 */
			if (fend <= bend)
				goto fail;

			/*
			 * Case 5:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +----------------------+
			 *        fbno                fend
			 *
			 * Case 6:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +--------------------------+
			 *    fbno                    fend
			 *
			 * Needs to be trimmed to:
			 *                       +-------+
			 *                       fbno fend
			 */
			fbno = bend;
		} else if (bend >= fend) {
			/* end overlap */

			/*
			 * Case 7:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +------------------+
			 *    fbno            fend
			 *
			 * Case 8:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +--------------------------+
			 *    fbno                    fend
			 *
			 * Needs to be trimmed to:
			 *    +-------+
			 *    fbno fend
			 */
			fend = bbno;
		} else {
			/* middle overlap */

			/*
			 * Case 9:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +-----------------------------------+
			 *    fbno                             fend
			 *
			 * Can be trimmed to:
			 *    +-------+        OR         +-------+
			 *    fbno fend                   fbno fend
			 *
			 * Backward allocation leads to significant
			 * fragmentation of directories, which degrades
			 * directory performance, therefore we always want to
			 * choose the option that produces forward allocation
			 * patterns.
			 * Preferring the lower bno extent will make the next
			 * request use "fend" as the start of the next
			 * allocation; if the segment is no longer busy at
			 * that point, we'll get a contiguous allocation, but
			 * even if it is still busy, we will get a forward
			 * allocation.
			 * We try to avoid choosing the segment at "bend",
			 * because that can lead to the next allocation
			 * taking the segment at "fbno", which would be a
			 * backward allocation.  We only use the segment at
			 * "fbno" if it is much larger than the current
			 * requested size, because in that case there's a
			 * good chance subsequent allocations will be
			 * contiguous.
			 */
			if (bbno - fbno >= args->maxlen) {
				/* left candidate fits perfect */
				fend = bbno;
			} else if (fend - bend >= args->maxlen * 4) {
				/* right candidate has enough free space */
				fbno = bend;
			} else if (bbno - fbno >= args->minlen) {
				/* left candidate fits minimum requirement */
				fend = bbno;
			} else {
				goto fail;
			}
		}

		flen = fend - fbno;
	}
	spin_unlock(&args->pag->pagb_lock);

	if (fbno != bno || flen != len) {
		trace_xfs_extent_busy_trim(args->mp, args->agno, bno, len,
					  fbno, flen);
	}
	*rbno = fbno;
	*rlen = flen;
	return;
fail:
	/*
	 * Return a zero extent length as a failure indication.  All callers
	 * re-check if the trimmed extent satisfies the minlen requirement.
	 */
	spin_unlock(&args->pag->pagb_lock);
	trace_xfs_extent_busy_trim(args->mp, args->agno, bno, len, fbno, 0);
	*rbno = fbno;
	*rlen = 0;
}
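
/*
 * Drop a single busy extent: erase it from the rbtree unless a zero length
 * marks it as already removed by xfs_extent_busy_update_extent(), unlink it
 * from the transaction/CIL busy list, and free it.  The caller must hold
 * the per-AG busy lock.
 */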
STATIC void
xfs_extent_busy_clear_one(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp)
{
	if (busyp->length) {
		trace_xfs_extent_busy_clear(mp, busyp->agno, busyp->bno,
					    busyp->length);
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
	}

	list_del_init(&busyp->list);
	kmem_free(busyp);
}
/*
 * Remove all extents on the passed in list from the busy extents tree.
 * If do_discard is set skip extents that need to be discarded, and mark
 * these as undergoing a discard operation instead.
 */
void
xfs_extent_busy_clear(
	struct xfs_mount	*mp,
	struct list_head	*list,
	bool			do_discard)
{
	struct xfs_extent_busy	*busyp, *n;
	struct xfs_perag	*pag = NULL;
	xfs_agnumber_t		agno = NULLAGNUMBER;

	list_for_each_entry_safe(busyp, n, list, list) {
		if (busyp->agno != agno) {
			if (pag) {
				spin_unlock(&pag->pagb_lock);
				xfs_perag_put(pag);
			}
			pag = xfs_perag_get(mp, busyp->agno);
			spin_lock(&pag->pagb_lock);
			agno = busyp->agno;
		}

		if (do_discard && busyp->length &&
		    !(busyp->flags & XFS_EXTENT_BUSY_SKIP_DISCARD))
			busyp->flags = XFS_EXTENT_BUSY_DISCARDED;
		else
			xfs_extent_busy_clear_one(mp, pag, busyp);
	}

	if (pag) {
		spin_unlock(&pag->pagb_lock);
		xfs_perag_put(pag);
	}
}
/*
 * Callback for list_sort to sort busy extents by the AG they reside in.
 */
int
xfs_extent_busy_ag_cmp(
	void			*priv,
	struct list_head	*a,
	struct list_head	*b)
{
	return container_of(a, struct xfs_extent_busy, list)->agno -
		container_of(b, struct xfs_extent_busy, list)->agno;
}