free-space-cache.c
/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"

struct btrfs_free_space {
        struct rb_node bytes_index;
        struct rb_node offset_index;
        u64 offset;
        u64 bytes;
};
static int tree_insert_offset(struct rb_root *root, u64 offset,
                              struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_free_space *info;

        while (*p) {
                parent = *p;
                info = rb_entry(parent, struct btrfs_free_space, offset_index);

                if (offset < info->offset)
                        p = &(*p)->rb_left;
                else if (offset > info->offset)
                        p = &(*p)->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);

        return 0;
}

static int tree_insert_bytes(struct rb_root *root, u64 bytes,
                             struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_free_space *info;

        while (*p) {
                parent = *p;
                info = rb_entry(parent, struct btrfs_free_space, bytes_index);

                if (bytes < info->bytes)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);

        return 0;
}
/*
 * searches the tree for the given offset.
 *
 * fuzzy == 1: this is used for allocations where we are given a hint of where
 * to look for free space.  Because the hint may not be completely on an offset
 * mark, or the hint may no longer point to free space, we need to fudge our
 * results a bit.  So we look for free space starting at or after offset with
 * at least bytes size.  We prefer to find as close to the given offset as we
 * can.  Also, if the offset is within a free space range, then we will return
 * the free space that contains the given offset, which means we can return a
 * free space chunk with an offset before the provided offset.
 *
 * fuzzy == 0: this is just a normal tree search.  Give us the free space that
 * starts at the given offset and is at least bytes in size; if it's not there,
 * return NULL.
 */
static struct btrfs_free_space *tree_search_offset(struct rb_root *root,
                                                   u64 offset, u64 bytes,
                                                   int fuzzy)
{
        struct rb_node *n = root->rb_node;
        struct btrfs_free_space *entry, *ret = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_free_space, offset_index);

                if (offset < entry->offset) {
                        if (fuzzy &&
                            (!ret || entry->offset < ret->offset) &&
                            (bytes <= entry->bytes))
                                ret = entry;
                        n = n->rb_left;
                } else if (offset > entry->offset) {
                        if (fuzzy &&
                            (entry->offset + entry->bytes - 1) >= offset &&
                            bytes <= entry->bytes) {
                                ret = entry;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        if (bytes > entry->bytes) {
                                n = n->rb_right;
                                continue;
                        }
                        ret = entry;
                        break;
                }
        }

        return ret;
}
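/*
 * Illustrative example (not part of the original source): suppose the offset
 * tree holds two entries, [0, 8K) and [12K, 16K).  A non-fuzzy lookup such as
 *
 *	tree_search_offset(root, 12K, 4K, 0);
 *
 * only matches the entry that starts exactly at 12K, and a non-fuzzy lookup at
 * 4K returns NULL because nothing starts there.  A fuzzy lookup such as
 *
 *	tree_search_offset(root, 4K, 4K, 1);
 *
 * returns the [0, 8K) entry, since the 4K hint falls inside it and it is large
 * enough, even though its offset is before the hint.
 */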
/*
 * return a chunk at least bytes in size, as close to offset as we can get.
 */
static struct btrfs_free_space *tree_search_bytes(struct rb_root *root,
                                                  u64 offset, u64 bytes)
{
        struct rb_node *n = root->rb_node;
        struct btrfs_free_space *entry, *ret = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_free_space, bytes_index);

                if (bytes < entry->bytes) {
                        /*
                         * We prefer to get a hole size as close to the size
                         * we are asking for, so we don't take small slivers
                         * out of huge holes, but we also want to get as close
                         * to the offset as possible so we don't have a whole
                         * lot of fragmentation.
                         */
                        if (offset <= entry->offset) {
                                if (!ret)
                                        ret = entry;
                                else if (entry->bytes < ret->bytes)
                                        ret = entry;
                                else if (entry->offset < ret->offset)
                                        ret = entry;
                        }
                        n = n->rb_left;
                } else if (bytes > entry->bytes) {
                        n = n->rb_right;
                } else {
                        /*
                         * Ok, we may have multiple chunks of the wanted size,
                         * so we don't want to take the first one we find; we
                         * want to take the one closest to our given offset,
                         * so keep searching just in case there's a better
                         * match.
                         */
                        n = n->rb_right;
                        if (offset > entry->offset)
                                continue;
                        else if (!ret || entry->offset < ret->offset)
                                ret = entry;
                }
        }

        return ret;
}
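/*
 * Illustrative example (not part of the original source): if the bytes tree
 * holds a 4K hole at offset 64K, an 8K hole at offset 0 and an 8K hole at
 * offset 32K, then
 *
 *	tree_search_bytes(root, 0, 8K);
 *
 * never considers the 4K hole (too small) and, between the two equally sized
 * 8K holes, prefers the one at offset 0 because ties on size are broken by
 * proximity to the requested offset.
 */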
static void unlink_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_free_space *info)
{
        rb_erase(&info->offset_index, &block_group->free_space_offset);
        rb_erase(&info->bytes_index, &block_group->free_space_bytes);
}

static int link_free_space(struct btrfs_block_group_cache *block_group,
                           struct btrfs_free_space *info)
{
        int ret = 0;

        BUG_ON(!info->bytes);
        ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
                                 &info->offset_index);
        if (ret)
                return ret;

        ret = tree_insert_bytes(&block_group->free_space_bytes, info->bytes,
                                &info->bytes_index);
        if (ret)
                return ret;

        return ret;
}
int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
                         u64 offset, u64 bytes)
{
        struct btrfs_free_space *right_info;
        struct btrfs_free_space *left_info;
        struct btrfs_free_space *info = NULL;
        int ret = 0;

        info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
        if (!info)
                return -ENOMEM;

        info->offset = offset;
        info->bytes = bytes;

        spin_lock(&block_group->tree_lock);

        /*
         * first we want to see if there is free space adjacent to the range
         * we are adding.  If there is, remove that struct and add a new one
         * to cover the entire range.
         */
        right_info = tree_search_offset(&block_group->free_space_offset,
                                        offset+bytes, 0, 0);
        left_info = tree_search_offset(&block_group->free_space_offset,
                                       offset-1, 0, 1);

        if (right_info) {
                unlink_free_space(block_group, right_info);
                info->bytes += right_info->bytes;
                kfree(right_info);
        }

        if (left_info && left_info->offset + left_info->bytes == offset) {
                unlink_free_space(block_group, left_info);
                info->offset = left_info->offset;
                info->bytes += left_info->bytes;
                kfree(left_info);
        }

        ret = link_free_space(block_group, info);
        if (ret)
                kfree(info);

        spin_unlock(&block_group->tree_lock);

        if (ret) {
                printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret);
                BUG_ON(ret == -EEXIST);
        }

        return ret;
}
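/*
 * Illustrative example (not part of the original source): with existing free
 * space entries [0, 4K) and [8K, 12K), a call such as
 *
 *	btrfs_add_free_space(block_group, 4K, 4K);
 *
 * finds [8K, 12K) via the exact search at offset+bytes and [0, 4K) via the
 * fuzzy search at offset-1, unlinks and frees both, and links one merged
 * entry covering [0, 12K).
 */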
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                            u64 offset, u64 bytes)
{
        struct btrfs_free_space *info;
        int ret = 0;

        spin_lock(&block_group->tree_lock);

        info = tree_search_offset(&block_group->free_space_offset, offset, 0,
                                  1);
        if (info && info->offset == offset) {
                if (info->bytes < bytes) {
                        printk(KERN_ERR "Found free space at %llu, size %llu, "
                               "trying to use %llu\n",
                               (unsigned long long)info->offset,
                               (unsigned long long)info->bytes,
                               (unsigned long long)bytes);
                        WARN_ON(1);
                        ret = -EINVAL;
                        spin_unlock(&block_group->tree_lock);
                        goto out;
                }
                unlink_free_space(block_group, info);

                if (info->bytes == bytes) {
                        kfree(info);
                        spin_unlock(&block_group->tree_lock);
                        goto out;
                }

                info->offset += bytes;
                info->bytes -= bytes;

                ret = link_free_space(block_group, info);
                spin_unlock(&block_group->tree_lock);
                BUG_ON(ret);
        } else if (info && info->offset < offset &&
                   info->offset + info->bytes >= offset + bytes) {
                u64 old_start = info->offset;
                /*
                 * we're freeing space in the middle of the info,
                 * this can happen during tree log replay
                 *
                 * first unlink the old info and then
                 * insert it again after the hole we're creating
                 */
                unlink_free_space(block_group, info);
                if (offset + bytes < info->offset + info->bytes) {
                        u64 old_end = info->offset + info->bytes;
                        info->offset = offset + bytes;
                        info->bytes = old_end - info->offset;
                        ret = link_free_space(block_group, info);
                        BUG_ON(ret);
                } else {
                        /* the hole we're creating ends at the end
                         * of the info struct, just free the info
                         */
                        kfree(info);
                }
                spin_unlock(&block_group->tree_lock);
                /* step two, insert a new info struct to cover anything
                 * before the hole
                 */
                ret = btrfs_add_free_space(block_group, old_start,
                                           offset - old_start);
                BUG_ON(ret);
        } else {
                spin_unlock(&block_group->tree_lock);
                if (!info) {
                        printk(KERN_ERR "couldn't find space %llu to free\n",
                               (unsigned long long)offset);
                        printk(KERN_ERR "cached is %d, offset %llu bytes %llu\n",
                               block_group->cached,
                               (unsigned long long)block_group->key.objectid,
                               (unsigned long long)block_group->key.offset);
                        btrfs_dump_free_space(block_group, bytes);
                } else if (info) {
                        printk(KERN_ERR "hmm, found offset=%llu bytes=%llu, "
                               "but wanted offset=%llu bytes=%llu\n",
                               (unsigned long long)info->offset,
                               (unsigned long long)info->bytes,
                               (unsigned long long)offset,
                               (unsigned long long)bytes);
                }
                WARN_ON(1);
        }
out:
        return ret;
}
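/*
 * Illustrative example (not part of the original source): if the cache tracks
 * a single free extent [0, 16K), then
 *
 *	btrfs_remove_free_space(block_group, 4K, 4K);
 *
 * hits the "middle of the info" case: the existing entry is relinked as
 * [8K, 16K) and btrfs_add_free_space() re-inserts [0, 4K), so only the
 * removed range [4K, 8K) actually leaves the cache.
 */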
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
                           u64 bytes)
{
        struct btrfs_free_space *info;
        struct rb_node *n;
        int count = 0;

        for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) {
                info = rb_entry(n, struct btrfs_free_space, offset_index);
                if (info->bytes >= bytes)
                        count++;
                printk(KERN_ERR "entry offset %llu, bytes %llu\n",
                       (unsigned long long)info->offset,
                       (unsigned long long)info->bytes);
        }
        printk(KERN_INFO "%d blocks of free space at or bigger than bytes is\n",
               count);
}
u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space *info;
        struct rb_node *n;
        u64 ret = 0;

        for (n = rb_first(&block_group->free_space_offset); n;
             n = rb_next(n)) {
                info = rb_entry(n, struct btrfs_free_space, offset_index);
                ret += info->bytes;
        }
        return ret;
}
/*
 * for a given cluster, put all of its extents back into the free
 * space cache.  If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already.  In that case, we just return without changing anything.
 */
static int
__btrfs_return_cluster_to_free_space(
                             struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster)
{
        struct btrfs_free_space *entry;
        struct rb_node *node;

        spin_lock(&cluster->lock);
        if (cluster->block_group != block_group)
                goto out;

        cluster->window_start = 0;
        node = rb_first(&cluster->root);
        while (node) {
                entry = rb_entry(node, struct btrfs_free_space, offset_index);
                node = rb_next(&entry->offset_index);
                rb_erase(&entry->offset_index, &cluster->root);
                link_free_space(block_group, entry);
        }
        list_del_init(&cluster->block_group_list);

        btrfs_put_block_group(cluster->block_group);
        cluster->block_group = NULL;
        cluster->root.rb_node = NULL;
out:
        spin_unlock(&cluster->lock);
        return 0;
}
void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space *info;
        struct rb_node *node;
        struct btrfs_free_cluster *cluster;
        struct btrfs_free_cluster *safe;

        spin_lock(&block_group->tree_lock);

        list_for_each_entry_safe(cluster, safe, &block_group->cluster_list,
                                 block_group_list) {
                WARN_ON(cluster->block_group != block_group);
                __btrfs_return_cluster_to_free_space(block_group, cluster);
        }

        while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
                info = rb_entry(node, struct btrfs_free_space, bytes_index);
                unlink_free_space(block_group, info);
                kfree(info);
                if (need_resched()) {
                        spin_unlock(&block_group->tree_lock);
                        cond_resched();
                        spin_lock(&block_group->tree_lock);
                }
        }
        spin_unlock(&block_group->tree_lock);
}
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
                               u64 offset, u64 bytes, u64 empty_size)
{
        struct btrfs_free_space *entry = NULL;
        u64 ret = 0;

        spin_lock(&block_group->tree_lock);
        entry = tree_search_offset(&block_group->free_space_offset, offset,
                                   bytes + empty_size, 1);
        if (!entry)
                entry = tree_search_bytes(&block_group->free_space_bytes,
                                          offset, bytes + empty_size);
        if (entry) {
                unlink_free_space(block_group, entry);
                ret = entry->offset;
                entry->offset += bytes;
                entry->bytes -= bytes;

                if (!entry->bytes)
                        kfree(entry);
                else
                        link_free_space(block_group, entry);
        }
        spin_unlock(&block_group->tree_lock);

        return ret;
}
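/*
 * Illustrative example (not part of the original source; 'hint' is just a
 * placeholder offset): a call such as
 *
 *	btrfs_find_space_for_alloc(block_group, hint, 4K, 8K);
 *
 * first does a fuzzy offset search near the hint and falls back to the
 * bytes-sorted tree; either way the winning entry must hold at least 12K,
 * but only the 4K actually allocated is trimmed off its front and the
 * remainder is relinked.  A return value of 0 means nothing suitable was
 * found.
 */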
/*
 * given a cluster, put all of its extents back into the free space
 * cache.  If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
                               struct btrfs_block_group_cache *block_group,
                               struct btrfs_free_cluster *cluster)
{
        int ret;

        /* first, get a safe pointer to the block group */
        spin_lock(&cluster->lock);
        if (!block_group) {
                block_group = cluster->block_group;
                if (!block_group) {
                        spin_unlock(&cluster->lock);
                        return 0;
                }
        } else if (cluster->block_group != block_group) {
                /* someone else has already freed it, don't redo their work */
                spin_unlock(&cluster->lock);
                return 0;
        }
        atomic_inc(&block_group->count);
        spin_unlock(&cluster->lock);

        /* now return any extents the cluster had on it */
        spin_lock(&block_group->tree_lock);
        ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
        spin_unlock(&block_group->tree_lock);

        /* finally drop our ref */
        btrfs_put_block_group(block_group);
        return ret;
}
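/*
 * Illustrative note (not part of the original source): callers that only hold
 * the cluster may pass a NULL block group, e.g.
 *
 *	btrfs_return_cluster_to_free_space(NULL, cluster);
 *
 * in which case the block group is read from cluster->block_group under
 * cluster->lock and pinned with atomic_inc(&block_group->count) before
 * tree_lock is taken, so the group cannot go away while its extents are
 * being returned.
 */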
/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster, u64 bytes,
                             u64 min_start)
{
        struct btrfs_free_space *entry = NULL;
        struct rb_node *node;
        u64 ret = 0;

        spin_lock(&cluster->lock);
        if (bytes > cluster->max_size)
                goto out;

        if (cluster->block_group != block_group)
                goto out;

        node = rb_first(&cluster->root);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_free_space, offset_index);
        while (1) {
                if (entry->bytes < bytes || entry->offset < min_start) {
                        struct rb_node *node;

                        node = rb_next(&entry->offset_index);
                        if (!node)
                                break;
                        entry = rb_entry(node, struct btrfs_free_space,
                                         offset_index);
                        continue;
                }
                ret = entry->offset;

                entry->offset += bytes;
                entry->bytes -= bytes;

                if (entry->bytes == 0) {
                        rb_erase(&entry->offset_index, &cluster->root);
                        kfree(entry);
                }
                break;
        }
out:
        spin_unlock(&cluster->lock);
        return ret;
}
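/*
 * Illustrative example (not part of the original source): with a cluster
 * holding extents [64K, 72K) and [80K, 96K) and max_size == 16K,
 *
 *	btrfs_alloc_from_cluster(block_group, cluster, 8K, 72K);
 *
 * skips the first extent (its offset is below min_start), carves 8K off the
 * front of the second and returns 80K, leaving [88K, 96K) in the cluster.
 * A request larger than max_size returns 0 without walking the tree.
 */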
/*
 * here we try to find a cluster of blocks in a block group.  The goal
 * is to find at least bytes free and up to empty_size + bytes free.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -ENOSPC
 */
int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
                             struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster,
                             u64 offset, u64 bytes, u64 empty_size)
{
        struct btrfs_free_space *entry = NULL;
        struct rb_node *node;
        struct btrfs_free_space *next;
        struct btrfs_free_space *last;
        u64 min_bytes;
        u64 window_start;
        u64 window_free;
        u64 max_extent = 0;
        int total_retries = 0;
        int ret;

        /* for metadata, allow allocations with more holes */
        if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
                /*
                 * we want to do larger allocations when we are
                 * flushing out the delayed refs, it helps prevent
                 * making more work as we go along.
                 */
                if (trans->transaction->delayed_refs.flushing)
                        min_bytes = max(bytes, (bytes + empty_size) >> 1);
                else
                        min_bytes = max(bytes, (bytes + empty_size) >> 4);
        } else
                min_bytes = max(bytes, (bytes + empty_size) >> 2);

        spin_lock(&block_group->tree_lock);
        spin_lock(&cluster->lock);

        /* someone already found a cluster, hooray */
        if (cluster->block_group) {
                ret = 0;
                goto out;
        }
again:
        min_bytes = min(min_bytes, bytes + empty_size);
        entry = tree_search_bytes(&block_group->free_space_bytes,
                                  offset, min_bytes);
        if (!entry) {
                ret = -ENOSPC;
                goto out;
        }
        window_start = entry->offset;
        window_free = entry->bytes;
        last = entry;
        max_extent = entry->bytes;

        while (1) {
                /* our window is just right, let's fill it */
                if (window_free >= bytes + empty_size)
                        break;

                node = rb_next(&last->offset_index);
                if (!node) {
                        ret = -ENOSPC;
                        goto out;
                }
                next = rb_entry(node, struct btrfs_free_space, offset_index);

                /*
                 * we haven't filled the empty size and the window is
                 * very large.  reset and try again
                 */
                if (next->offset - window_start > (bytes + empty_size) * 2) {
                        entry = next;
                        window_start = entry->offset;
                        window_free = entry->bytes;
                        last = entry;
                        max_extent = 0;
                        total_retries++;
                        if (total_retries % 256 == 0) {
                                if (min_bytes >= (bytes + empty_size)) {
                                        ret = -ENOSPC;
                                        goto out;
                                }
                                /*
                                 * grow our allocation a bit, we're not having
                                 * much luck
                                 */
                                min_bytes *= 2;
                                goto again;
                        }
                } else {
                        last = next;
                        window_free += next->bytes;
                        if (entry->bytes > max_extent)
                                max_extent = entry->bytes;
                }
        }

        cluster->window_start = entry->offset;

        /*
         * now we've found our entries, pull them out of the free space
         * cache and put them into the cluster rbtree
         *
         * The cluster includes an rbtree, but only uses the offset index
         * of each free space cache entry.
         */
        while (1) {
                node = rb_next(&entry->offset_index);
                unlink_free_space(block_group, entry);
                ret = tree_insert_offset(&cluster->root, entry->offset,
                                         &entry->offset_index);
                BUG_ON(ret);

                if (!node || entry == last)
                        break;

                entry = rb_entry(node, struct btrfs_free_space, offset_index);
        }
        ret = 0;
        cluster->max_size = max_extent;
        atomic_inc(&block_group->count);
        list_add_tail(&cluster->block_group_list, &block_group->cluster_list);
        cluster->block_group = block_group;
out:
        spin_unlock(&cluster->lock);
        spin_unlock(&block_group->tree_lock);

        return ret;
}
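/*
 * Illustrative example (not part of the original source): a request with
 * bytes == 64K and empty_size == 192K wants a 256K window.  Starting from a
 * hole of at least min_bytes, the loop keeps pulling in the next
 * offset-ordered holes as long as each candidate starts within
 * 2 * (bytes + empty_size) == 512K of window_start; once window_free reaches
 * 256K, every entry from the first one through 'last' is moved from the block
 * group's trees into cluster->root.  If the window spreads out too far too
 * many times, min_bytes is doubled and the search restarts at 'again'.
 */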
/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
        spin_lock_init(&cluster->lock);
        spin_lock_init(&cluster->refill_lock);
        cluster->root.rb_node = NULL;
        cluster->max_size = 0;
        INIT_LIST_HEAD(&cluster->block_group_list);
        cluster->block_group = NULL;
}