free-space-cache.c

/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
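
/*
 * Each free extent in a block group is tracked by one of these entries.  The
 * same entry is linked into two rbtrees on the block group: free_space_offset,
 * sorted by where the extent starts, and free_space_bytes, sorted by how large
 * it is.
 */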
struct btrfs_free_space {
	struct rb_node bytes_index;
	struct rb_node offset_index;
	u64 offset;
	u64 bytes;
};
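
/*
 * Insert an entry into the offset-sorted tree.  No two entries may start at
 * the same offset, so an equal key means the cache is inconsistent and we
 * return -EEXIST.
 */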
static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset)
			p = &(*p)->rb_left;
		else if (offset > info->offset)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}
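
/*
 * Insert an entry into the size-sorted tree.  Many entries can share a size,
 * so equal keys simply descend to the right and this insert cannot fail.
 */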
static int tree_insert_bytes(struct rb_root *root, u64 bytes,
			     struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, bytes_index);

		if (bytes < info->bytes)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}

/*
 * searches the tree for the given offset.
 *
 * fuzzy == 1: this is used for allocations where we are given a hint of where
 * to look for free space.  Because the hint may not be completely on an offset
 * mark, or the hint may no longer point to free space, we need to fudge our
 * results a bit.  So we look for free space starting at or after offset with
 * at least bytes size.  We prefer to find as close to the given offset as we
 * can.  Also, if the offset is within a free space range, then we will return
 * the free space that contains the given offset, which means we can return a
 * free space chunk with an offset before the provided offset.
 *
 * fuzzy == 0: this is just a normal tree search.  Give us the free space that
 * starts at the given offset which is at least bytes size, and if it's not
 * there return NULL.
 */
static struct btrfs_free_space *tree_search_offset(struct rb_root *root,
						   u64 offset, u64 bytes,
						   int fuzzy)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_free_space *entry, *ret = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_free_space, offset_index);

		if (offset < entry->offset) {
			if (fuzzy &&
			    (!ret || entry->offset < ret->offset) &&
			    (bytes <= entry->bytes))
				ret = entry;
			n = n->rb_left;
		} else if (offset > entry->offset) {
			if (fuzzy &&
			    (entry->offset + entry->bytes - 1) >= offset &&
			    bytes <= entry->bytes) {
				ret = entry;
				break;
			}
			n = n->rb_right;
		} else {
			if (bytes > entry->bytes) {
				n = n->rb_right;
				continue;
			}
			ret = entry;
			break;
		}
	}

	return ret;
}

/*
 * return a chunk at least bytes size, as close to offset as we can get.
 */
static struct btrfs_free_space *tree_search_bytes(struct rb_root *root,
						  u64 offset, u64 bytes)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_free_space *entry, *ret = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_free_space, bytes_index);

		if (bytes < entry->bytes) {
			/*
			 * We prefer to get a hole size as close to the size we
			 * are asking for so we don't take small slivers out of
			 * huge holes, but we also want to get as close to the
			 * offset as possible so we don't have a whole lot of
			 * fragmentation.
			 */
			if (offset <= entry->offset) {
				if (!ret)
					ret = entry;
				else if (entry->bytes < ret->bytes)
					ret = entry;
				else if (entry->offset < ret->offset)
					ret = entry;
			}
			n = n->rb_left;
		} else if (bytes > entry->bytes) {
			n = n->rb_right;
		} else {
			/*
			 * Ok we may have multiple chunks of the wanted size,
			 * so we don't want to take the first one we find, we
			 * want to take the one closest to our given offset, so
			 * keep searching just in case there's a better match.
			 */
			n = n->rb_right;
			if (offset > entry->offset)
				continue;
			else if (!ret || entry->offset < ret->offset)
				ret = entry;
		}
	}

	return ret;
}
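
/*
 * unlink_free_space()/link_free_space() remove or add an entry in both
 * rbtrees at once.  Callers are expected to hold block_group->tree_lock.
 */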
static void unlink_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &block_group->free_space_offset);
	rb_erase(&info->bytes_index, &block_group->free_space_bytes);
}

static int link_free_space(struct btrfs_block_group_cache *block_group,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	BUG_ON(!info->bytes);
	ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
				 &info->offset_index);
	if (ret)
		return ret;

	ret = tree_insert_bytes(&block_group->free_space_bytes, info->bytes,
				&info->bytes_index);
	if (ret)
		return ret;

	return ret;
}
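
/*
 * Give the extent [offset, offset + bytes) back to the block group's free
 * space cache, merging it with any free space entries that border it.
 *
 * A minimal usage sketch (hypothetical caller, not taken from this file):
 *
 *	ret = btrfs_add_free_space(block_group, start, num_bytes);
 *	if (ret)
 *		goto error;
 *
 * where start/num_bytes describe the extent being returned.
 */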
int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
			 u64 offset, u64 bytes)
{
	struct btrfs_free_space *right_info;
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *info = NULL;
	int ret = 0;

	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;

	spin_lock(&block_group->tree_lock);

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(&block_group->free_space_offset,
					offset + bytes, 0, 0);
	left_info = tree_search_offset(&block_group->free_space_offset,
				       offset - 1, 0, 1);

	if (right_info) {
		unlink_free_space(block_group, right_info);
		info->bytes += right_info->bytes;
		kfree(right_info);
	}

	if (left_info && left_info->offset + left_info->bytes == offset) {
		unlink_free_space(block_group, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kfree(left_info);
	}

	ret = link_free_space(block_group, info);
	if (ret)
		kfree(info);

	spin_unlock(&block_group->tree_lock);

	if (ret) {
		printk(KERN_ERR "btrfs: unable to add free space: %d\n", ret);
		if (ret == -EEXIST)
			BUG();
	}

	return ret;
}
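
/*
 * Carve [offset, offset + bytes) out of the free space cache.  The range may
 * cover a whole entry, the front of an entry, or (during tree log replay) the
 * middle of an entry, in which case the entry is split around the hole.
 */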
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	spin_lock(&block_group->tree_lock);

	info = tree_search_offset(&block_group->free_space_offset, offset, 0,
				  1);
	if (info && info->offset == offset) {
		if (info->bytes < bytes) {
			printk(KERN_ERR "Found free space at %llu, size %llu, "
			       "trying to use %llu\n",
			       (unsigned long long)info->offset,
			       (unsigned long long)info->bytes,
			       (unsigned long long)bytes);
			WARN_ON(1);
			ret = -EINVAL;
			spin_unlock(&block_group->tree_lock);
			goto out;
		}
		unlink_free_space(block_group, info);

		if (info->bytes == bytes) {
			kfree(info);
			spin_unlock(&block_group->tree_lock);
			goto out;
		}

		info->offset += bytes;
		info->bytes -= bytes;

		ret = link_free_space(block_group, info);
		spin_unlock(&block_group->tree_lock);
		BUG_ON(ret);
	} else if (info && info->offset < offset &&
		   info->offset + info->bytes >= offset + bytes) {
		u64 old_start = info->offset;
		/*
		 * we're freeing space in the middle of the info,
		 * this can happen during tree log replay
		 *
		 * first unlink the old info and then
		 * insert it again after the hole we're creating
		 */
		unlink_free_space(block_group, info);
		if (offset + bytes < info->offset + info->bytes) {
			u64 old_end = info->offset + info->bytes;
			info->offset = offset + bytes;
			info->bytes = old_end - info->offset;
			ret = link_free_space(block_group, info);
			BUG_ON(ret);
		} else {
			/* the hole we're creating ends at the end
			 * of the info struct, just free the info
			 */
			kfree(info);
		}
		spin_unlock(&block_group->tree_lock);

		/* step two, insert a new info struct to cover anything
		 * before the hole
		 */
		ret = btrfs_add_free_space(block_group, old_start,
					   offset - old_start);
		BUG_ON(ret);
	} else {
		spin_unlock(&block_group->tree_lock);
		if (!info) {
			printk(KERN_ERR "couldn't find space %llu to free\n",
			       (unsigned long long)offset);
			printk(KERN_ERR "cached is %d, offset %llu bytes %llu\n",
			       block_group->cached,
			       (unsigned long long)block_group->key.objectid,
			       (unsigned long long)block_group->key.offset);
			btrfs_dump_free_space(block_group, bytes);
		} else if (info) {
			printk(KERN_ERR "hmm, found offset=%llu bytes=%llu, "
			       "but wanted offset=%llu bytes=%llu\n",
			       (unsigned long long)info->offset,
			       (unsigned long long)info->bytes,
			       (unsigned long long)offset,
			       (unsigned long long)bytes);
		}
		WARN_ON(1);
	}
out:
	return ret;
}
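
/*
 * Debugging helper: print every free space entry in the block group and count
 * how many of them are at least 'bytes' long.
 */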
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
{
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes)
			count++;
		printk(KERN_ERR "entry offset %llu, bytes %llu\n",
		       (unsigned long long)info->offset,
		       (unsigned long long)info->bytes);
	}
	printk(KERN_INFO "%d blocks of free space at or bigger than bytes\n",
	       count);
}
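
/*
 * Total up all of the free space currently cached for this block group.
 */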
u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space *info;
	struct rb_node *n;
	u64 ret = 0;

	for (n = rb_first(&block_group->free_space_offset); n;
	     n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		ret += info->bytes;
	}

	return ret;
}

/*
 * for a given cluster, put all of its extents back into the free
 * space cache.  If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already.  In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;

	spin_lock(&cluster->lock);
	if (cluster->block_group != block_group)
		goto out;

	cluster->window_start = 0;
	node = rb_first(&cluster->root);
	while (node) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		rb_erase(&entry->offset_index, &cluster->root);
		link_free_space(block_group, entry);
	}
	list_del_init(&cluster->block_group_list);

	btrfs_put_block_group(cluster->block_group);
	cluster->block_group = NULL;
	cluster->root.rb_node = NULL;
out:
	spin_unlock(&cluster->lock);
	return 0;
}
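
/*
 * Tear down the free space cache for a block group: return any clusters that
 * still point at it and free every remaining entry.  The tree lock is dropped
 * periodically so we don't hold off the scheduler while walking a large tree.
 */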
void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space *info;
	struct rb_node *node;
	struct btrfs_free_cluster *cluster;
	struct btrfs_free_cluster *safe;

	spin_lock(&block_group->tree_lock);

	list_for_each_entry_safe(cluster, safe, &block_group->cluster_list,
				 block_group_list) {

		WARN_ON(cluster->block_group != block_group);
		__btrfs_return_cluster_to_free_space(block_group, cluster);
	}

	while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, bytes_index);
		unlink_free_space(block_group, info);
		kfree(info);
		if (need_resched()) {
			spin_unlock(&block_group->tree_lock);
			cond_resched();
			spin_lock(&block_group->tree_lock);
		}
	}
	spin_unlock(&block_group->tree_lock);
}
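
/*
 * Hand out 'bytes' from the free space cache, preferring space at or after
 * 'offset' and requiring an entry of at least bytes + empty_size.  Returns
 * the start of the allocation, or 0 if nothing suitable was found.
 */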
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
			       u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space *entry = NULL;
	u64 ret = 0;

	spin_lock(&block_group->tree_lock);
	entry = tree_search_offset(&block_group->free_space_offset, offset,
				   bytes + empty_size, 1);
	if (!entry)
		entry = tree_search_bytes(&block_group->free_space_bytes,
					  offset, bytes + empty_size);
	if (entry) {
		unlink_free_space(block_group, entry);
		ret = entry->offset;
		entry->offset += bytes;
		entry->bytes -= bytes;

		if (!entry->bytes)
			kfree(entry);
		else
			link_free_space(block_group, entry);
	}
	spin_unlock(&block_group->tree_lock);

	return ret;
}

/*
 * given a cluster, put all of its extents back into the free space
 * cache.  If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
			       struct btrfs_block_group_cache *block_group,
			       struct btrfs_free_cluster *cluster)
{
	int ret;

	/* first, get a safe pointer to the block group */
	spin_lock(&cluster->lock);
	if (!block_group) {
		block_group = cluster->block_group;
		if (!block_group) {
			spin_unlock(&cluster->lock);
			return 0;
		}
	} else if (cluster->block_group != block_group) {
		/* someone else has already freed it don't redo their work */
		spin_unlock(&cluster->lock);
		return 0;
	}
	atomic_inc(&block_group->count);
	spin_unlock(&cluster->lock);

	/* now return any extents the cluster had on it */
	spin_lock(&block_group->tree_lock);
	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&block_group->tree_lock);

	/* finally drop our ref */
	btrfs_put_block_group(block_group);
	return ret;
}

/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start)
{
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	u64 ret = 0;

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while (1) {
		if (entry->bytes < bytes || entry->offset < min_start) {
			struct rb_node *node;

			node = rb_next(&entry->offset_index);
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		}
		ret = entry->offset;

		entry->offset += bytes;
		entry->bytes -= bytes;

		if (entry->bytes == 0) {
			rb_erase(&entry->offset_index, &cluster->root);
			kfree(entry);
		}
		break;
	}
out:
	spin_unlock(&cluster->lock);
	return ret;
}

/*
 * here we try to find a cluster of blocks in a block group.  The goal
 * is to find at least bytes free and up to empty_size + bytes free.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -ENOSPC
 */
int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	struct btrfs_free_space *next;
	struct btrfs_free_space *last;
	u64 min_bytes;
	u64 window_start;
	u64 window_free;
	u64 max_extent = 0;
	int total_retries = 0;
	int ret;

	/* for metadata, allow allocations with more holes */
	if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		/*
		 * we want to do larger allocations when we are
		 * flushing out the delayed refs, it helps prevent
		 * making more work as we go along.
		 */
		if (trans->transaction->delayed_refs.flushing)
			min_bytes = max(bytes, (bytes + empty_size) >> 1);
		else
			min_bytes = max(bytes, (bytes + empty_size) >> 4);
	} else
		min_bytes = max(bytes, (bytes + empty_size) >> 2);

	spin_lock(&block_group->tree_lock);
	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}
again:
	min_bytes = min(min_bytes, bytes + empty_size);
	entry = tree_search_bytes(&block_group->free_space_bytes,
				  offset, min_bytes);
	if (!entry) {
		ret = -ENOSPC;
		goto out;
	}
	window_start = entry->offset;
	window_free = entry->bytes;
	last = entry;
	max_extent = entry->bytes;

	while (1) {
		/* our window is just right, let's fill it */
		if (window_free >= bytes + empty_size)
			break;

		node = rb_next(&last->offset_index);
		if (!node) {
			ret = -ENOSPC;
			goto out;
		}
		next = rb_entry(node, struct btrfs_free_space, offset_index);

		/*
		 * we haven't filled the empty size and the window is
		 * very large.  reset and try again
		 */
		if (next->offset - window_start > (bytes + empty_size) * 2) {
			entry = next;
			window_start = entry->offset;
			window_free = entry->bytes;
			last = entry;
			max_extent = 0;
			total_retries++;
			if (total_retries % 256 == 0) {
				if (min_bytes >= (bytes + empty_size)) {
					ret = -ENOSPC;
					goto out;
				}
				/*
				 * grow our allocation a bit, we're not having
				 * much luck
				 */
				min_bytes *= 2;
				goto again;
			}
		} else {
			last = next;
			window_free += next->bytes;
			if (next->bytes > max_extent)
				max_extent = next->bytes;
		}
	}

	cluster->window_start = entry->offset;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 *
	 * The cluster includes an rbtree, but only uses the offset index
	 * of each free space cache entry.
	 */
	while (1) {
		node = rb_next(&entry->offset_index);
		unlink_free_space(block_group, entry);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index);
		BUG_ON(ret);

		if (!node || entry == last)
			break;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	ret = 0;
	cluster->max_size = max_extent;
	atomic_inc(&block_group->count);
	list_add_tail(&cluster->block_group_list, &block_group->cluster_list);
	cluster->block_group = block_group;
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&block_group->tree_lock);

	return ret;
}

/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root.rb_node = NULL;
	cluster->max_size = 0;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}