free-space-cache.c

/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"

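/*
 * A single contiguous run of free space.  Each entry is linked into two
 * rbtrees at once: the block group's free_space_offset tree (sorted by
 * starting offset) and its free_space_bytes tree (sorted by size), so
 * lookups can go by either key.
 */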
struct btrfs_free_space {
        struct rb_node bytes_index;
        struct rb_node offset_index;
        u64 offset;
        u64 bytes;
};

static int tree_insert_offset(struct rb_root *root, u64 offset,
                              struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_free_space *info;

        while (*p) {
                parent = *p;
                info = rb_entry(parent, struct btrfs_free_space, offset_index);

                if (offset < info->offset)
                        p = &(*p)->rb_left;
                else if (offset > info->offset)
                        p = &(*p)->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);

        return 0;
}

static int tree_insert_bytes(struct rb_root *root, u64 bytes,
                             struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_free_space *info;

        while (*p) {
                parent = *p;
                info = rb_entry(parent, struct btrfs_free_space, bytes_index);

                if (bytes < info->bytes)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);

        return 0;
}

/*
 * searches the tree for the given offset.
 *
 * fuzzy == 1: this is used for allocations where we are given a hint of where
 * to look for free space.  Because the hint may not be completely on an offset
 * mark, or the hint may no longer point to free space, we need to fudge our
 * results a bit.  So we look for free space starting at or after offset with
 * at least bytes size.  We prefer to find space as close to the given offset
 * as we can.  Also, if the offset is within a free space range, then we will
 * return the free space that contains the given offset, which means we can
 * return a free space chunk with an offset before the provided offset.
 *
 * fuzzy == 0: this is just a normal tree search.  Give us the free space that
 * starts at the given offset and is at least bytes in size, and if it's not
 * there return NULL.
 */
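/*
 * Illustrative example (made-up values): with cached entries [0, 8K) and
 * [12K, 16K), a fuzzy search for offset 9K and bytes 2K returns the
 * [12K, 16K) entry, and a fuzzy search for offset 4K returns [0, 8K)
 * because 4K falls inside that range.  A non-fuzzy search for 9K returns
 * NULL since no entry starts exactly at 9K.
 */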
static struct btrfs_free_space *tree_search_offset(struct rb_root *root,
                                                   u64 offset, u64 bytes,
                                                   int fuzzy)
{
        struct rb_node *n = root->rb_node;
        struct btrfs_free_space *entry, *ret = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_free_space, offset_index);

                if (offset < entry->offset) {
                        if (fuzzy &&
                            (!ret || entry->offset < ret->offset) &&
                            (bytes <= entry->bytes))
                                ret = entry;
                        n = n->rb_left;
                } else if (offset > entry->offset) {
                        if (fuzzy &&
                            (entry->offset + entry->bytes - 1) >= offset &&
                            bytes <= entry->bytes) {
                                ret = entry;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        if (bytes > entry->bytes) {
                                n = n->rb_right;
                                continue;
                        }
                        ret = entry;
                        break;
                }
        }

        return ret;
}

/*
 * return a chunk of at least bytes size, as close to offset as we can get.
 */
static struct btrfs_free_space *tree_search_bytes(struct rb_root *root,
                                                  u64 offset, u64 bytes)
{
        struct rb_node *n = root->rb_node;
        struct btrfs_free_space *entry, *ret = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_free_space, bytes_index);

                if (bytes < entry->bytes) {
                        /*
                         * We prefer to get a hole as close as possible to the
                         * size we are asking for, so we don't take small
                         * slivers out of huge holes, but we also want to get
                         * as close to the offset as possible so we don't have
                         * a whole lot of fragmentation.
                         */
                        if (offset <= entry->offset) {
                                if (!ret)
                                        ret = entry;
                                else if (entry->bytes < ret->bytes)
                                        ret = entry;
                                else if (entry->offset < ret->offset)
                                        ret = entry;
                        }
                        n = n->rb_left;
                } else if (bytes > entry->bytes) {
                        n = n->rb_right;
                } else {
                        /*
                         * Ok, we may have multiple chunks of the wanted size,
                         * so we don't want to take the first one we find, we
                         * want to take the one closest to our given offset, so
                         * keep searching just in case there's a better match.
                         */
                        n = n->rb_right;
                        if (offset > entry->offset)
                                continue;
                        else if (!ret || entry->offset < ret->offset)
                                ret = entry;
                }
        }

        return ret;
}

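/*
 * Take an entry out of, or put it into, both per-block-group rbtrees (by
 * offset and by size).  Callers hold block_group->tree_lock; unlinking does
 * not free the entry, that is left to the caller.
 */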
static void unlink_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_free_space *info)
{
        rb_erase(&info->offset_index, &block_group->free_space_offset);
        rb_erase(&info->bytes_index, &block_group->free_space_bytes);
}

static int link_free_space(struct btrfs_block_group_cache *block_group,
                           struct btrfs_free_space *info)
{
        int ret = 0;

        BUG_ON(!info->bytes);
        ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
                                 &info->offset_index);
        if (ret)
                return ret;

        ret = tree_insert_bytes(&block_group->free_space_bytes, info->bytes,
                                &info->bytes_index);
        if (ret)
                return ret;

        return ret;
}

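/*
 * Add the free space extent [offset, offset + bytes) to the block group's
 * cache, merging it with any cached entry that ends at offset or starts at
 * offset + bytes so adjacent ranges collapse into one.  For example
 * (illustrative numbers), if [0, 4K) is already cached, adding [4K, 8K)
 * leaves a single [0, 8K) entry.
 */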
int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
                         u64 offset, u64 bytes)
{
        struct btrfs_free_space *right_info;
        struct btrfs_free_space *left_info;
        struct btrfs_free_space *info = NULL;
        int ret = 0;

        info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
        if (!info)
                return -ENOMEM;

        info->offset = offset;
        info->bytes = bytes;

        spin_lock(&block_group->tree_lock);

        /*
         * first we want to see if there is free space adjacent to the range
         * we are adding.  if there is, remove that struct and add a new one
         * to cover the entire range
         */
        right_info = tree_search_offset(&block_group->free_space_offset,
                                        offset + bytes, 0, 0);
        left_info = tree_search_offset(&block_group->free_space_offset,
                                       offset - 1, 0, 1);

        if (right_info) {
                unlink_free_space(block_group, right_info);
                info->bytes += right_info->bytes;
                kfree(right_info);
        }

        if (left_info && left_info->offset + left_info->bytes == offset) {
                unlink_free_space(block_group, left_info);
                info->offset = left_info->offset;
                info->bytes += left_info->bytes;
                kfree(left_info);
        }

        ret = link_free_space(block_group, info);
        if (ret)
                kfree(info);

        spin_unlock(&block_group->tree_lock);

        if (ret) {
                printk(KERN_ERR "btrfs: unable to add free space: %d\n", ret);
                BUG_ON(ret == -EEXIST);
        }

        return ret;
}

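/*
 * Remove [offset, offset + bytes) from the free space cache.  The range must
 * either start at the beginning of a cached entry or fall entirely inside
 * one (the latter can happen during tree log replay); in the middle-of-entry
 * case the entry is split into the pieces before and after the removed range.
 */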
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                            u64 offset, u64 bytes)
{
        struct btrfs_free_space *info;
        int ret = 0;

        spin_lock(&block_group->tree_lock);

        info = tree_search_offset(&block_group->free_space_offset, offset, 0,
                                  1);
        if (info && info->offset == offset) {
                if (info->bytes < bytes) {
                        printk(KERN_ERR "Found free space at %llu, size %llu, "
                               "trying to use %llu\n",
                               (unsigned long long)info->offset,
                               (unsigned long long)info->bytes,
                               (unsigned long long)bytes);
                        WARN_ON(1);
                        ret = -EINVAL;
                        spin_unlock(&block_group->tree_lock);
                        goto out;
                }
                unlink_free_space(block_group, info);

                if (info->bytes == bytes) {
                        kfree(info);
                        spin_unlock(&block_group->tree_lock);
                        goto out;
                }

                info->offset += bytes;
                info->bytes -= bytes;

                ret = link_free_space(block_group, info);
                spin_unlock(&block_group->tree_lock);
                BUG_ON(ret);
        } else if (info && info->offset < offset &&
                   info->offset + info->bytes >= offset + bytes) {
                u64 old_start = info->offset;
                /*
                 * we're freeing space in the middle of the info,
                 * this can happen during tree log replay
                 *
                 * first unlink the old info and then
                 * insert it again after the hole we're creating
                 */
                unlink_free_space(block_group, info);
                if (offset + bytes < info->offset + info->bytes) {
                        u64 old_end = info->offset + info->bytes;
                        info->offset = offset + bytes;
                        info->bytes = old_end - info->offset;
                        ret = link_free_space(block_group, info);
                        BUG_ON(ret);
                } else {
                        /* the hole we're creating ends at the end
                         * of the info struct, just free the info
                         */
                        kfree(info);
                }
                spin_unlock(&block_group->tree_lock);
                /* step two, insert a new info struct to cover anything
                 * before the hole
                 */
                ret = btrfs_add_free_space(block_group, old_start,
                                           offset - old_start);
                BUG_ON(ret);
        } else {
                spin_unlock(&block_group->tree_lock);
                if (!info) {
                        printk(KERN_ERR "couldn't find space %llu to free\n",
                               (unsigned long long)offset);
                        printk(KERN_ERR "cached is %d, offset %llu bytes %llu\n",
                               block_group->cached,
                               (unsigned long long)block_group->key.objectid,
                               (unsigned long long)block_group->key.offset);
                        btrfs_dump_free_space(block_group, bytes);
                } else if (info) {
                        printk(KERN_ERR "hmm, found offset=%llu bytes=%llu, "
                               "but wanted offset=%llu bytes=%llu\n",
                               (unsigned long long)info->offset,
                               (unsigned long long)info->bytes,
                               (unsigned long long)offset,
                               (unsigned long long)bytes);
                }
                WARN_ON(1);
        }
out:
        return ret;
}

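/*
 * Debugging helper: print every cached free space entry for the block group
 * and count how many of them are at least 'bytes' large.
 */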
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
                           u64 bytes)
{
        struct btrfs_free_space *info;
        struct rb_node *n;
        int count = 0;

        for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) {
                info = rb_entry(n, struct btrfs_free_space, offset_index);
                if (info->bytes >= bytes)
                        count++;
                printk(KERN_ERR "entry offset %llu, bytes %llu\n",
                       (unsigned long long)info->offset,
                       (unsigned long long)info->bytes);
        }
        printk(KERN_INFO "%d blocks of free space at or bigger than %llu bytes\n",
               count, (unsigned long long)bytes);
}

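/*
 * Return the total number of free bytes currently cached for this block
 * group by summing every entry in the offset tree.
 */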
u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space *info;
        struct rb_node *n;
        u64 ret = 0;

        for (n = rb_first(&block_group->free_space_offset); n;
             n = rb_next(n)) {
                info = rb_entry(n, struct btrfs_free_space, offset_index);
                ret += info->bytes;
        }

        return ret;
}

/*
 * for a given cluster, put all of its extents back into the free
 * space cache.  If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already.  In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
                             struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster)
{
        struct btrfs_free_space *entry;
        struct rb_node *node;

        spin_lock(&cluster->lock);
        if (cluster->block_group != block_group)
                goto out;

        cluster->window_start = 0;
        node = rb_first(&cluster->root);
        while (node) {
                entry = rb_entry(node, struct btrfs_free_space, offset_index);
                node = rb_next(&entry->offset_index);
                rb_erase(&entry->offset_index, &cluster->root);
                link_free_space(block_group, entry);
        }
        list_del_init(&cluster->block_group_list);

        btrfs_put_block_group(cluster->block_group);
        cluster->block_group = NULL;
        cluster->root.rb_node = NULL;
out:
        spin_unlock(&cluster->lock);
        return 0;
}

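/*
 * Tear down the entire free space cache for a block group: return any
 * clusters still attached to it and then free every cached entry, dropping
 * and retaking tree_lock periodically so we don't hold the lock for too long
 * on a large tree.
 */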
void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space *info;
        struct rb_node *node;
        struct btrfs_free_cluster *cluster;
        struct btrfs_free_cluster *safe;

        spin_lock(&block_group->tree_lock);

        list_for_each_entry_safe(cluster, safe, &block_group->cluster_list,
                                 block_group_list) {

                WARN_ON(cluster->block_group != block_group);
                __btrfs_return_cluster_to_free_space(block_group, cluster);
        }

        while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
                info = rb_entry(node, struct btrfs_free_space, bytes_index);
                unlink_free_space(block_group, info);
                kfree(info);
                if (need_resched()) {
                        spin_unlock(&block_group->tree_lock);
                        cond_resched();
                        spin_lock(&block_group->tree_lock);
                }
        }
        spin_unlock(&block_group->tree_lock);
}

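/*
 * Allocate 'bytes' from the free space cache, preferring space at or near
 * 'offset' and requiring a hole of at least bytes + empty_size.  Returns the
 * start of the allocated range, or 0 if nothing large enough was found.
 */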
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
                               u64 offset, u64 bytes, u64 empty_size)
{
        struct btrfs_free_space *entry = NULL;
        u64 ret = 0;

        spin_lock(&block_group->tree_lock);
        entry = tree_search_offset(&block_group->free_space_offset, offset,
                                   bytes + empty_size, 1);
        if (!entry)
                entry = tree_search_bytes(&block_group->free_space_bytes,
                                          offset, bytes + empty_size);
        if (entry) {
                unlink_free_space(block_group, entry);
                ret = entry->offset;
                entry->offset += bytes;
                entry->bytes -= bytes;

                if (!entry->bytes)
                        kfree(entry);
                else
                        link_free_space(block_group, entry);
        }
        spin_unlock(&block_group->tree_lock);

        return ret;
}

/*
 * given a cluster, put all of its extents back into the free space
 * cache.  If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
                               struct btrfs_block_group_cache *block_group,
                               struct btrfs_free_cluster *cluster)
{
        int ret;

        /* first, get a safe pointer to the block group */
        spin_lock(&cluster->lock);
        if (!block_group) {
                block_group = cluster->block_group;
                if (!block_group) {
                        spin_unlock(&cluster->lock);
                        return 0;
                }
        } else if (cluster->block_group != block_group) {
                /* someone else has already freed it, don't redo their work */
                spin_unlock(&cluster->lock);
                return 0;
        }
        atomic_inc(&block_group->count);
        spin_unlock(&cluster->lock);

        /* now return any extents the cluster had on it */
        spin_lock(&block_group->tree_lock);
        ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
        spin_unlock(&block_group->tree_lock);

        /* finally drop our ref */
        btrfs_put_block_group(block_group);
        return ret;
}

/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster, u64 bytes,
                             u64 min_start)
{
        struct btrfs_free_space *entry = NULL;
        struct rb_node *node;
        u64 ret = 0;

        spin_lock(&cluster->lock);
        if (bytes > cluster->max_size)
                goto out;

        if (cluster->block_group != block_group)
                goto out;

        node = rb_first(&cluster->root);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_free_space, offset_index);
        while (1) {
                if (entry->bytes < bytes || entry->offset < min_start) {
                        struct rb_node *node;

                        node = rb_next(&entry->offset_index);
                        if (!node)
                                break;
                        entry = rb_entry(node, struct btrfs_free_space,
                                         offset_index);
                        continue;
                }
                ret = entry->offset;

                entry->offset += bytes;
                entry->bytes -= bytes;

                if (entry->bytes == 0) {
                        rb_erase(&entry->offset_index, &cluster->root);
                        kfree(entry);
                }
                break;
        }
out:
        spin_unlock(&cluster->lock);
        return ret;
}

/*
 * here we try to find a cluster of blocks in a block group.  The goal
 * is to find at least bytes free and up to empty_size + bytes free.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -ENOSPC
 */
int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
                             struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster,
                             u64 offset, u64 bytes, u64 empty_size)
{
        struct btrfs_free_space *entry = NULL;
        struct rb_node *node;
        struct btrfs_free_space *next;
        struct btrfs_free_space *last;
        u64 min_bytes;
        u64 window_start;
        u64 window_free;
        u64 max_extent = 0;
        int total_retries = 0;
        int ret;

        /* for metadata, allow allocations with more holes */
        if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
                /*
                 * we want to do larger allocations when we are
                 * flushing out the delayed refs; it helps prevent
                 * making more work as we go along.
                 */
                if (trans->transaction->delayed_refs.flushing)
                        min_bytes = max(bytes, (bytes + empty_size) >> 1);
                else
                        min_bytes = max(bytes, (bytes + empty_size) >> 4);
        } else
                min_bytes = max(bytes, (bytes + empty_size) >> 2);
        spin_lock(&block_group->tree_lock);
        spin_lock(&cluster->lock);

        /* someone already found a cluster, hooray */
        if (cluster->block_group) {
                ret = 0;
                goto out;
        }
again:
        min_bytes = min(min_bytes, bytes + empty_size);
        entry = tree_search_bytes(&block_group->free_space_bytes,
                                  offset, min_bytes);
        if (!entry) {
                ret = -ENOSPC;
                goto out;
        }
        window_start = entry->offset;
        window_free = entry->bytes;
        last = entry;
        max_extent = entry->bytes;

        while (1) {
                /* our window is just right, let's fill it */
                if (window_free >= bytes + empty_size)
                        break;

                node = rb_next(&last->offset_index);
                if (!node) {
                        ret = -ENOSPC;
                        goto out;
                }
                next = rb_entry(node, struct btrfs_free_space, offset_index);

                /*
                 * we haven't filled the empty size and the window is
                 * very large.  reset and try again
                 */
                if (next->offset - window_start > (bytes + empty_size) * 2) {
                        entry = next;
                        window_start = entry->offset;
                        window_free = entry->bytes;
                        last = entry;
                        max_extent = 0;
                        total_retries++;
                        if (total_retries % 256 == 0) {
                                if (min_bytes >= (bytes + empty_size)) {
                                        ret = -ENOSPC;
                                        goto out;
                                }
                                /*
                                 * grow our allocation a bit, we're not having
                                 * much luck
                                 */
                                min_bytes *= 2;
                                goto again;
                        }
                } else {
                        last = next;
                        window_free += next->bytes;
                        /* track the largest extent in the window */
                        if (next->bytes > max_extent)
                                max_extent = next->bytes;
                }
        }
        cluster->window_start = entry->offset;

        /*
         * now we've found our entries, pull them out of the free space
         * cache and put them into the cluster rbtree
         *
         * The cluster includes an rbtree, but only uses the offset index
         * of each free space cache entry.
         */
        while (1) {
                node = rb_next(&entry->offset_index);
                unlink_free_space(block_group, entry);
                ret = tree_insert_offset(&cluster->root, entry->offset,
                                         &entry->offset_index);
                BUG_ON(ret);

                if (!node || entry == last)
                        break;

                entry = rb_entry(node, struct btrfs_free_space, offset_index);
        }
        ret = 0;
        cluster->max_size = max_extent;
        atomic_inc(&block_group->count);
        list_add_tail(&cluster->block_group_list, &block_group->cluster_list);
        cluster->block_group = block_group;
out:
        spin_unlock(&cluster->lock);
        spin_unlock(&block_group->tree_lock);
        return ret;
}

/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
        spin_lock_init(&cluster->lock);
        spin_lock_init(&cluster->refill_lock);
        cluster->root.rb_node = NULL;
        cluster->max_size = 0;
        INIT_LIST_HEAD(&cluster->block_group_list);
        cluster->block_group = NULL;
}