extent-tree.c

/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"

static int finish_current_insert(struct btrfs_trans_handle *trans, struct
				 btrfs_root *extent_root);
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
			       btrfs_root *extent_root);
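
/*
 * scan the extent tree for the range covered by a metadata block group
 * and record every free block in the extent_map_radix, so later
 * allocations can find free space without searching the tree again
 */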
static int cache_block_group(struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_leaf *leaf;
	struct radix_tree_root *extent_radix;
	int slot;
	u64 i;
	u64 last = 0;
	u64 hole_size;
	u64 limit;
	int found = 0;

	root = root->fs_info->extent_root;
	extent_radix = &root->fs_info->extent_map_radix;

	if (block_group->cached)
		return 0;
	if (block_group->data)
		return 0;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 1;
	key.objectid = block_group->key.objectid;
	key.flags = 0;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	if (ret && path->slots[0] > 0)
		path->slots[0]--;
	limit = block_group->key.objectid + block_group->key.offset;
	while(1) {
		leaf = btrfs_buffer_leaf(path->nodes[0]);
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(&leaf->header)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto err;
			if (ret == 0) {
				continue;
			} else {
				if (found) {
					hole_size = block_group->key.objectid +
						block_group->key.offset - last;
				} else {
					last = block_group->key.objectid;
					hole_size = block_group->key.offset;
				}
				for (i = 0; i < hole_size; i++) {
					set_radix_bit(extent_radix,
						      last + i);
				}
				break;
			}
		}
		btrfs_disk_key_to_cpu(&key, &leaf->items[slot].key);
		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset) {
			if (found) {
				hole_size = block_group->key.objectid +
					block_group->key.offset - last;
			} else {
				last = block_group->key.objectid;
				hole_size = block_group->key.offset;
			}
			for (i = 0; i < hole_size; i++) {
				set_radix_bit(extent_radix, last + i);
			}
			break;
		}
		if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
			if (!found) {
				last = key.objectid + key.offset;
				found = 1;
			} else {
				hole_size = key.objectid - last;
				for (i = 0; i < hole_size; i++) {
					set_radix_bit(extent_radix, last + i);
				}
				last = key.objectid + key.offset;
			}
		}
		path->slots[0]++;
	}

	block_group->cached = 1;
err:
	btrfs_free_path(path);
	return 0;
}
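
/*
 * return the block group cache that contains a given block number,
 * checking the metadata radix first and then the data radix.  NULL is
 * returned when no cached group covers the block
 */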
struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
							 btrfs_fs_info *info,
							 u64 blocknr)
{
	struct btrfs_block_group_cache *block_group;
	int ret;

	ret = radix_tree_gang_lookup(&info->block_group_radix,
				     (void **)&block_group,
				     blocknr, 1);
	if (ret) {
		if (block_group->key.objectid <= blocknr && blocknr <=
		    block_group->key.objectid + block_group->key.offset)
			return block_group;
	}
	ret = radix_tree_gang_lookup(&info->block_group_data_radix,
				     (void **)&block_group,
				     blocknr, 1);
	if (ret) {
		if (block_group->key.objectid <= blocknr && blocknr <=
		    block_group->key.objectid + block_group->key.offset)
			return block_group;
	}
	return NULL;
}

static u64 leaf_range(struct btrfs_root *root)
{
	u64 size = BTRFS_LEAF_DATA_SIZE(root);
	do_div(size, sizeof(struct btrfs_extent_item) +
	       sizeof(struct btrfs_item));
	return size;
}
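
/*
 * walk the free block radix for a block group and return a good block
 * number to start the allocation search from, moving on to another
 * block group when the current one looks full
 */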
static u64 find_search_start(struct btrfs_root *root,
			     struct btrfs_block_group_cache **cache_ret,
			     u64 search_start, int num)
{
	unsigned long gang[8];
	int ret;
	struct btrfs_block_group_cache *cache = *cache_ret;
	u64 last = max(search_start, cache->key.objectid);

	if (cache->data)
		goto out;
again:
	ret = cache_block_group(root, cache);
	if (ret)
		goto out;
	while(1) {
		ret = find_first_radix_bit(&root->fs_info->extent_map_radix,
					   gang, last, ARRAY_SIZE(gang));
		if (!ret)
			goto out;
		last = gang[ret-1] + 1;
		if (num > 1) {
			if (ret != ARRAY_SIZE(gang)) {
				goto new_group;
			}
			if (gang[ret-1] - gang[0] > leaf_range(root)) {
				continue;
			}
		}
		if (gang[0] >= cache->key.objectid + cache->key.offset) {
			goto new_group;
		}
		return gang[0];
	}
out:
	return max(cache->last_alloc, search_start);

new_group:
	cache = btrfs_lookup_block_group(root->fs_info,
					 last + cache->key.offset - 1);
	if (!cache) {
		return max((*cache_ret)->last_alloc, search_start);
	}
	cache = btrfs_find_block_group(root, cache,
				       last + cache->key.offset - 1, 0, 0);
	*cache_ret = cache;
	goto again;
}

static u64 div_factor(u64 num, int factor)
{
	num *= factor;
	do_div(num, 10);
	return num;
}
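
/*
 * pick a block group to allocate from.  Groups tagged as available and
 * matching the requested data/metadata type are tried first; if nothing
 * suitable is found the search falls back to a full scan and finally to
 * the other radix tree
 */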
struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
						 struct btrfs_block_group_cache
						 *hint, u64 search_start,
						 int data, int owner)
{
	struct btrfs_block_group_cache *cache[8];
	struct btrfs_block_group_cache *found_group = NULL;
	struct btrfs_fs_info *info = root->fs_info;
	struct radix_tree_root *radix;
	struct radix_tree_root *swap_radix;
	u64 used;
	u64 last = 0;
	u64 hint_last;
	int i;
	int ret;
	int full_search = 0;
	int factor = 8;
	int data_swap = 0;

	if (!owner)
		factor = 5;

	if (data) {
		radix = &info->block_group_data_radix;
		swap_radix = &info->block_group_radix;
	} else {
		radix = &info->block_group_radix;
		swap_radix = &info->block_group_data_radix;
	}

	if (search_start) {
		struct btrfs_block_group_cache *shint;
		shint = btrfs_lookup_block_group(info, search_start);
		if (shint->data == data) {
			used = btrfs_block_group_used(&shint->item);
			if (used + shint->pinned <
			    div_factor(shint->key.offset, factor)) {
				return shint;
			}
		}
	}
	if (hint && hint->data == data) {
		used = btrfs_block_group_used(&hint->item);
		if (used + hint->pinned <
		    div_factor(hint->key.offset, factor)) {
			return hint;
		}
		if (used >= div_factor(hint->key.offset, 8)) {
			radix_tree_tag_clear(radix,
					     hint->key.objectid +
					     hint->key.offset - 1,
					     BTRFS_BLOCK_GROUP_AVAIL);
		}
		last = hint->key.offset * 3;
		if (hint->key.objectid >= last)
			last = max(search_start + hint->key.offset - 1,
				   hint->key.objectid - last);
		else
			last = hint->key.objectid + hint->key.offset;
		hint_last = last;
	} else {
		if (hint)
			hint_last = max(hint->key.objectid, search_start);
		else
			hint_last = search_start;
		last = hint_last;
	}
	while(1) {
		ret = radix_tree_gang_lookup_tag(radix, (void **)cache,
						 last, ARRAY_SIZE(cache),
						 BTRFS_BLOCK_GROUP_AVAIL);
		if (!ret)
			break;
		for (i = 0; i < ret; i++) {
			last = cache[i]->key.objectid +
				cache[i]->key.offset;
			used = btrfs_block_group_used(&cache[i]->item);
			if (used + cache[i]->pinned <
			    div_factor(cache[i]->key.offset, factor)) {
				found_group = cache[i];
				goto found;
			}
			if (used >= div_factor(cache[i]->key.offset, 8)) {
				radix_tree_tag_clear(radix,
						     cache[i]->key.objectid +
						     cache[i]->key.offset - 1,
						     BTRFS_BLOCK_GROUP_AVAIL);
			}
		}
		cond_resched();
	}
	last = hint_last;
again:
	while(1) {
		ret = radix_tree_gang_lookup(radix, (void **)cache,
					     last, ARRAY_SIZE(cache));
		if (!ret)
			break;
		for (i = 0; i < ret; i++) {
			last = cache[i]->key.objectid +
				cache[i]->key.offset;
			used = btrfs_block_group_used(&cache[i]->item);
			if (used + cache[i]->pinned < cache[i]->key.offset) {
				found_group = cache[i];
				goto found;
			}
			if (used >= cache[i]->key.offset) {
				radix_tree_tag_clear(radix,
						     cache[i]->key.objectid +
						     cache[i]->key.offset - 1,
						     BTRFS_BLOCK_GROUP_AVAIL);
			}
		}
		cond_resched();
	}
	if (!full_search) {
		last = search_start;
		full_search = 1;
		goto again;
	}
	if (!data_swap) {
		struct radix_tree_root *tmp = radix;
		data_swap = 1;
		radix = swap_radix;
		swap_radix = tmp;
		last = search_start;
		goto again;
	}
	if (!found_group) {
		ret = radix_tree_gang_lookup(radix,
					     (void **)&found_group, 0, 1);
		if (ret == 0) {
			ret = radix_tree_gang_lookup(swap_radix,
						     (void **)&found_group,
						     0, 1);
		}
		BUG_ON(ret != 1);
	}
found:
	return found_group;
}
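
/*
 * increment the reference count on the extent item covering
 * [blocknr, blocknr + num_blocks)
 */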
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 blocknr, u64 num_blocks)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_leaf *l;
	struct btrfs_extent_item *item;
	u32 refs;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = blocknr;
	key.flags = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	key.offset = num_blocks;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
				0, 1);
	if (ret < 0)
		return ret;
	if (ret != 0) {
		BUG();
	}
	BUG_ON(ret != 0);
	l = btrfs_buffer_leaf(path->nodes[0]);
	item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(item);
	btrfs_set_extent_refs(item, refs + 1);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(root->fs_info->extent_root, path);
	btrfs_free_path(path);
	finish_current_insert(trans, root->fs_info->extent_root);
	del_pending_extents(trans, root->fs_info->extent_root);
	return 0;
}

int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root)
{
	finish_current_insert(trans, root->fs_info->extent_root);
	del_pending_extents(trans, root->fs_info->extent_root);
	return 0;
}

static int lookup_extent_ref(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 blocknr,
			     u64 num_blocks, u32 *refs)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_leaf *l;
	struct btrfs_extent_item *item;

	path = btrfs_alloc_path();
	key.objectid = blocknr;
	key.offset = num_blocks;
	key.flags = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
				0, 0);
	if (ret < 0)
		goto out;
	if (ret != 0)
		BUG();
	l = btrfs_buffer_leaf(path->nodes[0]);
	item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
	*refs = btrfs_extent_refs(item);
out:
	btrfs_free_path(path);
	return 0;
}

int btrfs_inc_root_ref(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	return btrfs_inc_extent_ref(trans, root, bh_blocknr(root->node), 1);
}
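
/*
 * increment the reference counts of everything pointed to by a node or
 * leaf.  If any of the increments fail, the references taken so far are
 * dropped again before returning the error
 */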
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct buffer_head *buf)
{
	u64 blocknr;
	struct btrfs_node *buf_node;
	struct btrfs_leaf *buf_leaf;
	struct btrfs_disk_key *key;
	struct btrfs_file_extent_item *fi;
	int i;
	int leaf;
	int ret;
	int faili;
	int err;

	if (!root->ref_cows)
		return 0;

	buf_node = btrfs_buffer_node(buf);
	leaf = btrfs_is_leaf(buf_node);
	buf_leaf = btrfs_buffer_leaf(buf);
	for (i = 0; i < btrfs_header_nritems(&buf_node->header); i++) {
		if (leaf) {
			u64 disk_blocknr;
			key = &buf_leaf->items[i].key;
			if (btrfs_disk_key_type(key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf_leaf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			disk_blocknr = btrfs_file_extent_disk_blocknr(fi);
			if (disk_blocknr == 0)
				continue;
			ret = btrfs_inc_extent_ref(trans, root, disk_blocknr,
				    btrfs_file_extent_disk_num_blocks(fi));
			if (ret) {
				faili = i;
				goto fail;
			}
		} else {
			blocknr = btrfs_node_blockptr(buf_node, i);
			ret = btrfs_inc_extent_ref(trans, root, blocknr, 1);
			if (ret) {
				faili = i;
				goto fail;
			}
		}
	}
	return 0;
fail:
	WARN_ON(1);
	for (i = 0; i < faili; i++) {
		if (leaf) {
			u64 disk_blocknr;
			key = &buf_leaf->items[i].key;
			if (btrfs_disk_key_type(key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf_leaf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			disk_blocknr = btrfs_file_extent_disk_blocknr(fi);
			if (disk_blocknr == 0)
				continue;
			err = btrfs_free_extent(trans, root, disk_blocknr,
				    btrfs_file_extent_disk_num_blocks(fi), 0);
			BUG_ON(err);
		} else {
			blocknr = btrfs_node_blockptr(buf_node, i);
			err = btrfs_free_extent(trans, root, blocknr, 1, 0);
			BUG_ON(err);
		}
	}
	return ret;
}
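
/*
 * write a single block group item back into the extent tree
 */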
static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	int ret;
	int pending_ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct btrfs_block_group_item *bi;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret < 0)
		goto fail;
	BUG_ON(ret);
	bi = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]), path->slots[0],
			    struct btrfs_block_group_item);
	memcpy(bi, &cache->item, sizeof(*bi));
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(extent_root, path);
fail:
	finish_current_insert(trans, extent_root);
	pending_ret = del_pending_extents(trans, extent_root);
	if (ret)
		return ret;
	if (pending_ret)
		return pending_ret;
	if (cache->data)
		cache->last_alloc = cache->first_free;
	return 0;
}

static int write_dirty_block_radix(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct radix_tree_root *radix)
{
	struct btrfs_block_group_cache *cache[8];
	int ret;
	int err = 0;
	int werr = 0;
	int i;
	struct btrfs_path *path;
	unsigned long off = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while(1) {
		ret = radix_tree_gang_lookup_tag(radix, (void **)cache,
						 off, ARRAY_SIZE(cache),
						 BTRFS_BLOCK_GROUP_DIRTY);
		if (!ret)
			break;
		for (i = 0; i < ret; i++) {
			err = write_one_cache_group(trans, root,
						    path, cache[i]);
			/*
			 * if we fail to write the cache group, we want
			 * to keep it marked dirty in hopes that a later
			 * write will work
			 */
			if (err) {
				werr = err;
				off = cache[i]->key.objectid +
					cache[i]->key.offset;
				continue;
			}
			radix_tree_tag_clear(radix, cache[i]->key.objectid +
					     cache[i]->key.offset - 1,
					     BTRFS_BLOCK_GROUP_DIRTY);
		}
	}
	btrfs_free_path(path);
	return werr;
}

int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	int ret;
	int ret2;

	ret = write_dirty_block_radix(trans, root,
				      &root->fs_info->block_group_radix);
	ret2 = write_dirty_block_radix(trans, root,
				       &root->fs_info->block_group_data_radix);
	if (ret)
		return ret;
	if (ret2)
		return ret2;
	return 0;
}
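
/*
 * update the space accounting in a block group after allocating or
 * freeing 'num' blocks starting at 'blocknr', moving the group between
 * the data and metadata radix trees when its usage type changes
 */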
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 blocknr, u64 num, int alloc, int mark_free,
			      int data)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num;
	u64 old_val;
	u64 block_in_group;
	u64 i;
	int ret;

	while(total) {
		cache = btrfs_lookup_block_group(info, blocknr);
		if (!cache) {
			return -1;
		}
		block_in_group = blocknr - cache->key.objectid;
		WARN_ON(block_in_group > cache->key.offset);
		radix_tree_tag_set(cache->radix, cache->key.objectid +
				   cache->key.offset - 1,
				   BTRFS_BLOCK_GROUP_DIRTY);

		old_val = btrfs_block_group_used(&cache->item);
		num = min(total, cache->key.offset - block_in_group);
		if (alloc) {
			if (blocknr > cache->last_alloc)
				cache->last_alloc = blocknr;
			if (!cache->data) {
				for (i = 0; i < num; i++) {
					clear_radix_bit(&info->extent_map_radix,
							blocknr + i);
				}
			}
			if (cache->data != data &&
			    old_val < (cache->key.offset >> 1)) {
				cache->data = data;
				radix_tree_delete(cache->radix,
						  cache->key.objectid +
						  cache->key.offset - 1);

				if (data) {
					cache->radix =
						&info->block_group_data_radix;
					cache->item.flags |=
						BTRFS_BLOCK_GROUP_DATA;
				} else {
					cache->radix = &info->block_group_radix;
					cache->item.flags &=
						~BTRFS_BLOCK_GROUP_DATA;
				}
				ret = radix_tree_insert(cache->radix,
							cache->key.objectid +
							cache->key.offset - 1,
							(void *)cache);
			}
			old_val += num;
		} else {
			old_val -= num;
			if (blocknr < cache->first_free)
				cache->first_free = blocknr;
			if (!cache->data && mark_free) {
				for (i = 0; i < num; i++) {
					set_radix_bit(&info->extent_map_radix,
						      blocknr + i);
				}
			}
			if (old_val < (cache->key.offset >> 1) &&
			    old_val + num >= (cache->key.offset >> 1)) {
				radix_tree_tag_set(cache->radix,
						   cache->key.objectid +
						   cache->key.offset - 1,
						   BTRFS_BLOCK_GROUP_AVAIL);
			}
		}
		btrfs_set_block_group_used(&cache->item, old_val);
		total -= num;
		blocknr += num;
	}
	return 0;
}
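
/*
 * copy the currently pinned blocks into the radix supplied by the caller
 */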
int btrfs_copy_pinned(struct btrfs_root *root, struct radix_tree_root *copy)
{
	unsigned long gang[8];
	u64 last = 0;
	struct radix_tree_root *pinned_radix = &root->fs_info->pinned_radix;
	int ret;
	int i;

	while(1) {
		ret = find_first_radix_bit(pinned_radix, gang, last,
					   ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0 ; i < ret; i++) {
			set_radix_bit(copy, gang[i]);
			last = gang[i] + 1;
		}
	}
	ret = find_first_radix_bit(&root->fs_info->extent_ins_radix, gang, 0,
				   ARRAY_SIZE(gang));
	WARN_ON(ret);
	return 0;
}

int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct radix_tree_root *unpin_radix)
{
	unsigned long gang[8];
	struct btrfs_block_group_cache *block_group;
	u64 first = 0;
	int ret;
	int i;
	struct radix_tree_root *pinned_radix = &root->fs_info->pinned_radix;
	struct radix_tree_root *extent_radix = &root->fs_info->extent_map_radix;

	while(1) {
		ret = find_first_radix_bit(unpin_radix, gang, 0,
					   ARRAY_SIZE(gang));
		if (!ret)
			break;
		if (!first)
			first = gang[0];
		for (i = 0; i < ret; i++) {
			clear_radix_bit(pinned_radix, gang[i]);
			clear_radix_bit(unpin_radix, gang[i]);
			block_group = btrfs_lookup_block_group(root->fs_info,
							       gang[i]);
			if (block_group) {
				WARN_ON(block_group->pinned == 0);
				block_group->pinned--;
				if (gang[i] < block_group->last_alloc)
					block_group->last_alloc = gang[i];
				if (!block_group->data)
					set_radix_bit(extent_radix, gang[i]);
			}
		}
	}
	return 0;
}
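
/*
 * insert extent items for any blocks recorded in extent_ins_radix.
 * These are blocks handed out for the extent tree itself, where the
 * item insertion had to be deferred to avoid recursion
 */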
static int finish_current_insert(struct btrfs_trans_handle *trans, struct
				 btrfs_root *extent_root)
{
	struct btrfs_key ins;
	struct btrfs_extent_item extent_item;
	int i;
	int ret;
	int err;
	unsigned long gang[8];
	struct btrfs_fs_info *info = extent_root->fs_info;

	btrfs_set_extent_refs(&extent_item, 1);
	ins.offset = 1;
	ins.flags = 0;
	btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
	btrfs_set_extent_owner(&extent_item, extent_root->root_key.objectid);

	while(1) {
		ret = find_first_radix_bit(&info->extent_ins_radix, gang, 0,
					   ARRAY_SIZE(gang));
		if (!ret)
			break;

		for (i = 0; i < ret; i++) {
			ins.objectid = gang[i];
			err = btrfs_insert_item(trans, extent_root, &ins,
						&extent_item,
						sizeof(extent_item));
			clear_radix_bit(&info->extent_ins_radix, gang[i]);
			WARN_ON(err);
		}
	}
	return 0;
}
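
/*
 * keep a block from being reused before the transaction commits.
 * Blocks allocated in the running transaction do not need pinning at
 * all; pending deletes from the extent tree are queued in
 * pending_del_radix instead of being pinned right away
 */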
static int pin_down_block(struct btrfs_root *root, u64 blocknr, int pending)
{
	int err;
	struct btrfs_header *header;
	struct buffer_head *bh;

	if (!pending) {
		bh = btrfs_find_tree_block(root, blocknr);
		if (bh) {
			if (buffer_uptodate(bh)) {
				u64 transid =
				    root->fs_info->running_transaction->transid;
				header = btrfs_buffer_header(bh);
				if (btrfs_header_generation(header) ==
				    transid) {
					btrfs_block_release(root, bh);
					return 0;
				}
			}
			btrfs_block_release(root, bh);
		}
		err = set_radix_bit(&root->fs_info->pinned_radix, blocknr);
		if (!err) {
			struct btrfs_block_group_cache *cache;
			cache = btrfs_lookup_block_group(root->fs_info,
							 blocknr);
			if (cache)
				cache->pinned++;
		}
	} else {
		err = set_radix_bit(&root->fs_info->pending_del_radix, blocknr);
	}
	BUG_ON(err < 0);
	return 0;
}

/*
 * remove an extent from the root, returns 0 on success
 */
static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
			 *root, u64 blocknr, u64 num_blocks, int pin,
			 int mark_free)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	int ret;
	struct btrfs_extent_item *ei;
	u32 refs;

	key.objectid = blocknr;
	key.flags = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	key.offset = num_blocks;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret);
	ei = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]), path->slots[0],
			    struct btrfs_extent_item);
	BUG_ON(ei->refs == 0);
	refs = btrfs_extent_refs(ei) - 1;
	btrfs_set_extent_refs(ei, refs);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (refs == 0) {
		u64 super_blocks_used;

		if (pin) {
			ret = pin_down_block(root, blocknr, 0);
			BUG_ON(ret);
		}

		super_blocks_used = btrfs_super_blocks_used(&info->super_copy);
		btrfs_set_super_blocks_used(&info->super_copy,
					    super_blocks_used - num_blocks);
		ret = btrfs_del_item(trans, extent_root, path);
		if (ret) {
			return ret;
		}
		ret = update_block_group(trans, root, blocknr, num_blocks, 0,
					 mark_free, 0);
		BUG_ON(ret);
	}
	btrfs_free_path(path);
	finish_current_insert(trans, extent_root);
	return ret;
}

/*
 * find all the blocks marked as pending in the radix tree and remove
 * them from the extent map
 */
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
			       btrfs_root *extent_root)
{
	int ret;
	int wret;
	int err = 0;
	unsigned long gang[4];
	int i;
	struct radix_tree_root *pending_radix;
	struct radix_tree_root *pinned_radix;
	struct btrfs_block_group_cache *cache;

	pending_radix = &extent_root->fs_info->pending_del_radix;
	pinned_radix = &extent_root->fs_info->pinned_radix;

	while(1) {
		ret = find_first_radix_bit(pending_radix, gang, 0,
					   ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++) {
			wret = set_radix_bit(pinned_radix, gang[i]);
			if (wret == 0) {
				cache =
				  btrfs_lookup_block_group(extent_root->fs_info,
							   gang[i]);
				if (cache)
					cache->pinned++;
			}
			if (wret < 0) {
				printk(KERN_CRIT "set_radix_bit, err %d\n",
				       wret);
				BUG_ON(wret < 0);
			}
			wret = clear_radix_bit(pending_radix, gang[i]);
			BUG_ON(wret);
			wret = __free_extent(trans, extent_root,
					     gang[i], 1, 0, 0);
			if (wret)
				err = wret;
		}
	}
	return err;
}

/*
 * remove an extent from the root, returns 0 on success
 */
int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, u64 blocknr, u64 num_blocks, int pin)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	int pending_ret;
	int ret;

	if (root == extent_root) {
		pin_down_block(root, blocknr, 1);
		return 0;
	}
	ret = __free_extent(trans, root, blocknr, num_blocks, pin, pin == 0);
	pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
	return ret ? ret : pending_ret;
}

/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
static int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
			    *orig_root, u64 num_blocks, u64 empty_size,
			    u64 search_start, u64 search_end, u64 hint_block,
			    struct btrfs_key *ins, u64 exclude_start,
			    u64 exclude_nr, int data)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;
	u64 hole_size = 0;
	int slot = 0;
	u64 last_block = 0;
	u64 test_block;
	u64 orig_search_start = search_start;
	int start_found;
	struct btrfs_leaf *l;
	struct btrfs_root *root = orig_root->fs_info->extent_root;
	struct btrfs_fs_info *info = root->fs_info;
	int total_needed = num_blocks;
	int level;
	struct btrfs_block_group_cache *block_group;
	int full_scan = 0;
	int wrapped = 0;
	u64 limit;

	WARN_ON(num_blocks < 1);
	ins->flags = 0;
	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);

	level = btrfs_header_level(btrfs_buffer_header(root->node));
	if (search_end == (u64)-1)
		search_end = btrfs_super_total_blocks(&info->super_copy);
	if (hint_block) {
		block_group = btrfs_lookup_block_group(info, hint_block);
		block_group = btrfs_find_block_group(root, block_group,
						     hint_block, data, 1);
	} else {
		block_group = btrfs_find_block_group(root,
						     trans->block_group, 0,
						     data, 1);
	}
	total_needed += empty_size;
	path = btrfs_alloc_path();

check_failed:
	if (!block_group->data)
		search_start = find_search_start(root, &block_group,
						 search_start, total_needed);
	else if (!full_scan)
		search_start = max(block_group->last_alloc, search_start);

	btrfs_init_path(path);
	ins->objectid = search_start;
	ins->offset = 0;
	start_found = 0;
	path->reada = 1;

	ret = btrfs_search_slot(trans, root, ins, path, 0, 0);
	if (ret < 0)
		goto error;

	if (path->slots[0] > 0) {
		path->slots[0]--;
	}

	l = btrfs_buffer_leaf(path->nodes[0]);
	btrfs_disk_key_to_cpu(&key, &l->items[path->slots[0]].key);
	/*
	 * a rare case, go back one key if we hit a block group item
	 * instead of an extent item
	 */
	if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY &&
	    key.objectid + key.offset >= search_start) {
		ins->objectid = key.objectid;
		ins->offset = key.offset - 1;
		btrfs_release_path(root, path);
		ret = btrfs_search_slot(trans, root, ins, path, 0, 0);
		if (ret < 0)
			goto error;

		if (path->slots[0] > 0) {
			path->slots[0]--;
		}
	}

	while (1) {
		l = btrfs_buffer_leaf(path->nodes[0]);
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(&l->header)) {
			if (start_found)
				limit = last_block +
					(block_group->key.offset >> 1);
			else
				limit = search_start +
					(block_group->key.offset >> 1);
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			if (!start_found) {
				ins->objectid = search_start;
				ins->offset = search_end - search_start;
				start_found = 1;
				goto check_pending;
			}
			ins->objectid = last_block > search_start ?
					last_block : search_start;
			ins->offset = search_end - ins->objectid;
			goto check_pending;
		}

		btrfs_disk_key_to_cpu(&key, &l->items[slot].key);
		if (key.objectid >= search_start && key.objectid > last_block &&
		    start_found) {
			if (last_block < search_start)
				last_block = search_start;
			hole_size = key.objectid - last_block;
			if (hole_size >= num_blocks) {
				ins->objectid = last_block;
				ins->offset = hole_size;
				goto check_pending;
			}
		}

		if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
			goto next;

		start_found = 1;
		last_block = key.objectid + key.offset;
		if (!full_scan && last_block >= block_group->key.objectid +
		    block_group->key.offset) {
			btrfs_release_path(root, path);
			search_start = block_group->key.objectid +
				block_group->key.offset * 2;
			goto new_group;
		}
next:
		path->slots[0]++;
		cond_resched();
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(root, path);
	BUG_ON(ins->objectid < search_start);

	if (ins->objectid + num_blocks >= search_end) {
		if (full_scan) {
			ret = -ENOSPC;
			goto error;
		}
		search_start = orig_search_start;
		if (wrapped) {
			if (!full_scan)
				total_needed -= empty_size;
			full_scan = 1;
		} else
			wrapped = 1;
		goto new_group;
	}
	for (test_block = ins->objectid;
	     test_block < ins->objectid + num_blocks; test_block++) {
		if (test_radix_bit(&info->pinned_radix, test_block) ||
		    test_radix_bit(&info->extent_ins_radix, test_block)) {
			search_start = test_block + 1;
			goto new_group;
		}
	}
	if (exclude_nr > 0 && (ins->objectid + num_blocks > exclude_start &&
	    ins->objectid < exclude_start + exclude_nr)) {
		search_start = exclude_start + exclude_nr;
		goto new_group;
	}
	if (!data) {
		block_group = btrfs_lookup_block_group(info, ins->objectid);
		if (block_group)
			trans->block_group = block_group;
	}
	ins->offset = num_blocks;
	btrfs_free_path(path);
	return 0;

new_group:
	if (search_start + num_blocks >= search_end) {
		search_start = orig_search_start;
		if (full_scan) {
			ret = -ENOSPC;
			goto error;
		}
		if (wrapped) {
			if (!full_scan)
				total_needed -= empty_size;
			full_scan = 1;
		} else
			wrapped = 1;
	}
	block_group = btrfs_lookup_block_group(info, search_start);
	cond_resched();
	if (!full_scan)
		block_group = btrfs_find_block_group(root, block_group,
						     search_start, data, 0);
	goto check_failed;

error:
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	return ret;
}

/*
 * finds a free extent and does all the dirty work required for allocation
 * returns the key for the extent through ins, and a tree buffer for
 * the first block of the extent through buf.
 *
 * returns 0 if everything worked, non-zero otherwise.
 */
int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, u64 owner,
		       u64 num_blocks, u64 empty_size, u64 hint_block,
		       u64 search_end, struct btrfs_key *ins, int data)
{
	int ret;
	int pending_ret;
	u64 super_blocks_used;
	u64 search_start = 0;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct btrfs_extent_item extent_item;

	btrfs_set_extent_refs(&extent_item, 1);
	btrfs_set_extent_owner(&extent_item, owner);

	WARN_ON(num_blocks < 1);
	ret = find_free_extent(trans, root, num_blocks, empty_size,
			       search_start, search_end, hint_block, ins,
			       trans->alloc_exclude_start,
			       trans->alloc_exclude_nr, data);
	BUG_ON(ret);
	if (ret)
		return ret;

	super_blocks_used = btrfs_super_blocks_used(&info->super_copy);
	btrfs_set_super_blocks_used(&info->super_copy, super_blocks_used +
				    num_blocks);

	if (root == extent_root) {
		BUG_ON(num_blocks != 1);
		set_radix_bit(&root->fs_info->extent_ins_radix, ins->objectid);
		goto update_block;
	}

	WARN_ON(trans->alloc_exclude_nr);
	trans->alloc_exclude_start = ins->objectid;
	trans->alloc_exclude_nr = ins->offset;
	ret = btrfs_insert_item(trans, extent_root, ins, &extent_item,
				sizeof(extent_item));

	trans->alloc_exclude_start = 0;
	trans->alloc_exclude_nr = 0;

	BUG_ON(ret);
	finish_current_insert(trans, extent_root);
	pending_ret = del_pending_extents(trans, extent_root);
	if (ret) {
		return ret;
	}
	if (pending_ret) {
		return pending_ret;
	}

update_block:
	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0,
				 data);
	BUG_ON(ret);
	return 0;
}

/*
 * helper function to allocate a block for a given tree
 * returns the tree buffer or NULL.
 */
struct buffer_head *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root, u64 hint,
					   u64 empty_size)
{
	struct btrfs_key ins;
	int ret;
	struct buffer_head *buf;

	ret = btrfs_alloc_extent(trans, root, root->root_key.objectid,
				 1, empty_size, hint,
				 (unsigned long)-1, &ins, 0);
	if (ret) {
		BUG_ON(ret > 0);
		return ERR_PTR(ret);
	}
	buf = btrfs_find_create_tree_block(root, ins.objectid);
	if (!buf) {
		btrfs_free_extent(trans, root, ins.objectid, 1, 0);
		return ERR_PTR(-ENOMEM);
	}
	WARN_ON(buffer_dirty(buf));
	set_buffer_uptodate(buf);
	set_buffer_checked(buf);
	set_buffer_defrag(buf);
	set_radix_bit(&trans->transaction->dirty_pages, buf->b_page->index);
	return buf;
}
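
/*
 * drop the reference counts on every file extent referenced by a leaf
 * that is going away
 */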
static int drop_leaf_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct buffer_head *cur)
{
	struct btrfs_disk_key *key;
	struct btrfs_leaf *leaf;
	struct btrfs_file_extent_item *fi;
	int i;
	int nritems;
	int ret;

	BUG_ON(!btrfs_is_leaf(btrfs_buffer_node(cur)));
	leaf = btrfs_buffer_leaf(cur);
	nritems = btrfs_header_nritems(&leaf->header);
	for (i = 0; i < nritems; i++) {
		u64 disk_blocknr;
		key = &leaf->items[i].key;
		if (btrfs_disk_key_type(key) != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(fi) == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/*
		 * FIXME make sure to insert a trans record that
		 * repeats the snapshot del on crash
		 */
		disk_blocknr = btrfs_file_extent_disk_blocknr(fi);
		if (disk_blocknr == 0)
			continue;
		ret = btrfs_free_extent(trans, root, disk_blocknr,
					btrfs_file_extent_disk_num_blocks(fi),
					0);
		BUG_ON(ret);
	}
	return 0;
}
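
/*
 * issue readahead for the children of a node that is about to be
 * walked, skipping blocks that are shared with other trees
 */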
static void reada_walk_down(struct btrfs_root *root,
			    struct btrfs_node *node)
{
	int i;
	u32 nritems;
	u64 blocknr;
	int ret;
	u32 refs;

	nritems = btrfs_header_nritems(&node->header);
	for (i = 0; i < nritems; i++) {
		blocknr = btrfs_node_blockptr(node, i);
		ret = lookup_extent_ref(NULL, root, blocknr, 1, &refs);
		BUG_ON(ret);
		if (refs != 1)
			continue;
		mutex_unlock(&root->fs_info->fs_mutex);
		ret = readahead_tree_block(root, blocknr);
		cond_resched();
		mutex_lock(&root->fs_info->fs_mutex);
		if (ret)
			break;
	}
}

/*
 * helper function for drop_snapshot, this walks down the tree dropping ref
 * counts as it goes.
 */
static int walk_down_tree(struct btrfs_trans_handle *trans, struct btrfs_root
			  *root, struct btrfs_path *path, int *level)
{
	struct buffer_head *next;
	struct buffer_head *cur;
	u64 blocknr;
	int ret;
	u32 refs;

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);
	ret = lookup_extent_ref(trans, root, bh_blocknr(path->nodes[*level]),
				1, &refs);
	BUG_ON(ret);
	if (refs > 1)
		goto out;

	/*
	 * walk down to the last node level and free all the leaves
	 */
	while(*level >= 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];
		if (*level > 0 && path->slots[*level] == 0)
			reada_walk_down(root, btrfs_buffer_node(cur));

		if (btrfs_header_level(btrfs_buffer_header(cur)) != *level)
			WARN_ON(1);

		if (path->slots[*level] >=
		    btrfs_header_nritems(btrfs_buffer_header(cur)))
			break;
		if (*level == 0) {
			ret = drop_leaf_ref(trans, root, cur);
			BUG_ON(ret);
			break;
		}
		blocknr = btrfs_node_blockptr(btrfs_buffer_node(cur),
					      path->slots[*level]);
		ret = lookup_extent_ref(trans, root, blocknr, 1, &refs);
		BUG_ON(ret);
		if (refs != 1) {
			path->slots[*level]++;
			ret = btrfs_free_extent(trans, root, blocknr, 1, 1);
			BUG_ON(ret);
			continue;
		}
		next = btrfs_find_tree_block(root, blocknr);
		if (!next || !buffer_uptodate(next)) {
			brelse(next);
			mutex_unlock(&root->fs_info->fs_mutex);
			next = read_tree_block(root, blocknr);
			mutex_lock(&root->fs_info->fs_mutex);

			/* we dropped the lock, check one more time */
			ret = lookup_extent_ref(trans, root, blocknr, 1, &refs);
			BUG_ON(ret);
			if (refs != 1) {
				path->slots[*level]++;
				brelse(next);
				ret = btrfs_free_extent(trans, root,
							blocknr, 1, 1);
				BUG_ON(ret);
				continue;
			}
		}
		WARN_ON(*level <= 0);
		if (path->nodes[*level-1])
			btrfs_block_release(root, path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(btrfs_buffer_header(next));
		path->slots[*level] = 0;
	}
out:
	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);
	ret = btrfs_free_extent(trans, root,
				bh_blocknr(path->nodes[*level]), 1, 1);
	btrfs_block_release(root, path->nodes[*level]);
	path->nodes[*level] = NULL;
	*level += 1;
	BUG_ON(ret);
	return 0;
}

/*
 * helper for dropping snapshots.  This walks back up the tree in the path
 * to find the first node higher up where we haven't yet gone through
 * all the slots
 */
static int walk_up_tree(struct btrfs_trans_handle *trans, struct btrfs_root
			*root, struct btrfs_path *path, int *level)
{
	int i;
	int slot;
	int ret;
	struct btrfs_root_item *root_item = &root->root_item;

	for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot < btrfs_header_nritems(
		    btrfs_buffer_header(path->nodes[i])) - 1) {
			struct btrfs_node *node;
			node = btrfs_buffer_node(path->nodes[i]);
			path->slots[i]++;
			*level = i;
			WARN_ON(*level == 0);
			memcpy(&root_item->drop_progress,
			       &node->ptrs[path->slots[i]].key,
			       sizeof(root_item->drop_progress));
			root_item->drop_level = i;
			return 0;
		} else {
			ret = btrfs_free_extent(trans, root,
						bh_blocknr(path->nodes[*level]),
						1, 1);
			BUG_ON(ret);
			btrfs_block_release(root, path->nodes[*level]);
			path->nodes[*level] = NULL;
			*level = i + 1;
		}
	}
	return 1;
}

/*
 * drop the reference count on the tree rooted at 'snap'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented.
 */
int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
			*root)
{
	int ret = 0;
	int wret;
	int level;
	struct btrfs_path *path;
	int i;
	int orig_level;
	struct btrfs_root_item *root_item = &root->root_item;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	level = btrfs_header_level(btrfs_buffer_header(root->node));
	orig_level = level;
	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		path->nodes[level] = root->node;
		path->slots[level] = 0;
	} else {
		struct btrfs_key key;
		struct btrfs_disk_key *found_key;
		struct btrfs_node *node;

		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		level = root_item->drop_level;
		path->lowest_level = level;
		wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (wret < 0) {
			ret = wret;
			goto out;
		}
		node = btrfs_buffer_node(path->nodes[level]);
		found_key = &node->ptrs[path->slots[level]].key;
		WARN_ON(memcmp(found_key, &root_item->drop_progress,
			       sizeof(*found_key)));
	}
	while(1) {
		wret = walk_down_tree(trans, root, path, &level);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;

		wret = walk_up_tree(trans, root, path, &level);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;
		ret = -EAGAIN;
		get_bh(root->node);
		break;
	}
	for (i = 0; i <= orig_level; i++) {
		if (path->nodes[i]) {
			btrfs_block_release(root, path->nodes[i]);
			path->nodes[i] = 0;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}
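
/*
 * delete every block group cache entry in a radix tree and free the
 * in-memory structures
 */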
static int free_block_group_radix(struct radix_tree_root *radix)
{
	int ret;
	struct btrfs_block_group_cache *cache[8];
	int i;

	while(1) {
		ret = radix_tree_gang_lookup(radix, (void **)cache, 0,
					     ARRAY_SIZE(cache));
		if (!ret)
			break;
		for (i = 0; i < ret; i++) {
			radix_tree_delete(radix, cache[i]->key.objectid +
					  cache[i]->key.offset - 1);
			kfree(cache[i]);
		}
	}
	return 0;
}

int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	int ret;
	int ret2;
	unsigned long gang[16];
	int i;

	ret = free_block_group_radix(&info->block_group_radix);
	ret2 = free_block_group_radix(&info->block_group_data_radix);
	if (ret)
		return ret;
	if (ret2)
		return ret2;

	while(1) {
		ret = find_first_radix_bit(&info->extent_map_radix,
					   gang, 0, ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++) {
			clear_radix_bit(&info->extent_map_radix, gang[i]);
		}
	}
	return 0;
}
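
/*
 * read all of the block group items out of the extent tree at mount
 * time and build the in-memory block group caches
 */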
int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	int err = 0;
	struct btrfs_block_group_item *bi;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct radix_tree_root *radix;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_leaf *leaf;
	u64 group_size_blocks;
	u64 used;

	group_size_blocks = BTRFS_BLOCK_GROUP_SIZE >>
		root->fs_info->sb->s_blocksize_bits;
	root = info->extent_root;
	key.objectid = 0;
	key.offset = group_size_blocks;
	key.flags = 0;
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while(1) {
		ret = btrfs_search_slot(NULL, info->extent_root,
					&key, path, 0, 0);
		if (ret != 0) {
			err = ret;
			break;
		}
		leaf = btrfs_buffer_leaf(path->nodes[0]);
		btrfs_disk_key_to_cpu(&found_key,
				      &leaf->items[path->slots[0]].key);
		cache = kmalloc(sizeof(*cache), GFP_NOFS);
		if (!cache) {
			err = -1;
			break;
		}

		bi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_block_group_item);
		if (bi->flags & BTRFS_BLOCK_GROUP_DATA) {
			radix = &info->block_group_data_radix;
			cache->data = 1;
		} else {
			radix = &info->block_group_radix;
			cache->data = 0;
		}

		memcpy(&cache->item, bi, sizeof(*bi));
		memcpy(&cache->key, &found_key, sizeof(found_key));
		cache->last_alloc = cache->key.objectid;
		cache->first_free = cache->key.objectid;
		cache->pinned = 0;
		cache->cached = 0;
		cache->radix = radix;

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(root, path);
		ret = radix_tree_insert(radix, found_key.objectid +
					found_key.offset - 1,
					(void *)cache);
		BUG_ON(ret);
		used = btrfs_block_group_used(bi);
		if (used < div_factor(key.offset, 8)) {
			radix_tree_tag_set(radix, found_key.objectid +
					   found_key.offset - 1,
					   BTRFS_BLOCK_GROUP_AVAIL);
		}
		if (key.objectid >=
		    btrfs_super_total_blocks(&info->super_copy))
			break;
	}

	btrfs_free_path(path);
	return 0;
}