/* extent-tree.c -- extent allocation tree routines */
  1. #include <linux/module.h>
  2. #include "ctree.h"
  3. #include "disk-io.h"
  4. #include "print-tree.h"
  5. #include "transaction.h"
/*
 * Forward declarations for helpers that are used before they are
 * defined later in this file.
 */
static int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
			    *orig_root, u64 num_blocks, u64 search_start, u64
			    search_end, struct btrfs_key *ins, int data);
static int finish_current_insert(struct btrfs_trans_handle *trans, struct
				 btrfs_root *extent_root);
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
			       btrfs_root *extent_root);
  13. static void reada_extent_leaves(struct btrfs_root *root,
  14. struct btrfs_path *path, u64 limit)
  15. {
  16. struct btrfs_node *node;
  17. int i;
  18. int nritems;
  19. u64 item_objectid;
  20. u64 blocknr;
  21. int slot;
  22. int ret;
  23. if (!path->nodes[1])
  24. return;
  25. node = btrfs_buffer_node(path->nodes[1]);
  26. slot = path->slots[1] + 1;
  27. nritems = btrfs_header_nritems(&node->header);
  28. for (i = slot; i < nritems && i < slot + 8; i++) {
  29. item_objectid = btrfs_disk_key_objectid(&node->ptrs[i].key);
  30. if (item_objectid > limit)
  31. break;
  32. blocknr = btrfs_node_blockptr(node, i);
  33. ret = readahead_tree_block(root, blocknr);
  34. if (ret)
  35. break;
  36. }
  37. }
  38. static int cache_block_group(struct btrfs_root *root,
  39. struct btrfs_block_group_cache *block_group)
  40. {
  41. struct btrfs_path *path;
  42. int ret;
  43. struct btrfs_key key;
  44. struct btrfs_leaf *leaf;
  45. struct radix_tree_root *extent_radix;
  46. int slot;
  47. u64 i;
  48. u64 last = 0;
  49. u64 hole_size;
  50. u64 limit;
  51. int found = 0;
  52. root = root->fs_info->extent_root;
  53. extent_radix = &root->fs_info->extent_map_radix;
  54. if (block_group->cached)
  55. return 0;
  56. if (block_group->data)
  57. return 0;
  58. path = btrfs_alloc_path();
  59. if (!path)
  60. return -ENOMEM;
  61. printk("cache block group %Lu\n", block_group->key.objectid);
  62. key.objectid = block_group->key.objectid;
  63. key.flags = 0;
  64. key.offset = 0;
  65. btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
  66. ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
  67. if (ret < 0)
  68. return ret;
  69. if (ret && path->slots[0] > 0)
  70. path->slots[0]--;
  71. limit = block_group->key.objectid + block_group->key.offset;
  72. reada_extent_leaves(root, path, limit);
  73. while(1) {
  74. leaf = btrfs_buffer_leaf(path->nodes[0]);
  75. slot = path->slots[0];
  76. if (slot >= btrfs_header_nritems(&leaf->header)) {
  77. reada_extent_leaves(root, path, limit);
  78. ret = btrfs_next_leaf(root, path);
  79. if (ret == 0) {
  80. continue;
  81. } else {
  82. if (found) {
  83. hole_size = block_group->key.objectid +
  84. block_group->key.offset - last;
  85. } else {
  86. last = block_group->key.objectid;
  87. hole_size = block_group->key.offset;
  88. }
  89. for (i = 0; i < hole_size; i++) {
  90. set_radix_bit(extent_radix,
  91. last + i);
  92. }
  93. break;
  94. }
  95. }
  96. btrfs_disk_key_to_cpu(&key, &leaf->items[slot].key);
  97. if (key.objectid >= block_group->key.objectid +
  98. block_group->key.offset) {
  99. if (found) {
  100. hole_size = block_group->key.objectid +
  101. block_group->key.offset - last;
  102. } else {
  103. last = block_group->key.objectid;
  104. hole_size = block_group->key.offset;
  105. }
  106. for (i = 0; i < hole_size; i++) {
  107. set_radix_bit(extent_radix, last + i);
  108. }
  109. break;
  110. }
  111. if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
  112. if (!found) {
  113. last = key.objectid + key.offset;
  114. found = 1;
  115. } else {
  116. hole_size = key.objectid - last;
  117. for (i = 0; i < hole_size; i++) {
  118. set_radix_bit(extent_radix, last + i);
  119. }
  120. last = key.objectid + key.offset;
  121. }
  122. }
  123. path->slots[0]++;
  124. }
  125. block_group->cached = 1;
  126. btrfs_free_path(path);
  127. return 0;
  128. }
/*
 * Find the block group cache entry covering @blocknr, checking the
 * metadata radix first and then the data radix.  Groups are indexed by
 * a block near their end, so a gang lookup starting at @blocknr
 * returns the first candidate group at or after it; the range check
 * then confirms the group actually contains @blocknr.
 *
 * NOTE(review): the upper-bound check uses <=, so a blocknr of exactly
 * objectid + offset (one past the group) also matches -- confirm this
 * off-by-one is intended.
 *
 * Returns NULL (after a WARN_ON and some debug printks) when no group
 * covers @blocknr.
 */
static struct btrfs_block_group_cache *lookup_block_group(struct
							  btrfs_fs_info *info,
							  u64 blocknr)
{
	struct btrfs_block_group_cache *block_group;
	int ret;

	ret = radix_tree_gang_lookup(&info->block_group_radix,
				     (void **)&block_group,
				     blocknr, 1);
	if (ret) {
		if (block_group->key.objectid <= blocknr && blocknr <=
		    block_group->key.objectid + block_group->key.offset)
			return block_group;
	}
	ret = radix_tree_gang_lookup(&info->block_group_data_radix,
				     (void **)&block_group,
				     blocknr, 1);
	if (ret) {
		if (block_group->key.objectid <= blocknr && blocknr <=
		    block_group->key.objectid + block_group->key.offset)
			return block_group;
	}
	WARN_ON(1);
	printk("lookup_block_group fails for blocknr %Lu\n", blocknr);
	printk("last ret was %d\n", ret);
	if (ret) {
		printk("last block group was %Lu %Lu\n", block_group->key.objectid, block_group->key.offset);
	}
	return NULL;
}
  159. static u64 leaf_range(struct btrfs_root *root)
  160. {
  161. u64 size = BTRFS_LEAF_DATA_SIZE(root);
  162. size = size / (sizeof(struct btrfs_extent_item) +
  163. sizeof(struct btrfs_item));
  164. return size;
  165. }
/*
 * Scan the free-extent radix for a starting block for an allocation of
 * @num blocks at or after @search_start, constrained to the block
 * group in *@cache_ret.  May advance *@cache_ret to a later group when
 * the current one is exhausted.  Data groups are not tracked in the
 * radix, so for them (and when the radix has no usable bits) we fall
 * back to the group's last_alloc hint.
 */
static u64 find_search_start(struct btrfs_root *root,
			     struct btrfs_block_group_cache **cache_ret,
			     u64 search_start, int num)
{
	unsigned long gang[8];
	int ret;
	struct btrfs_block_group_cache *cache = *cache_ret;
	u64 last = max(search_start, cache->key.objectid);

	if (cache->data)
		goto out;
	if (num > 1) {
		/* multi-block requests start past the prealloc mark */
		last = max(last, cache->last_prealloc);
	}
again:
	cache_block_group(root, cache);
	while (1) {
		ret = find_first_radix_bit(&root->fs_info->extent_map_radix,
					   gang, last, ARRAY_SIZE(gang));
		if (!ret)
			goto out;
		last = gang[ret-1] + 1;
		if (num > 1) {
			/* want a full gang of free blocks, clustered
			 * within roughly one leaf's worth of range */
			if (ret != ARRAY_SIZE(gang)) {
				goto new_group;
			}
			if (gang[ret-1] - gang[0] > leaf_range(root)) {
				continue;
			}
		}
		if (gang[0] >= cache->key.objectid + cache->key.offset) {
			/* first free bit lies beyond this group */
			goto new_group;
		}
		return gang[0];
	}
out:
	return max(cache->last_alloc, search_start);
new_group:
	cache = lookup_block_group(root->fs_info, last + cache->key.offset - 1);
	if (!cache) {
		/* no further group: fall back to the original hint */
		return max((*cache_ret)->last_alloc, search_start);
	}
	cache = btrfs_find_block_group(root, cache,
				       last + cache->key.offset - 1, 0, 0);
	*cache_ret = cache;
	goto again;
}
  212. struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
  213. struct btrfs_block_group_cache
  214. *hint, u64 search_start,
  215. int data, int owner)
  216. {
  217. struct btrfs_block_group_cache *cache[8];
  218. struct btrfs_block_group_cache *found_group = NULL;
  219. struct btrfs_fs_info *info = root->fs_info;
  220. struct radix_tree_root *radix;
  221. struct radix_tree_root *swap_radix;
  222. u64 used;
  223. u64 last = 0;
  224. u64 hint_last;
  225. int i;
  226. int ret;
  227. int full_search = 0;
  228. int factor = 8;
  229. int data_swap = 0;
  230. if (!owner)
  231. factor = 5;
  232. if (data) {
  233. radix = &info->block_group_data_radix;
  234. swap_radix = &info->block_group_radix;
  235. } else {
  236. radix = &info->block_group_radix;
  237. swap_radix = &info->block_group_data_radix;
  238. }
  239. if (search_start) {
  240. struct btrfs_block_group_cache *shint;
  241. shint = lookup_block_group(info, search_start);
  242. if (shint->data == data) {
  243. used = btrfs_block_group_used(&shint->item);
  244. if (used + shint->pinned <
  245. (shint->key.offset * factor) / 10) {
  246. return shint;
  247. }
  248. }
  249. }
  250. if (hint && hint->data == data) {
  251. used = btrfs_block_group_used(&hint->item);
  252. if (used + hint->pinned < (hint->key.offset * factor) / 10) {
  253. return hint;
  254. }
  255. if (used >= (hint->key.offset * 8) / 10) {
  256. radix_tree_tag_clear(radix,
  257. hint->key.objectid +
  258. hint->key.offset - 1,
  259. BTRFS_BLOCK_GROUP_AVAIL);
  260. }
  261. last = hint->key.offset * 3;
  262. if (hint->key.objectid >= last)
  263. last = max(search_start + hint->key.offset - 1,
  264. hint->key.objectid - last);
  265. else
  266. last = hint->key.objectid + hint->key.offset;
  267. hint_last = last;
  268. } else {
  269. if (hint)
  270. hint_last = max(hint->key.objectid, search_start);
  271. else
  272. hint_last = search_start;
  273. last = hint_last;
  274. }
  275. while(1) {
  276. ret = radix_tree_gang_lookup_tag(radix, (void **)cache,
  277. last, ARRAY_SIZE(cache),
  278. BTRFS_BLOCK_GROUP_AVAIL);
  279. if (!ret)
  280. break;
  281. for (i = 0; i < ret; i++) {
  282. last = cache[i]->key.objectid +
  283. cache[i]->key.offset;
  284. used = btrfs_block_group_used(&cache[i]->item);
  285. if (used + cache[i]->pinned <
  286. (cache[i]->key.offset * factor) / 10) {
  287. found_group = cache[i];
  288. goto found;
  289. }
  290. if (used >= (cache[i]->key.offset * 8) / 10) {
  291. radix_tree_tag_clear(radix,
  292. cache[i]->key.objectid +
  293. cache[i]->key.offset - 1,
  294. BTRFS_BLOCK_GROUP_AVAIL);
  295. }
  296. }
  297. cond_resched();
  298. }
  299. last = hint_last;
  300. again:
  301. while(1) {
  302. ret = radix_tree_gang_lookup(radix, (void **)cache,
  303. last, ARRAY_SIZE(cache));
  304. if (!ret)
  305. break;
  306. for (i = 0; i < ret; i++) {
  307. last = cache[i]->key.objectid +
  308. cache[i]->key.offset;
  309. used = btrfs_block_group_used(&cache[i]->item);
  310. if (used + cache[i]->pinned < cache[i]->key.offset) {
  311. found_group = cache[i];
  312. goto found;
  313. }
  314. if (used >= cache[i]->key.offset) {
  315. radix_tree_tag_clear(radix,
  316. cache[i]->key.objectid +
  317. cache[i]->key.offset - 1,
  318. BTRFS_BLOCK_GROUP_AVAIL);
  319. }
  320. }
  321. cond_resched();
  322. }
  323. if (!full_search) {
  324. last = search_start;
  325. full_search = 1;
  326. goto again;
  327. }
  328. if (!data_swap) {
  329. struct radix_tree_root *tmp = radix;
  330. data_swap = 1;
  331. radix = swap_radix;
  332. swap_radix = tmp;
  333. last = search_start;
  334. goto again;
  335. }
  336. if (!found_group) {
  337. printk("find block group bailing to zero data %d\n", data);
  338. ret = radix_tree_gang_lookup(radix,
  339. (void **)&found_group, 0, 1);
  340. if (ret == 0) {
  341. ret = radix_tree_gang_lookup(swap_radix,
  342. (void **)&found_group,
  343. 0, 1);
  344. }
  345. BUG_ON(ret != 1);
  346. }
  347. found:
  348. return found_group;
  349. }
  350. int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
  351. struct btrfs_root *root,
  352. u64 blocknr, u64 num_blocks)
  353. {
  354. struct btrfs_path *path;
  355. int ret;
  356. struct btrfs_key key;
  357. struct btrfs_leaf *l;
  358. struct btrfs_extent_item *item;
  359. struct btrfs_key ins;
  360. u32 refs;
  361. find_free_extent(trans, root->fs_info->extent_root, 0, 0, (u64)-1,
  362. &ins, 0);
  363. path = btrfs_alloc_path();
  364. BUG_ON(!path);
  365. btrfs_init_path(path);
  366. key.objectid = blocknr;
  367. key.flags = 0;
  368. btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
  369. key.offset = num_blocks;
  370. ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
  371. 0, 1);
  372. if (ret != 0) {
  373. printk("can't find block %Lu %Lu\n", blocknr, num_blocks);
  374. BUG();
  375. }
  376. BUG_ON(ret != 0);
  377. l = btrfs_buffer_leaf(path->nodes[0]);
  378. item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
  379. refs = btrfs_extent_refs(item);
  380. btrfs_set_extent_refs(item, refs + 1);
  381. btrfs_mark_buffer_dirty(path->nodes[0]);
  382. btrfs_release_path(root->fs_info->extent_root, path);
  383. btrfs_free_path(path);
  384. finish_current_insert(trans, root->fs_info->extent_root);
  385. del_pending_extents(trans, root->fs_info->extent_root);
  386. return 0;
  387. }
  388. static int lookup_extent_ref(struct btrfs_trans_handle *trans,
  389. struct btrfs_root *root, u64 blocknr,
  390. u64 num_blocks, u32 *refs)
  391. {
  392. struct btrfs_path *path;
  393. int ret;
  394. struct btrfs_key key;
  395. struct btrfs_leaf *l;
  396. struct btrfs_extent_item *item;
  397. path = btrfs_alloc_path();
  398. btrfs_init_path(path);
  399. key.objectid = blocknr;
  400. key.offset = num_blocks;
  401. key.flags = 0;
  402. btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
  403. ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
  404. 0, 0);
  405. if (ret != 0)
  406. BUG();
  407. l = btrfs_buffer_leaf(path->nodes[0]);
  408. item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
  409. *refs = btrfs_extent_refs(item);
  410. btrfs_release_path(root->fs_info->extent_root, path);
  411. btrfs_free_path(path);
  412. return 0;
  413. }
  414. int btrfs_inc_root_ref(struct btrfs_trans_handle *trans,
  415. struct btrfs_root *root)
  416. {
  417. return btrfs_inc_extent_ref(trans, root, bh_blocknr(root->node), 1);
  418. }
/*
 * Increment the extent refs for everything @buf points to: for an
 * interior node, each child block; for a leaf, the disk blocks of each
 * non-inline file extent item.  Roots that don't reference-count
 * (ref_cows == 0) are skipped entirely.
 */
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct buffer_head *buf)
{
	u64 blocknr;
	struct btrfs_node *buf_node;
	struct btrfs_leaf *buf_leaf;
	struct btrfs_disk_key *key;
	struct btrfs_file_extent_item *fi;
	int i;
	int leaf;
	int ret;

	if (!root->ref_cows)
		return 0;
	buf_node = btrfs_buffer_node(buf);
	leaf = btrfs_is_leaf(buf_node);
	buf_leaf = btrfs_buffer_leaf(buf);
	for (i = 0; i < btrfs_header_nritems(&buf_node->header); i++) {
		if (leaf) {
			u64 disk_blocknr;
			key = &buf_leaf->items[i].key;
			/* only file extent items reference disk blocks */
			if (btrfs_disk_key_type(key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf_leaf, i,
					    struct btrfs_file_extent_item);
			/* inline extents live inside the leaf itself */
			if (btrfs_file_extent_type(fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			/* skip extents with no allocated disk block */
			disk_blocknr = btrfs_file_extent_disk_blocknr(fi);
			if (disk_blocknr == 0)
				continue;
			ret = btrfs_inc_extent_ref(trans, root, disk_blocknr,
				    btrfs_file_extent_disk_num_blocks(fi));
			BUG_ON(ret);
		} else {
			blocknr = btrfs_node_blockptr(buf_node, i);
			ret = btrfs_inc_extent_ref(trans, root, blocknr, 1);
			BUG_ON(ret);
		}
	}
	return 0;
}
  460. static int write_one_cache_group(struct btrfs_trans_handle *trans,
  461. struct btrfs_root *root,
  462. struct btrfs_path *path,
  463. struct btrfs_block_group_cache *cache)
  464. {
  465. int ret;
  466. int pending_ret;
  467. struct btrfs_root *extent_root = root->fs_info->extent_root;
  468. struct btrfs_block_group_item *bi;
  469. struct btrfs_key ins;
  470. find_free_extent(trans, extent_root, 0, 0, (u64)-1, &ins, 0);
  471. ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
  472. BUG_ON(ret);
  473. bi = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]), path->slots[0],
  474. struct btrfs_block_group_item);
  475. memcpy(bi, &cache->item, sizeof(*bi));
  476. mark_buffer_dirty(path->nodes[0]);
  477. btrfs_release_path(extent_root, path);
  478. finish_current_insert(trans, extent_root);
  479. pending_ret = del_pending_extents(trans, extent_root);
  480. if (ret)
  481. return ret;
  482. if (pending_ret)
  483. return pending_ret;
  484. if (cache->data)
  485. cache->last_alloc = cache->first_free;
  486. return 0;
  487. }
  488. static int write_dirty_block_radix(struct btrfs_trans_handle *trans,
  489. struct btrfs_root *root,
  490. struct radix_tree_root *radix)
  491. {
  492. struct btrfs_block_group_cache *cache[8];
  493. int ret;
  494. int err = 0;
  495. int werr = 0;
  496. int i;
  497. struct btrfs_path *path;
  498. path = btrfs_alloc_path();
  499. if (!path)
  500. return -ENOMEM;
  501. while(1) {
  502. ret = radix_tree_gang_lookup_tag(radix, (void **)cache,
  503. 0, ARRAY_SIZE(cache),
  504. BTRFS_BLOCK_GROUP_DIRTY);
  505. if (!ret)
  506. break;
  507. for (i = 0; i < ret; i++) {
  508. radix_tree_tag_clear(radix, cache[i]->key.objectid +
  509. cache[i]->key.offset - 1,
  510. BTRFS_BLOCK_GROUP_DIRTY);
  511. err = write_one_cache_group(trans, root,
  512. path, cache[i]);
  513. if (err)
  514. werr = err;
  515. }
  516. }
  517. btrfs_free_path(path);
  518. return werr;
  519. }
  520. int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
  521. struct btrfs_root *root)
  522. {
  523. int ret;
  524. int ret2;
  525. ret = write_dirty_block_radix(trans, root,
  526. &root->fs_info->block_group_radix);
  527. ret2 = write_dirty_block_radix(trans, root,
  528. &root->fs_info->block_group_data_radix);
  529. if (ret)
  530. return ret;
  531. if (ret2)
  532. return ret2;
  533. return 0;
  534. }
/*
 * Adjust the used-block count of every block group touched by
 * [@blocknr, @blocknr + @num).  When @alloc is set, usage goes up and
 * the blocks leave the free-extent radix (metadata groups only); an
 * allocation may also retype a less-than-half-full group between data
 * and metadata to match @data.  When freeing, usage goes down, the
 * first_free hint may be rewound, and @mark_free re-marks the blocks
 * free in the radix.
 *
 * Returns 0, or -1 if a block has no owning group.
 */
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 blocknr, u64 num, int alloc, int mark_free,
			      int data)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num;
	u64 old_val;
	u64 block_in_group;
	u64 i;
	int ret;

	while (total) {
		cache = lookup_block_group(info, blocknr);
		if (!cache) {
			printk(KERN_CRIT "blocknr %Lu lookup failed\n",
			       blocknr);
			return -1;
		}
		block_in_group = blocknr - cache->key.objectid;
		WARN_ON(block_in_group > cache->key.offset);
		/* the group item will need to be written back */
		radix_tree_tag_set(cache->radix, cache->key.objectid +
				   cache->key.offset - 1,
				   BTRFS_BLOCK_GROUP_DIRTY);

		old_val = btrfs_block_group_used(&cache->item);
		/* clamp this pass to the end of the current group */
		num = min(total, cache->key.offset - block_in_group);
		if (alloc) {
			if (blocknr > cache->last_alloc)
				cache->last_alloc = blocknr;
			if (!cache->data) {
				/* metadata blocks leave the free map */
				for (i = 0; i < num; i++) {
					clear_radix_bit(&info->extent_map_radix,
							blocknr + i);
				}
			}
			if (cache->data != data &&
			    old_val < cache->key.offset / 2) {
				/* group is under half full: flip it to
				 * the requested type and re-index it in
				 * the matching radix */
				printk("changing block group %Lu from %d to %d\n", cache->key.objectid, cache->data, data);
				cache->data = data;
				radix_tree_delete(cache->radix,
						  cache->key.objectid +
						  cache->key.offset - 1);

				if (data) {
					cache->radix =
						&info->block_group_data_radix;
					cache->item.flags |=
						BTRFS_BLOCK_GROUP_DATA;
				} else {
					cache->radix = &info->block_group_radix;
					cache->item.flags &=
						~BTRFS_BLOCK_GROUP_DATA;
				}
				ret = radix_tree_insert(cache->radix,
							cache->key.objectid +
							cache->key.offset - 1,
							(void *)cache);
			}
			old_val += num;
		} else {
			old_val -= num;
			if (blocknr < cache->first_free)
				cache->first_free = blocknr;
			if (!cache->data && mark_free) {
				for (i = 0; i < num; i++) {
					set_radix_bit(&info->extent_map_radix,
						      blocknr + i);
				}
			}
			if (old_val < cache->key.offset / 2 &&
			    old_val + num >= cache->key.offset / 2) {
				/* usage just crossed below half full:
				 * tag the group as available again */
				printk("group %Lu now available\n", cache->key.objectid);
				radix_tree_tag_set(cache->radix,
						   cache->key.objectid +
						   cache->key.offset - 1,
						   BTRFS_BLOCK_GROUP_AVAIL);
			}
		}
		btrfs_set_block_group_used(&cache->item, old_val);
		total -= num;
		blocknr += num;
	}
	return 0;
}
  618. static int try_remove_page(struct address_space *mapping, unsigned long index)
  619. {
  620. int ret;
  621. ret = invalidate_mapping_pages(mapping, index, index);
  622. return ret;
  623. }
  624. int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, struct
  625. btrfs_root *root)
  626. {
  627. unsigned long gang[8];
  628. struct inode *btree_inode = root->fs_info->btree_inode;
  629. struct btrfs_block_group_cache *block_group;
  630. u64 first = 0;
  631. int ret;
  632. int i;
  633. struct radix_tree_root *pinned_radix = &root->fs_info->pinned_radix;
  634. struct radix_tree_root *extent_radix = &root->fs_info->extent_map_radix;
  635. while(1) {
  636. ret = find_first_radix_bit(pinned_radix, gang, 0,
  637. ARRAY_SIZE(gang));
  638. if (!ret)
  639. break;
  640. if (!first)
  641. first = gang[0];
  642. for (i = 0; i < ret; i++) {
  643. clear_radix_bit(pinned_radix, gang[i]);
  644. block_group = lookup_block_group(root->fs_info,
  645. gang[i]);
  646. if (block_group) {
  647. WARN_ON(block_group->pinned == 0);
  648. block_group->pinned--;
  649. if (gang[i] < block_group->last_alloc)
  650. block_group->last_alloc = gang[i];
  651. if (gang[i] < block_group->last_prealloc)
  652. block_group->last_prealloc = gang[i];
  653. if (!block_group->data)
  654. set_radix_bit(extent_radix, gang[i]);
  655. }
  656. try_remove_page(btree_inode->i_mapping,
  657. gang[i] << (PAGE_CACHE_SHIFT -
  658. btree_inode->i_blkbits));
  659. }
  660. }
  661. return 0;
  662. }
/*
 * Insert an extent item for every block queued in
 * extent_tree_insert[] (blocks handed out for the extent tree's own
 * use) and bump the superblock's used-block count once per block.
 * Both the insert queue and the prealloc queue are reset when done.
 */
static int finish_current_insert(struct btrfs_trans_handle *trans, struct
				 btrfs_root *extent_root)
{
	struct btrfs_key ins;
	struct btrfs_extent_item extent_item;
	int i;
	int ret;
	u64 super_blocks_used;
	struct btrfs_fs_info *info = extent_root->fs_info;

	/* every queued block becomes a 1-block extent owned by the
	 * extent root */
	btrfs_set_extent_refs(&extent_item, 1);
	ins.offset = 1;
	ins.flags = 0;
	btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
	btrfs_set_extent_owner(&extent_item, extent_root->root_key.objectid);

	for (i = 0; i < extent_root->fs_info->extent_tree_insert_nr; i++) {
		ins.objectid = extent_root->fs_info->extent_tree_insert[i];
		super_blocks_used = btrfs_super_blocks_used(info->disk_super);
		btrfs_set_super_blocks_used(info->disk_super,
					    super_blocks_used + 1);
		ret = btrfs_insert_item(trans, extent_root, &ins, &extent_item,
					sizeof(extent_item));
		BUG_ON(ret);
	}
	extent_root->fs_info->extent_tree_insert_nr = 0;
	extent_root->fs_info->extent_tree_prealloc_nr = 0;
	return 0;
}
/*
 * Mark @blocknr as pinned, or -- when @pending is set -- queue it on
 * the pending-del radix for deferred processing.  A block whose header
 * generation matches the running transaction was never visible on
 * disk, so it does not need pinning at all.
 */
static int pin_down_block(struct btrfs_root *root, u64 blocknr, int pending)
{
	int err;
	struct btrfs_header *header;
	struct buffer_head *bh;

	if (!pending) {
		bh = btrfs_find_tree_block(root, blocknr);
		if (bh) {
			if (buffer_uptodate(bh)) {
				u64 transid =
				    root->fs_info->running_transaction->transid;
				header = btrfs_buffer_header(bh);
				if (btrfs_header_generation(header) ==
				    transid) {
					/* allocated this transaction:
					 * safe to skip pinning */
					btrfs_block_release(root, bh);
					return 0;
				}
			}
			btrfs_block_release(root, bh);
		}
		err = set_radix_bit(&root->fs_info->pinned_radix, blocknr);
		if (!err) {
			/* newly pinned: charge it to its block group */
			struct btrfs_block_group_cache *cache;
			cache = lookup_block_group(root->fs_info, blocknr);
			if (cache)
				cache->pinned++;
		}
	} else {
		err = set_radix_bit(&root->fs_info->pending_del_radix, blocknr);
	}
	BUG_ON(err < 0);
	return 0;
}
  723. /*
  724. * remove an extent from the root, returns 0 on success
  725. */
/*
 * Drop one reference on [@blocknr, @blocknr + @num_blocks).  When the
 * refcount reaches zero the extent item is deleted, the superblock's
 * used count is reduced, and the block group accounting is updated;
 * @pin additionally keeps the blocks pinned until the transaction
 * commits, and @mark_free returns them to the free-extent radix.
 */
static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
			 *root, u64 blocknr, u64 num_blocks, int pin,
			 int mark_free)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	int ret;
	struct btrfs_extent_item *ei;
	struct btrfs_key ins;
	u32 refs;

	key.objectid = blocknr;
	key.flags = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	key.offset = num_blocks;

	/* keep the extent tree's block reserve topped up first */
	find_free_extent(trans, root, 0, 0, (u64)-1, &ins, 0);
	path = btrfs_alloc_path();
	BUG_ON(!path);
	btrfs_init_path(path);
	ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
	if (ret) {
		printk("failed to find %Lu\n", key.objectid);
		btrfs_print_tree(extent_root, extent_root->node);
		printk("failed to find %Lu\n", key.objectid);
		BUG();
	}
	ei = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]), path->slots[0],
			    struct btrfs_extent_item);
	/* raw on-disk field check; comparison against zero is
	 * endian-invariant */
	BUG_ON(ei->refs == 0);
	refs = btrfs_extent_refs(ei) - 1;
	btrfs_set_extent_refs(ei, refs);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (refs == 0) {
		u64 super_blocks_used;

		if (pin) {
			ret = pin_down_block(root, blocknr, 0);
			BUG_ON(ret);
		}

		super_blocks_used = btrfs_super_blocks_used(info->disk_super);
		btrfs_set_super_blocks_used(info->disk_super,
					    super_blocks_used - num_blocks);
		ret = btrfs_del_item(trans, extent_root, path);
		if (ret)
			BUG();
		ret = update_block_group(trans, root, blocknr, num_blocks, 0,
					 mark_free, 0);
		BUG_ON(ret);
	}
	btrfs_free_path(path);
	finish_current_insert(trans, extent_root);
	return ret;
}
  779. /*
  780. * find all the blocks marked as pending in the radix tree and remove
  781. * them from the extent map
  782. */
/*
 * Move every block queued in the pending-del radix into the pinned
 * radix (charging the owning group's pinned count on first pin) and
 * then free its extent item via __free_extent.  Returns the last error
 * seen, or 0.
 */
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
			       btrfs_root *extent_root)
{
	int ret;
	int wret;
	int err = 0;
	unsigned long gang[4];
	int i;
	struct radix_tree_root *pending_radix;
	struct radix_tree_root *pinned_radix;
	struct btrfs_block_group_cache *cache;

	pending_radix = &extent_root->fs_info->pending_del_radix;
	pinned_radix = &extent_root->fs_info->pinned_radix;

	while (1) {
		ret = find_first_radix_bit(pending_radix, gang, 0,
					   ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++) {
			wret = set_radix_bit(pinned_radix, gang[i]);
			if (wret == 0) {
				/* first time this block is pinned */
				cache = lookup_block_group(extent_root->fs_info,
							   gang[i]);
				if (cache)
					cache->pinned++;
			}
			if (wret < 0) {
				printk(KERN_CRIT "set_radix_bit, err %d\n",
				       wret);
				BUG_ON(wret < 0);
			}
			wret = clear_radix_bit(pending_radix, gang[i]);
			BUG_ON(wret);
			/* pinned above, so __free_extent need not pin */
			wret = __free_extent(trans, extent_root,
					     gang[i], 1, 0, 0);
			if (wret)
				err = wret;
		}
	}
	return err;
}
  824. /*
  825. * remove an extent from the root, returns 0 on success
  826. */
  827. int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
  828. *root, u64 blocknr, u64 num_blocks, int pin)
  829. {
  830. struct btrfs_root *extent_root = root->fs_info->extent_root;
  831. int pending_ret;
  832. int ret;
  833. if (root == extent_root) {
  834. pin_down_block(root, blocknr, 1);
  835. return 0;
  836. }
  837. ret = __free_extent(trans, root, blocknr, num_blocks, pin, pin == 0);
  838. pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
  839. return ret ? ret : pending_ret;
  840. }
  841. /*
  842. * walks the btree of allocated extents and find a hole of a given size.
  843. * The key ins is changed to record the hole:
  844. * ins->objectid == block start
  845. * ins->flags = BTRFS_EXTENT_ITEM_KEY
  846. * ins->offset == number of blocks
  847. * Any available blocks before search_start are skipped.
  848. */
  849. static int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
  850. *orig_root, u64 num_blocks, u64 search_start, u64
  851. search_end, struct btrfs_key *ins, int data)
  852. {
  853. struct btrfs_path *path;
  854. struct btrfs_key key;
  855. int ret;
  856. u64 hole_size = 0;
  857. int slot = 0;
  858. u64 last_block = 0;
  859. u64 test_block;
  860. u64 orig_search_start = search_start;
  861. int start_found;
  862. struct btrfs_leaf *l;
  863. struct btrfs_root * root = orig_root->fs_info->extent_root;
  864. struct btrfs_fs_info *info = root->fs_info;
  865. int total_needed = num_blocks;
  866. int total_found = 0;
  867. int fill_prealloc = 0;
  868. int level;
  869. struct btrfs_block_group_cache *block_group;
  870. int full_scan = 0;
  871. u64 limit;
  872. path = btrfs_alloc_path();
  873. ins->flags = 0;
  874. btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
  875. level = btrfs_header_level(btrfs_buffer_header(root->node));
  876. if (num_blocks == 0) {
  877. fill_prealloc = 1;
  878. num_blocks = 1;
  879. total_needed = (min(level + 1, BTRFS_MAX_LEVEL) + 2) * 3;
  880. }
  881. if (search_end == (u64)-1)
  882. search_end = btrfs_super_total_blocks(info->disk_super);
  883. if (search_start) {
  884. block_group = lookup_block_group(info, search_start);
  885. block_group = btrfs_find_block_group(root, block_group,
  886. search_start, data, 1);
  887. } else {
  888. block_group = btrfs_find_block_group(root,
  889. trans->block_group, 0,
  890. data, 1);
  891. }
  892. check_failed:
  893. if (!block_group->data)
  894. search_start = find_search_start(root, &block_group,
  895. search_start, total_needed);
  896. else
  897. search_start = max(block_group->last_alloc, search_start);
  898. btrfs_init_path(path);
  899. ins->objectid = search_start;
  900. ins->offset = 0;
  901. start_found = 0;
  902. ret = btrfs_search_slot(trans, root, ins, path, 0, 0);
  903. if (ret < 0)
  904. goto error;
  905. if (path->slots[0] > 0) {
  906. path->slots[0]--;
  907. }
  908. l = btrfs_buffer_leaf(path->nodes[0]);
  909. btrfs_disk_key_to_cpu(&key, &l->items[path->slots[0]].key);
  910. /*
  911. * a rare case, go back one key if we hit a block group item
  912. * instead of an extent item
  913. */
  914. if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY &&
  915. key.objectid + key.offset >= search_start) {
  916. ins->objectid = key.objectid;
  917. ins->offset = key.offset - 1;
  918. btrfs_release_path(root, path);
  919. ret = btrfs_search_slot(trans, root, ins, path, 0, 0);
  920. if (ret < 0)
  921. goto error;
  922. if (path->slots[0] > 0) {
  923. path->slots[0]--;
  924. }
  925. }
  926. while (1) {
  927. l = btrfs_buffer_leaf(path->nodes[0]);
  928. slot = path->slots[0];
  929. if (slot >= btrfs_header_nritems(&l->header)) {
  930. if (fill_prealloc) {
  931. info->extent_tree_prealloc_nr = 0;
  932. total_found = 0;
  933. }
  934. if (start_found)
  935. limit = last_block +
  936. block_group->key.offset / 2;
  937. else
  938. limit = search_start +
  939. block_group->key.offset / 2;
  940. ret = btrfs_next_leaf(root, path);
  941. if (ret == 0)
  942. continue;
  943. if (ret < 0)
  944. goto error;
  945. if (!start_found) {
  946. ins->objectid = search_start;
  947. ins->offset = search_end - search_start;
  948. start_found = 1;
  949. goto check_pending;
  950. }
  951. ins->objectid = last_block > search_start ?
  952. last_block : search_start;
  953. ins->offset = search_end - ins->objectid;
  954. goto check_pending;
  955. }
  956. btrfs_disk_key_to_cpu(&key, &l->items[slot].key);
  957. if (key.objectid >= search_start && key.objectid > last_block &&
  958. start_found) {
  959. if (last_block < search_start)
  960. last_block = search_start;
  961. hole_size = key.objectid - last_block;
  962. if (hole_size >= num_blocks) {
  963. ins->objectid = last_block;
  964. ins->offset = hole_size;
  965. goto check_pending;
  966. }
  967. }
  968. if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
  969. goto next;
  970. start_found = 1;
  971. last_block = key.objectid + key.offset;
  972. if (last_block >= block_group->key.objectid +
  973. block_group->key.offset) {
  974. btrfs_release_path(root, path);
  975. search_start = block_group->key.objectid +
  976. block_group->key.offset * 2;
  977. goto new_group;
  978. }
  979. next:
  980. path->slots[0]++;
  981. cond_resched();
  982. }
  983. // FIXME -ENOSPC
  984. check_pending:
  985. /* we have to make sure we didn't find an extent that has already
  986. * been allocated by the map tree or the original allocation
  987. */
  988. btrfs_release_path(root, path);
  989. BUG_ON(ins->objectid < search_start);
  990. if (ins->objectid + num_blocks >= search_end) {
  991. if (full_scan)
  992. return -ENOSPC;
  993. search_start = orig_search_start;
  994. full_scan = 1;
  995. goto new_group;
  996. }
  997. for (test_block = ins->objectid;
  998. test_block < ins->objectid + num_blocks; test_block++) {
  999. if (test_radix_bit(&info->pinned_radix, test_block)) {
  1000. search_start = test_block + 1;
  1001. goto new_group;
  1002. }
  1003. }
  1004. if (!fill_prealloc && info->extent_tree_insert_nr) {
  1005. u64 last =
  1006. info->extent_tree_insert[info->extent_tree_insert_nr - 1];
  1007. if (ins->objectid + num_blocks >
  1008. info->extent_tree_insert[0] &&
  1009. ins->objectid <= last) {
  1010. search_start = last + 1;
  1011. WARN_ON(!full_scan);
  1012. goto new_group;
  1013. }
  1014. }
  1015. if (!fill_prealloc && info->extent_tree_prealloc_nr) {
  1016. u64 first =
  1017. info->extent_tree_prealloc[info->extent_tree_prealloc_nr - 1];
  1018. if (ins->objectid + num_blocks > first &&
  1019. ins->objectid <= info->extent_tree_prealloc[0]) {
  1020. search_start = info->extent_tree_prealloc[0] + 1;
  1021. WARN_ON(!full_scan);
  1022. goto new_group;
  1023. }
  1024. }
  1025. if (fill_prealloc) {
  1026. int nr;
  1027. test_block = ins->objectid;
  1028. if (test_block - info->extent_tree_prealloc[total_needed - 1] >=
  1029. leaf_range(root)) {
  1030. total_found = 0;
  1031. info->extent_tree_prealloc_nr = total_found;
  1032. }
  1033. while(test_block < ins->objectid + ins->offset &&
  1034. total_found < total_needed) {
  1035. nr = total_needed - total_found - 1;
  1036. BUG_ON(nr < 0);
  1037. info->extent_tree_prealloc[nr] = test_block;
  1038. total_found++;
  1039. test_block++;
  1040. }
  1041. if (total_found < total_needed) {
  1042. search_start = test_block;
  1043. goto new_group;
  1044. }
  1045. info->extent_tree_prealloc_nr = total_found;
  1046. }
  1047. if (!data) {
  1048. block_group = lookup_block_group(info, ins->objectid);
  1049. if (block_group) {
  1050. if (fill_prealloc)
  1051. block_group->last_prealloc =
  1052. info->extent_tree_prealloc[total_needed-1];
  1053. else
  1054. trans->block_group = block_group;
  1055. }
  1056. }
  1057. ins->offset = num_blocks;
  1058. btrfs_free_path(path);
  1059. return 0;
  1060. new_group:
  1061. if (search_start + num_blocks >= search_end) {
  1062. search_start = orig_search_start;
  1063. printk("doing full scan!\n");
  1064. full_scan = 1;
  1065. }
  1066. block_group = lookup_block_group(info, search_start);
  1067. if (!full_scan)
  1068. block_group = btrfs_find_block_group(root, block_group,
  1069. search_start, data, 0);
  1070. cond_resched();
  1071. goto check_failed;
  1072. error:
  1073. btrfs_release_path(root, path);
  1074. btrfs_free_path(path);
  1075. return ret;
  1076. }
  1077. /*
  1078. * finds a free extent and does all the dirty work required for allocation
  1079. * returns the key for the extent through ins, and a tree buffer for
  1080. * the first block of the extent through buf.
  1081. *
  1082. * returns 0 if everything worked, non-zero otherwise.
  1083. */
  1084. int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
  1085. struct btrfs_root *root, u64 owner,
  1086. u64 num_blocks, u64 search_start,
  1087. u64 search_end, struct btrfs_key *ins, int data)
  1088. {
  1089. int ret;
  1090. int pending_ret;
  1091. u64 super_blocks_used;
  1092. struct btrfs_fs_info *info = root->fs_info;
  1093. struct btrfs_root *extent_root = info->extent_root;
  1094. struct btrfs_extent_item extent_item;
  1095. struct btrfs_key prealloc_key;
  1096. btrfs_set_extent_refs(&extent_item, 1);
  1097. btrfs_set_extent_owner(&extent_item, owner);
  1098. if (root == extent_root) {
  1099. int nr;
  1100. BUG_ON(info->extent_tree_prealloc_nr == 0);
  1101. BUG_ON(num_blocks != 1);
  1102. ins->offset = 1;
  1103. info->extent_tree_prealloc_nr--;
  1104. nr = info->extent_tree_prealloc_nr;
  1105. ins->objectid = info->extent_tree_prealloc[nr];
  1106. info->extent_tree_insert[info->extent_tree_insert_nr++] =
  1107. ins->objectid;
  1108. ret = update_block_group(trans, root,
  1109. ins->objectid, ins->offset, 1, 0, 0);
  1110. BUG_ON(ret);
  1111. return 0;
  1112. }
  1113. /*
  1114. * if we're doing a data allocation, preallocate room in the
  1115. * extent tree first. This way the extent tree blocks end up
  1116. * in the correct block group.
  1117. */
  1118. if (data) {
  1119. ret = find_free_extent(trans, root, 0, 0,
  1120. search_end, &prealloc_key, 0);
  1121. if (ret) {
  1122. return ret;
  1123. }
  1124. if (prealloc_key.objectid + prealloc_key.offset >= search_end) {
  1125. int nr = info->extent_tree_prealloc_nr;
  1126. search_end = info->extent_tree_prealloc[nr - 1] - 1;
  1127. } else {
  1128. search_start = info->extent_tree_prealloc[0] + 1;
  1129. }
  1130. }
  1131. /* do the real allocation */
  1132. ret = find_free_extent(trans, root, num_blocks, search_start,
  1133. search_end, ins, data);
  1134. if (ret) {
  1135. return ret;
  1136. }
  1137. /*
  1138. * if we're doing a metadata allocation, preallocate space in the
  1139. * extent tree second. This way, we don't create a tiny hole
  1140. * in the allocation map between any unused preallocation blocks
  1141. * and the metadata block we're actually allocating. On disk,
  1142. * it'll go:
  1143. * [block we've allocated], [used prealloc 1], [ unused prealloc ]
  1144. * The unused prealloc will get reused the next time around.
  1145. */
  1146. if (!data) {
  1147. if (ins->objectid + ins->offset >= search_end)
  1148. search_end = ins->objectid - 1;
  1149. else
  1150. search_start = ins->objectid + ins->offset;
  1151. ret = find_free_extent(trans, root, 0, search_start,
  1152. search_end, &prealloc_key, 0);
  1153. if (ret) {
  1154. return ret;
  1155. }
  1156. }
  1157. super_blocks_used = btrfs_super_blocks_used(info->disk_super);
  1158. btrfs_set_super_blocks_used(info->disk_super, super_blocks_used +
  1159. num_blocks);
  1160. ret = btrfs_insert_item(trans, extent_root, ins, &extent_item,
  1161. sizeof(extent_item));
  1162. finish_current_insert(trans, extent_root);
  1163. pending_ret = del_pending_extents(trans, extent_root);
  1164. if (ret) {
  1165. return ret;
  1166. }
  1167. if (pending_ret) {
  1168. return pending_ret;
  1169. }
  1170. ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0,
  1171. data);
  1172. return 0;
  1173. }
  1174. /*
  1175. * helper function to allocate a block for a given tree
  1176. * returns the tree buffer or NULL.
  1177. */
  1178. struct buffer_head *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
  1179. struct btrfs_root *root, u64 hint)
  1180. {
  1181. struct btrfs_key ins;
  1182. int ret;
  1183. struct buffer_head *buf;
  1184. ret = btrfs_alloc_extent(trans, root, root->root_key.objectid,
  1185. 1, hint, (unsigned long)-1, &ins, 0);
  1186. if (ret) {
  1187. BUG();
  1188. return NULL;
  1189. }
  1190. BUG_ON(ret);
  1191. buf = btrfs_find_create_tree_block(root, ins.objectid);
  1192. set_buffer_uptodate(buf);
  1193. set_buffer_checked(buf);
  1194. set_radix_bit(&trans->transaction->dirty_pages, buf->b_page->index);
  1195. return buf;
  1196. }
  1197. static int drop_leaf_ref(struct btrfs_trans_handle *trans,
  1198. struct btrfs_root *root, struct buffer_head *cur)
  1199. {
  1200. struct btrfs_disk_key *key;
  1201. struct btrfs_leaf *leaf;
  1202. struct btrfs_file_extent_item *fi;
  1203. int i;
  1204. int nritems;
  1205. int ret;
  1206. BUG_ON(!btrfs_is_leaf(btrfs_buffer_node(cur)));
  1207. leaf = btrfs_buffer_leaf(cur);
  1208. nritems = btrfs_header_nritems(&leaf->header);
  1209. for (i = 0; i < nritems; i++) {
  1210. u64 disk_blocknr;
  1211. key = &leaf->items[i].key;
  1212. if (btrfs_disk_key_type(key) != BTRFS_EXTENT_DATA_KEY)
  1213. continue;
  1214. fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
  1215. if (btrfs_file_extent_type(fi) == BTRFS_FILE_EXTENT_INLINE)
  1216. continue;
  1217. /*
  1218. * FIXME make sure to insert a trans record that
  1219. * repeats the snapshot del on crash
  1220. */
  1221. disk_blocknr = btrfs_file_extent_disk_blocknr(fi);
  1222. if (disk_blocknr == 0)
  1223. continue;
  1224. ret = btrfs_free_extent(trans, root, disk_blocknr,
  1225. btrfs_file_extent_disk_num_blocks(fi),
  1226. 0);
  1227. BUG_ON(ret);
  1228. }
  1229. return 0;
  1230. }
/*
 * helper function for drop_snapshot, this walks down the tree dropping ref
 * counts as it goes.
 *
 * On entry path->nodes[*level] is the subtree root to descend into.
 * Returns 0; *level is updated to the level whose node was just freed
 * plus one, so the caller (walk_up_tree) can resume from there.
 */
static int walk_down_tree(struct btrfs_trans_handle *trans, struct btrfs_root
			  *root, struct btrfs_path *path, int *level)
{
	struct buffer_head *next;
	struct buffer_head *cur;
	u64 blocknr;
	int ret;
	u32 refs;

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);
	ret = lookup_extent_ref(trans, root, bh_blocknr(path->nodes[*level]),
				1, &refs);
	BUG_ON(ret);
	/* this subtree is shared with another root: just drop our ref below */
	if (refs > 1)
		goto out;
	/*
	 * walk down to the last node level and free all the leaves
	 */
	while(*level >= 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];
		if (btrfs_header_level(btrfs_buffer_header(cur)) != *level)
			WARN_ON(1);
		/* ran out of slots at this level; pop back up */
		if (path->slots[*level] >=
		    btrfs_header_nritems(btrfs_buffer_header(cur)))
			break;
		if (*level == 0) {
			/* leaves drop their file extent refs, then we free
			 * the leaf itself at out: */
			ret = drop_leaf_ref(trans, root, cur);
			BUG_ON(ret);
			break;
		}
		blocknr = btrfs_node_blockptr(btrfs_buffer_node(cur),
					      path->slots[*level]);
		ret = lookup_extent_ref(trans, root, blocknr, 1, &refs);
		BUG_ON(ret);
		/* shared child: drop one ref (pinned) and skip descending */
		if (refs != 1) {
			path->slots[*level]++;
			ret = btrfs_free_extent(trans, root, blocknr, 1, 1);
			BUG_ON(ret);
			continue;
		}
		/* sole owner: descend into the child */
		next = read_tree_block(root, blocknr);
		WARN_ON(*level <= 0);
		if (path->nodes[*level-1])
			btrfs_block_release(root, path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(btrfs_buffer_header(next));
		path->slots[*level] = 0;
	}
out:
	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);
	/* free the block at the current level and hand control back up */
	ret = btrfs_free_extent(trans, root,
				bh_blocknr(path->nodes[*level]), 1, 1);
	btrfs_block_release(root, path->nodes[*level]);
	path->nodes[*level] = NULL;
	*level += 1;
	BUG_ON(ret);
	return 0;
}
  1296. /*
  1297. * helper for dropping snapshots. This walks back up the tree in the path
  1298. * to find the first node higher up where we haven't yet gone through
  1299. * all the slots
  1300. */
  1301. static int walk_up_tree(struct btrfs_trans_handle *trans, struct btrfs_root
  1302. *root, struct btrfs_path *path, int *level)
  1303. {
  1304. int i;
  1305. int slot;
  1306. int ret;
  1307. for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
  1308. slot = path->slots[i];
  1309. if (slot < btrfs_header_nritems(
  1310. btrfs_buffer_header(path->nodes[i])) - 1) {
  1311. path->slots[i]++;
  1312. *level = i;
  1313. return 0;
  1314. } else {
  1315. ret = btrfs_free_extent(trans, root,
  1316. bh_blocknr(path->nodes[*level]),
  1317. 1, 1);
  1318. BUG_ON(ret);
  1319. btrfs_block_release(root, path->nodes[*level]);
  1320. path->nodes[*level] = NULL;
  1321. *level = i + 1;
  1322. }
  1323. }
  1324. return 1;
  1325. }
  1326. /*
  1327. * drop the reference count on the tree rooted at 'snap'. This traverses
  1328. * the tree freeing any blocks that have a ref count of zero after being
  1329. * decremented.
  1330. */
  1331. int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
  1332. *root, struct buffer_head *snap)
  1333. {
  1334. int ret = 0;
  1335. int wret;
  1336. int level;
  1337. struct btrfs_path *path;
  1338. int i;
  1339. int orig_level;
  1340. path = btrfs_alloc_path();
  1341. BUG_ON(!path);
  1342. btrfs_init_path(path);
  1343. level = btrfs_header_level(btrfs_buffer_header(snap));
  1344. orig_level = level;
  1345. path->nodes[level] = snap;
  1346. path->slots[level] = 0;
  1347. while(1) {
  1348. wret = walk_down_tree(trans, root, path, &level);
  1349. if (wret > 0)
  1350. break;
  1351. if (wret < 0)
  1352. ret = wret;
  1353. wret = walk_up_tree(trans, root, path, &level);
  1354. if (wret > 0)
  1355. break;
  1356. if (wret < 0)
  1357. ret = wret;
  1358. btrfs_btree_balance_dirty(root);
  1359. }
  1360. for (i = 0; i <= orig_level; i++) {
  1361. if (path->nodes[i]) {
  1362. btrfs_block_release(root, path->nodes[i]);
  1363. }
  1364. }
  1365. btrfs_free_path(path);
  1366. return ret;
  1367. }
  1368. static int free_block_group_radix(struct radix_tree_root *radix)
  1369. {
  1370. int ret;
  1371. struct btrfs_block_group_cache *cache[8];
  1372. int i;
  1373. while(1) {
  1374. ret = radix_tree_gang_lookup(radix, (void **)cache, 0,
  1375. ARRAY_SIZE(cache));
  1376. if (!ret)
  1377. break;
  1378. for (i = 0; i < ret; i++) {
  1379. radix_tree_delete(radix, cache[i]->key.objectid +
  1380. cache[i]->key.offset - 1);
  1381. kfree(cache[i]);
  1382. }
  1383. }
  1384. return 0;
  1385. }
  1386. int btrfs_free_block_groups(struct btrfs_fs_info *info)
  1387. {
  1388. int ret;
  1389. int ret2;
  1390. unsigned long gang[16];
  1391. int i;
  1392. ret = free_block_group_radix(&info->block_group_radix);
  1393. ret2 = free_block_group_radix(&info->block_group_data_radix);
  1394. if (ret)
  1395. return ret;
  1396. if (ret2)
  1397. return ret2;
  1398. while(1) {
  1399. ret = find_first_radix_bit(&info->extent_map_radix,
  1400. gang, 0, ARRAY_SIZE(gang));
  1401. if (!ret)
  1402. break;
  1403. for (i = 0; i < ret; i++) {
  1404. clear_radix_bit(&info->extent_map_radix, gang[i]);
  1405. }
  1406. }
  1407. return 0;
  1408. }
  1409. int btrfs_read_block_groups(struct btrfs_root *root)
  1410. {
  1411. struct btrfs_path *path;
  1412. int ret;
  1413. int err = 0;
  1414. struct btrfs_block_group_item *bi;
  1415. struct btrfs_block_group_cache *cache;
  1416. struct btrfs_fs_info *info = root->fs_info;
  1417. struct radix_tree_root *radix;
  1418. struct btrfs_key key;
  1419. struct btrfs_key found_key;
  1420. struct btrfs_leaf *leaf;
  1421. u64 group_size_blocks = BTRFS_BLOCK_GROUP_SIZE / root->blocksize;
  1422. u64 used;
  1423. root = info->extent_root;
  1424. key.objectid = 0;
  1425. key.offset = group_size_blocks;
  1426. key.flags = 0;
  1427. btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
  1428. path = btrfs_alloc_path();
  1429. if (!path)
  1430. return -ENOMEM;
  1431. while(1) {
  1432. ret = btrfs_search_slot(NULL, info->extent_root,
  1433. &key, path, 0, 0);
  1434. if (ret != 0) {
  1435. err = ret;
  1436. break;
  1437. }
  1438. leaf = btrfs_buffer_leaf(path->nodes[0]);
  1439. btrfs_disk_key_to_cpu(&found_key,
  1440. &leaf->items[path->slots[0]].key);
  1441. cache = kmalloc(sizeof(*cache), GFP_NOFS);
  1442. if (!cache) {
  1443. err = -1;
  1444. break;
  1445. }
  1446. bi = btrfs_item_ptr(leaf, path->slots[0],
  1447. struct btrfs_block_group_item);
  1448. if (bi->flags & BTRFS_BLOCK_GROUP_DATA) {
  1449. radix = &info->block_group_data_radix;
  1450. cache->data = 1;
  1451. } else {
  1452. radix = &info->block_group_radix;
  1453. cache->data = 0;
  1454. }
  1455. memcpy(&cache->item, bi, sizeof(*bi));
  1456. memcpy(&cache->key, &found_key, sizeof(found_key));
  1457. cache->last_alloc = cache->key.objectid;
  1458. cache->first_free = cache->key.objectid;
  1459. cache->last_prealloc = cache->key.objectid;
  1460. cache->pinned = 0;
  1461. cache->cached = 0;
  1462. cache->radix = radix;
  1463. key.objectid = found_key.objectid + found_key.offset;
  1464. btrfs_release_path(root, path);
  1465. ret = radix_tree_insert(radix, found_key.objectid +
  1466. found_key.offset - 1,
  1467. (void *)cache);
  1468. BUG_ON(ret);
  1469. used = btrfs_block_group_used(bi);
  1470. if (used < (key.offset * 8) / 10) {
  1471. radix_tree_tag_set(radix, found_key.objectid +
  1472. found_key.offset - 1,
  1473. BTRFS_BLOCK_GROUP_AVAIL);
  1474. }
  1475. if (key.objectid >=
  1476. btrfs_super_total_blocks(info->disk_super))
  1477. break;
  1478. }
  1479. btrfs_free_path(path);
  1480. return 0;
  1481. }