extent-tree.c 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778
  1. /*
  2. * Copyright (C) 2007 Oracle. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public
  6. * License v2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. *
  13. * You should have received a copy of the GNU General Public
  14. * License along with this program; if not, write to the
  15. * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  16. * Boston, MA 02111-1307, USA.
  17. */
  18. #include <linux/sched.h>
  19. #include "ctree.h"
  20. #include "disk-io.h"
  21. #include "print-tree.h"
  22. #include "transaction.h"
  23. static int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
  24. *orig_root, u64 num_blocks, u64 empty_size,
  25. u64 search_start,
  26. u64 search_end, u64 hint_block,
  27. struct btrfs_key *ins, u64 exclude_start,
  28. u64 exclude_nr, int data);
  29. static int finish_current_insert(struct btrfs_trans_handle *trans, struct
  30. btrfs_root *extent_root);
  31. static int del_pending_extents(struct btrfs_trans_handle *trans, struct
  32. btrfs_root *extent_root);
  33. static int cache_block_group(struct btrfs_root *root,
  34. struct btrfs_block_group_cache *block_group)
  35. {
  36. struct btrfs_path *path;
  37. int ret;
  38. struct btrfs_key key;
  39. struct btrfs_leaf *leaf;
  40. struct radix_tree_root *extent_radix;
  41. int slot;
  42. u64 i;
  43. u64 last = 0;
  44. u64 hole_size;
  45. u64 limit;
  46. int found = 0;
  47. root = root->fs_info->extent_root;
  48. extent_radix = &root->fs_info->extent_map_radix;
  49. if (block_group->cached)
  50. return 0;
  51. if (block_group->data)
  52. return 0;
  53. path = btrfs_alloc_path();
  54. if (!path)
  55. return -ENOMEM;
  56. path->reada = 1;
  57. key.objectid = block_group->key.objectid;
  58. key.flags = 0;
  59. key.offset = 0;
  60. btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
  61. ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
  62. if (ret < 0)
  63. return ret;
  64. if (ret && path->slots[0] > 0)
  65. path->slots[0]--;
  66. limit = block_group->key.objectid + block_group->key.offset;
  67. while(1) {
  68. leaf = btrfs_buffer_leaf(path->nodes[0]);
  69. slot = path->slots[0];
  70. if (slot >= btrfs_header_nritems(&leaf->header)) {
  71. ret = btrfs_next_leaf(root, path);
  72. if (ret < 0)
  73. goto err;
  74. if (ret == 0) {
  75. continue;
  76. } else {
  77. if (found) {
  78. hole_size = block_group->key.objectid +
  79. block_group->key.offset - last;
  80. } else {
  81. last = block_group->key.objectid;
  82. hole_size = block_group->key.offset;
  83. }
  84. for (i = 0; i < hole_size; i++) {
  85. set_radix_bit(extent_radix,
  86. last + i);
  87. }
  88. break;
  89. }
  90. }
  91. btrfs_disk_key_to_cpu(&key, &leaf->items[slot].key);
  92. if (key.objectid >= block_group->key.objectid +
  93. block_group->key.offset) {
  94. if (found) {
  95. hole_size = block_group->key.objectid +
  96. block_group->key.offset - last;
  97. } else {
  98. last = block_group->key.objectid;
  99. hole_size = block_group->key.offset;
  100. }
  101. for (i = 0; i < hole_size; i++) {
  102. set_radix_bit(extent_radix, last + i);
  103. }
  104. break;
  105. }
  106. if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
  107. if (!found) {
  108. last = key.objectid + key.offset;
  109. found = 1;
  110. } else {
  111. hole_size = key.objectid - last;
  112. for (i = 0; i < hole_size; i++) {
  113. set_radix_bit(extent_radix, last + i);
  114. }
  115. last = key.objectid + key.offset;
  116. }
  117. }
  118. path->slots[0]++;
  119. }
  120. block_group->cached = 1;
  121. err:
  122. btrfs_free_path(path);
  123. return 0;
  124. }
  125. struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
  126. btrfs_fs_info *info,
  127. u64 blocknr)
  128. {
  129. struct btrfs_block_group_cache *block_group;
  130. int ret;
  131. ret = radix_tree_gang_lookup(&info->block_group_radix,
  132. (void **)&block_group,
  133. blocknr, 1);
  134. if (ret) {
  135. if (block_group->key.objectid <= blocknr && blocknr <=
  136. block_group->key.objectid + block_group->key.offset)
  137. return block_group;
  138. }
  139. ret = radix_tree_gang_lookup(&info->block_group_data_radix,
  140. (void **)&block_group,
  141. blocknr, 1);
  142. if (ret) {
  143. if (block_group->key.objectid <= blocknr && blocknr <=
  144. block_group->key.objectid + block_group->key.offset)
  145. return block_group;
  146. }
  147. return NULL;
  148. }
  149. static u64 leaf_range(struct btrfs_root *root)
  150. {
  151. u64 size = BTRFS_LEAF_DATA_SIZE(root);
  152. do_div(size, sizeof(struct btrfs_extent_item) +
  153. sizeof(struct btrfs_item));
  154. return size;
  155. }
/*
 * Scan the free-extent bitmap for a place to allocate, starting at
 * search_start inside *cache_ret.  For multi-block allocations (num > 1)
 * a contiguous-looking run of free bits is required; if the current
 * group cannot provide one, the search hops to the next group and
 * *cache_ret is updated to the group actually used.
 *
 * Returns a candidate start block.  On any failure it falls back to
 * max(last_alloc, search_start) rather than reporting an error.
 */
static u64 find_search_start(struct btrfs_root *root,
			     struct btrfs_block_group_cache **cache_ret,
			     u64 search_start, int num)
{
	unsigned long gang[8];
	int ret;
	struct btrfs_block_group_cache *cache = *cache_ret;
	u64 last = max(search_start, cache->key.objectid);

	/* data groups are not tracked in the extent map radix */
	if (cache->data)
		goto out;
	if (num > 1) {
		/* multi-block allocations start past the prealloc area */
		last = max(last, cache->last_prealloc);
	}
again:
	/* make sure the group's free bits are in the radix */
	ret = cache_block_group(root, cache);
	if (ret)
		goto out;
	while(1) {
		ret = find_first_radix_bit(&root->fs_info->extent_map_radix,
					   gang, last, ARRAY_SIZE(gang));
		if (!ret)
			goto out;
		last = gang[ret-1] + 1;
		if (num > 1) {
			/* fewer than a full gang of free bits: too sparse,
			 * try another group */
			if (ret != ARRAY_SIZE(gang)) {
				goto new_group;
			}
			/* bits spread wider than one leaf's worth: keep
			 * scanning forward for a denser run */
			if (gang[ret-1] - gang[0] > leaf_range(root)) {
				continue;
			}
		}
		/* first free bit is past this group's end */
		if (gang[0] >= cache->key.objectid + cache->key.offset) {
			goto new_group;
		}
		return gang[0];
	}
out:
	return max(cache->last_alloc, search_start);
new_group:
	/* look up the group containing a block one group-size ahead */
	cache = btrfs_lookup_block_group(root->fs_info,
					 last + cache->key.offset - 1);
	if (!cache) {
		return max((*cache_ret)->last_alloc, search_start);
	}
	cache = btrfs_find_block_group(root, cache,
				       last + cache->key.offset - 1, 0, 0);
	*cache_ret = cache;
	goto again;
}
  205. static u64 div_factor(u64 num, int factor)
  206. {
  207. num *= factor;
  208. do_div(num, 10);
  209. return num;
  210. }
  211. struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
  212. struct btrfs_block_group_cache
  213. *hint, u64 search_start,
  214. int data, int owner)
  215. {
  216. struct btrfs_block_group_cache *cache[8];
  217. struct btrfs_block_group_cache *found_group = NULL;
  218. struct btrfs_fs_info *info = root->fs_info;
  219. struct radix_tree_root *radix;
  220. struct radix_tree_root *swap_radix;
  221. u64 used;
  222. u64 last = 0;
  223. u64 hint_last;
  224. int i;
  225. int ret;
  226. int full_search = 0;
  227. int factor = 8;
  228. int data_swap = 0;
  229. if (!owner)
  230. factor = 5;
  231. if (data) {
  232. radix = &info->block_group_data_radix;
  233. swap_radix = &info->block_group_radix;
  234. } else {
  235. radix = &info->block_group_radix;
  236. swap_radix = &info->block_group_data_radix;
  237. }
  238. if (search_start) {
  239. struct btrfs_block_group_cache *shint;
  240. shint = btrfs_lookup_block_group(info, search_start);
  241. if (shint->data == data) {
  242. used = btrfs_block_group_used(&shint->item);
  243. if (used + shint->pinned <
  244. div_factor(shint->key.offset, factor)) {
  245. return shint;
  246. }
  247. }
  248. }
  249. if (hint && hint->data == data) {
  250. used = btrfs_block_group_used(&hint->item);
  251. if (used + hint->pinned <
  252. div_factor(hint->key.offset, factor)) {
  253. return hint;
  254. }
  255. if (used >= div_factor(hint->key.offset, 8)) {
  256. radix_tree_tag_clear(radix,
  257. hint->key.objectid +
  258. hint->key.offset - 1,
  259. BTRFS_BLOCK_GROUP_AVAIL);
  260. }
  261. last = hint->key.offset * 3;
  262. if (hint->key.objectid >= last)
  263. last = max(search_start + hint->key.offset - 1,
  264. hint->key.objectid - last);
  265. else
  266. last = hint->key.objectid + hint->key.offset;
  267. hint_last = last;
  268. } else {
  269. if (hint)
  270. hint_last = max(hint->key.objectid, search_start);
  271. else
  272. hint_last = search_start;
  273. last = hint_last;
  274. }
  275. while(1) {
  276. ret = radix_tree_gang_lookup_tag(radix, (void **)cache,
  277. last, ARRAY_SIZE(cache),
  278. BTRFS_BLOCK_GROUP_AVAIL);
  279. if (!ret)
  280. break;
  281. for (i = 0; i < ret; i++) {
  282. last = cache[i]->key.objectid +
  283. cache[i]->key.offset;
  284. used = btrfs_block_group_used(&cache[i]->item);
  285. if (used + cache[i]->pinned <
  286. div_factor(cache[i]->key.offset, factor)) {
  287. found_group = cache[i];
  288. goto found;
  289. }
  290. if (used >= div_factor(cache[i]->key.offset, 8)) {
  291. radix_tree_tag_clear(radix,
  292. cache[i]->key.objectid +
  293. cache[i]->key.offset - 1,
  294. BTRFS_BLOCK_GROUP_AVAIL);
  295. }
  296. }
  297. cond_resched();
  298. }
  299. last = hint_last;
  300. again:
  301. while(1) {
  302. ret = radix_tree_gang_lookup(radix, (void **)cache,
  303. last, ARRAY_SIZE(cache));
  304. if (!ret)
  305. break;
  306. for (i = 0; i < ret; i++) {
  307. last = cache[i]->key.objectid +
  308. cache[i]->key.offset;
  309. used = btrfs_block_group_used(&cache[i]->item);
  310. if (used + cache[i]->pinned < cache[i]->key.offset) {
  311. found_group = cache[i];
  312. goto found;
  313. }
  314. if (used >= cache[i]->key.offset) {
  315. radix_tree_tag_clear(radix,
  316. cache[i]->key.objectid +
  317. cache[i]->key.offset - 1,
  318. BTRFS_BLOCK_GROUP_AVAIL);
  319. }
  320. }
  321. cond_resched();
  322. }
  323. if (!full_search) {
  324. last = search_start;
  325. full_search = 1;
  326. goto again;
  327. }
  328. if (!data_swap) {
  329. struct radix_tree_root *tmp = radix;
  330. data_swap = 1;
  331. radix = swap_radix;
  332. swap_radix = tmp;
  333. last = search_start;
  334. goto again;
  335. }
  336. if (!found_group) {
  337. ret = radix_tree_gang_lookup(radix,
  338. (void **)&found_group, 0, 1);
  339. if (ret == 0) {
  340. ret = radix_tree_gang_lookup(swap_radix,
  341. (void **)&found_group,
  342. 0, 1);
  343. }
  344. BUG_ON(ret != 1);
  345. }
  346. found:
  347. return found_group;
  348. }
  349. int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
  350. struct btrfs_root *root,
  351. u64 blocknr, u64 num_blocks)
  352. {
  353. struct btrfs_path *path;
  354. int ret;
  355. struct btrfs_key key;
  356. struct btrfs_leaf *l;
  357. struct btrfs_extent_item *item;
  358. struct btrfs_key ins;
  359. u32 refs;
  360. path = btrfs_alloc_path();
  361. if (!path)
  362. return -ENOMEM;
  363. ret = find_free_extent(trans, root->fs_info->extent_root, 0, 0, 0,
  364. (u64)-1, 0, &ins, 0, 0, 0);
  365. if (ret) {
  366. btrfs_free_path(path);
  367. return ret;
  368. }
  369. key.objectid = blocknr;
  370. key.flags = 0;
  371. btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
  372. key.offset = num_blocks;
  373. ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
  374. 0, 1);
  375. if (ret < 0)
  376. return ret;
  377. if (ret != 0) {
  378. BUG();
  379. }
  380. BUG_ON(ret != 0);
  381. l = btrfs_buffer_leaf(path->nodes[0]);
  382. item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
  383. refs = btrfs_extent_refs(item);
  384. btrfs_set_extent_refs(item, refs + 1);
  385. btrfs_mark_buffer_dirty(path->nodes[0]);
  386. btrfs_release_path(root->fs_info->extent_root, path);
  387. btrfs_free_path(path);
  388. finish_current_insert(trans, root->fs_info->extent_root);
  389. del_pending_extents(trans, root->fs_info->extent_root);
  390. return 0;
  391. }
  392. static int lookup_extent_ref(struct btrfs_trans_handle *trans,
  393. struct btrfs_root *root, u64 blocknr,
  394. u64 num_blocks, u32 *refs)
  395. {
  396. struct btrfs_path *path;
  397. int ret;
  398. struct btrfs_key key;
  399. struct btrfs_leaf *l;
  400. struct btrfs_extent_item *item;
  401. path = btrfs_alloc_path();
  402. key.objectid = blocknr;
  403. key.offset = num_blocks;
  404. key.flags = 0;
  405. btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
  406. ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
  407. 0, 0);
  408. if (ret < 0)
  409. goto out;
  410. if (ret != 0)
  411. BUG();
  412. l = btrfs_buffer_leaf(path->nodes[0]);
  413. item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
  414. *refs = btrfs_extent_refs(item);
  415. out:
  416. btrfs_free_path(path);
  417. return 0;
  418. }
  419. int btrfs_inc_root_ref(struct btrfs_trans_handle *trans,
  420. struct btrfs_root *root)
  421. {
  422. return btrfs_inc_extent_ref(trans, root, bh_blocknr(root->node), 1);
  423. }
/*
 * Take a reference on everything buf points at: for a leaf, every real
 * (non-inline, non-hole) file extent; for a node, every child block.
 * Only roots with ref_cows set share blocks and need this.
 *
 * On failure, drops the references taken so far (items before faili)
 * and returns the original error.
 */
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct buffer_head *buf)
{
	u64 blocknr;
	struct btrfs_node *buf_node;
	struct btrfs_leaf *buf_leaf;
	struct btrfs_disk_key *key;
	struct btrfs_file_extent_item *fi;
	int i;
	int leaf;
	int ret;
	int faili;
	int err;

	if (!root->ref_cows)
		return 0;
	buf_node = btrfs_buffer_node(buf);
	leaf = btrfs_is_leaf(buf_node);
	buf_leaf = btrfs_buffer_leaf(buf);
	for (i = 0; i < btrfs_header_nritems(&buf_node->header); i++) {
		if (leaf) {
			u64 disk_blocknr;
			/* only file-extent items reference disk blocks */
			key = &buf_leaf->items[i].key;
			if (btrfs_disk_key_type(key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf_leaf, i,
					    struct btrfs_file_extent_item);
			/* inline extents keep data in the item itself */
			if (btrfs_file_extent_type(fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			disk_blocknr = btrfs_file_extent_disk_blocknr(fi);
			/* blocknr 0 marks a hole */
			if (disk_blocknr == 0)
				continue;
			ret = btrfs_inc_extent_ref(trans, root, disk_blocknr,
				    btrfs_file_extent_disk_num_blocks(fi));
			if (ret) {
				faili = i;
				goto fail;
			}
		} else {
			blocknr = btrfs_node_blockptr(buf_node, i);
			ret = btrfs_inc_extent_ref(trans, root, blocknr, 1);
			if (ret) {
				faili = i;
				goto fail;
			}
		}
	}
	return 0;
fail:
	WARN_ON(1);
	/* undo: free the refs taken on items [0, faili), skipping the
	 * same items the forward loop skipped */
	for (i =0; i < faili; i++) {
		if (leaf) {
			u64 disk_blocknr;
			key = &buf_leaf->items[i].key;
			if (btrfs_disk_key_type(key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf_leaf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			disk_blocknr = btrfs_file_extent_disk_blocknr(fi);
			if (disk_blocknr == 0)
				continue;
			err = btrfs_free_extent(trans, root, disk_blocknr,
				    btrfs_file_extent_disk_num_blocks(fi), 0);
			BUG_ON(err);
		} else {
			blocknr = btrfs_node_blockptr(buf_node, i);
			err = btrfs_free_extent(trans, root, blocknr, 1, 0);
			BUG_ON(err);
		}
	}
	return ret;
}
/*
 * Write one block group item back into the extent tree.  The caller
 * supplies a reusable path.  Returns 0 on success or the first error
 * from the search/flush steps.
 */
static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	int ret;
	int pending_ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct btrfs_block_group_item *bi;
	struct btrfs_key ins;

	/* reserve room in the extent tree before modifying it */
	ret = find_free_extent(trans, extent_root, 0, 0, 0, (u64)-1, 0, &ins,
			       0, 0, 0);
	/* FIXME, set bit to recalc cache groups on next mount */
	if (ret)
		return ret;
	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret < 0)
		goto fail;
	/* the block group item must already exist */
	BUG_ON(ret);
	bi = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]), path->slots[0],
			    struct btrfs_block_group_item);
	memcpy(bi, &cache->item, sizeof(*bi));
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(extent_root, path);
fail:
	/* flush inserts/deletes queued while we held the path; the search
	 * error (if any) takes precedence over the flush error */
	finish_current_insert(trans, extent_root);
	pending_ret = del_pending_extents(trans, extent_root);
	if (ret)
		return ret;
	if (pending_ret)
		return pending_ret;
	/* data groups restart allocation from their first free block */
	if (cache->data)
		cache->last_alloc = cache->first_free;
	return 0;
}
  534. static int write_dirty_block_radix(struct btrfs_trans_handle *trans,
  535. struct btrfs_root *root,
  536. struct radix_tree_root *radix)
  537. {
  538. struct btrfs_block_group_cache *cache[8];
  539. int ret;
  540. int err = 0;
  541. int werr = 0;
  542. int i;
  543. struct btrfs_path *path;
  544. unsigned long off = 0;
  545. path = btrfs_alloc_path();
  546. if (!path)
  547. return -ENOMEM;
  548. while(1) {
  549. ret = radix_tree_gang_lookup_tag(radix, (void **)cache,
  550. off, ARRAY_SIZE(cache),
  551. BTRFS_BLOCK_GROUP_DIRTY);
  552. if (!ret)
  553. break;
  554. for (i = 0; i < ret; i++) {
  555. err = write_one_cache_group(trans, root,
  556. path, cache[i]);
  557. /*
  558. * if we fail to write the cache group, we want
  559. * to keep it marked dirty in hopes that a later
  560. * write will work
  561. */
  562. if (err) {
  563. werr = err;
  564. off = cache[i]->key.objectid +
  565. cache[i]->key.offset;
  566. continue;
  567. }
  568. radix_tree_tag_clear(radix, cache[i]->key.objectid +
  569. cache[i]->key.offset - 1,
  570. BTRFS_BLOCK_GROUP_DIRTY);
  571. }
  572. }
  573. btrfs_free_path(path);
  574. return werr;
  575. }
  576. int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
  577. struct btrfs_root *root)
  578. {
  579. int ret;
  580. int ret2;
  581. ret = write_dirty_block_radix(trans, root,
  582. &root->fs_info->block_group_radix);
  583. ret2 = write_dirty_block_radix(trans, root,
  584. &root->fs_info->block_group_data_radix);
  585. if (ret)
  586. return ret;
  587. if (ret2)
  588. return ret2;
  589. return 0;
  590. }
/*
 * Adjust the used-block accounting for [blocknr, blocknr + num), which
 * may span several block groups.  alloc selects allocation vs free;
 * mark_free controls whether freed metadata blocks re-enter the
 * extent map radix; data is the type of the new allocation.
 *
 * Each touched group is tagged DIRTY so it gets written back.  A group
 * that is less than half full may be converted between data and
 * metadata to match the allocation, which moves it to the other radix.
 *
 * Returns 0, or -1 when no group contains blocknr.
 */
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 blocknr, u64 num, int alloc, int mark_free,
			      int data)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num;
	u64 old_val;
	u64 block_in_group;
	u64 i;
	int ret;

	while(total) {
		cache = btrfs_lookup_block_group(info, blocknr);
		if (!cache) {
			return -1;
		}
		block_in_group = blocknr - cache->key.objectid;
		WARN_ON(block_in_group > cache->key.offset);
		radix_tree_tag_set(cache->radix, cache->key.objectid +
				   cache->key.offset - 1,
				   BTRFS_BLOCK_GROUP_DIRTY);

		old_val = btrfs_block_group_used(&cache->item);
		/* only account for the part of the range in this group */
		num = min(total, cache->key.offset - block_in_group);
		if (alloc) {
			if (blocknr > cache->last_alloc)
				cache->last_alloc = blocknr;
			if (!cache->data) {
				/* blocks are no longer free: drop their
				 * bits from the extent map */
				for (i = 0; i < num; i++) {
					clear_radix_bit(&info->extent_map_radix,
							blocknr + i);
				}
			}
			/* a mostly-empty group of the wrong type gets
			 * converted and re-indexed in the other radix */
			if (cache->data != data &&
			    old_val < (cache->key.offset >> 1)) {
				cache->data = data;
				radix_tree_delete(cache->radix,
						  cache->key.objectid +
						  cache->key.offset - 1);
				if (data) {
					cache->radix =
						&info->block_group_data_radix;
					cache->item.flags |=
						BTRFS_BLOCK_GROUP_DATA;
				} else {
					cache->radix = &info->block_group_radix;
					cache->item.flags &=
						~BTRFS_BLOCK_GROUP_DATA;
				}
				ret = radix_tree_insert(cache->radix,
							cache->key.objectid +
							cache->key.offset - 1,
							(void *)cache);
			}
			old_val += num;
		} else {
			old_val -= num;
			if (blocknr < cache->first_free)
				cache->first_free = blocknr;
			if (!cache->data && mark_free) {
				/* return the blocks to the free bitmap */
				for (i = 0; i < num; i++) {
					set_radix_bit(&info->extent_map_radix,
						      blocknr + i);
				}
			}
			/* dropping below half full makes the group a
			 * candidate for allocation again */
			if (old_val < (cache->key.offset >> 1) &&
			    old_val + num >= (cache->key.offset >> 1)) {
				radix_tree_tag_set(cache->radix,
						   cache->key.objectid +
						   cache->key.offset - 1,
						   BTRFS_BLOCK_GROUP_AVAIL);
			}
		}
		btrfs_set_block_group_used(&cache->item, old_val);
		total -= num;
		blocknr += num;
	}
	return 0;
}
  670. static int try_remove_page(struct address_space *mapping, unsigned long index)
  671. {
  672. int ret;
  673. return 0;
  674. ret = invalidate_mapping_pages(mapping, index, index);
  675. return ret;
  676. }
  677. int btrfs_copy_pinned(struct btrfs_root *root, struct radix_tree_root *copy)
  678. {
  679. unsigned long gang[8];
  680. u64 last = 0;
  681. struct radix_tree_root *pinned_radix = &root->fs_info->pinned_radix;
  682. int ret;
  683. int i;
  684. while(1) {
  685. ret = find_first_radix_bit(pinned_radix, gang, last,
  686. ARRAY_SIZE(gang));
  687. if (!ret)
  688. break;
  689. for (i = 0 ; i < ret; i++) {
  690. set_radix_bit(copy, gang[i]);
  691. last = gang[i] + 1;
  692. }
  693. }
  694. return 0;
  695. }
/*
 * After a commit, unpin every block recorded in unpin_radix: clear it
 * from both the unpin and the fs-wide pinned radix, give it back to its
 * block group's accounting (and to the free-extent bitmap for metadata
 * groups), and try to drop its page from the btree page cache.
 *
 * Always returns 0.
 */
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct radix_tree_root *unpin_radix)
{
	unsigned long gang[8];
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct btrfs_block_group_cache *block_group;
	/* NOTE(review): first is recorded but never read after being set —
	 * looks like leftover bookkeeping; confirm before relying on it */
	u64 first = 0;
	int ret;
	int i;
	struct radix_tree_root *pinned_radix = &root->fs_info->pinned_radix;
	struct radix_tree_root *extent_radix = &root->fs_info->extent_map_radix;

	while(1) {
		ret = find_first_radix_bit(unpin_radix, gang, 0,
					   ARRAY_SIZE(gang));
		if (!ret)
			break;
		if (!first)
			first = gang[0];
		for (i = 0; i < ret; i++) {
			clear_radix_bit(pinned_radix, gang[i]);
			clear_radix_bit(unpin_radix, gang[i]);
			block_group = btrfs_lookup_block_group(root->fs_info,
							       gang[i]);
			if (block_group) {
				WARN_ON(block_group->pinned == 0);
				block_group->pinned--;
				/* let future allocations reuse this space */
				if (gang[i] < block_group->last_alloc)
					block_group->last_alloc = gang[i];
				if (gang[i] < block_group->last_prealloc)
					block_group->last_prealloc = gang[i];
				/* metadata blocks go back into the free map */
				if (!block_group->data)
					set_radix_bit(extent_radix, gang[i]);
			}
			try_remove_page(btree_inode->i_mapping,
					gang[i] << (PAGE_CACHE_SHIFT -
						    btree_inode->i_blkbits));
		}
	}
	return 0;
}
  737. static int finish_current_insert(struct btrfs_trans_handle *trans, struct
  738. btrfs_root *extent_root)
  739. {
  740. struct btrfs_key ins;
  741. struct btrfs_extent_item extent_item;
  742. int i;
  743. int ret;
  744. u64 super_blocks_used;
  745. struct btrfs_fs_info *info = extent_root->fs_info;
  746. btrfs_set_extent_refs(&extent_item, 1);
  747. ins.offset = 1;
  748. ins.flags = 0;
  749. btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
  750. btrfs_set_extent_owner(&extent_item, extent_root->root_key.objectid);
  751. for (i = 0; i < extent_root->fs_info->extent_tree_insert_nr; i++) {
  752. ins.objectid = extent_root->fs_info->extent_tree_insert[i];
  753. super_blocks_used = btrfs_super_blocks_used(&info->super_copy);
  754. btrfs_set_super_blocks_used(&info->super_copy,
  755. super_blocks_used + 1);
  756. ret = btrfs_insert_item(trans, extent_root, &ins, &extent_item,
  757. sizeof(extent_item));
  758. BUG_ON(ret);
  759. }
  760. extent_root->fs_info->extent_tree_insert_nr = 0;
  761. return 0;
  762. }
/*
 * Mark a block so it is not reused until it is safe: pending blocks go
 * into the pending_del radix (handled by del_pending_extents), others
 * into the pinned radix with their group's pinned count bumped.
 *
 * A block whose in-memory buffer was created in the currently running
 * transaction was never on disk in this transaction, so it needs no
 * pinning at all and is released immediately.
 *
 * Always returns 0 (BUGs on radix errors).
 */
static int pin_down_block(struct btrfs_root *root, u64 blocknr, int pending)
{
	int err;
	struct btrfs_header *header;
	struct buffer_head *bh;

	if (!pending) {
		bh = btrfs_find_tree_block(root, blocknr);
		if (bh) {
			if (buffer_uptodate(bh)) {
				u64 transid =
				    root->fs_info->running_transaction->transid;
				header = btrfs_buffer_header(bh);
				/* born in this transaction: nothing on disk
				 * references it yet, skip the pin */
				if (btrfs_header_generation(header) ==
				    transid) {
					btrfs_block_release(root, bh);
					return 0;
				}
			}
			btrfs_block_release(root, bh);
		}
		err = set_radix_bit(&root->fs_info->pinned_radix, blocknr);
		/* err == 0 means the bit was newly set; only count the
		 * block as pinned once */
		if (!err) {
			struct btrfs_block_group_cache *cache;
			cache = btrfs_lookup_block_group(root->fs_info,
							 blocknr);
			if (cache)
				cache->pinned++;
		}
	} else {
		err = set_radix_bit(&root->fs_info->pending_del_radix, blocknr);
	}
	BUG_ON(err < 0);
	return 0;
}
  797. /*
  798. * remove an extent from the root, returns 0 on success
  799. */
  800. static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
  801. *root, u64 blocknr, u64 num_blocks, int pin,
  802. int mark_free)
  803. {
  804. struct btrfs_path *path;
  805. struct btrfs_key key;
  806. struct btrfs_fs_info *info = root->fs_info;
  807. struct btrfs_root *extent_root = info->extent_root;
  808. int ret;
  809. struct btrfs_extent_item *ei;
  810. struct btrfs_key ins;
  811. u32 refs;
  812. key.objectid = blocknr;
  813. key.flags = 0;
  814. btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
  815. key.offset = num_blocks;
  816. path = btrfs_alloc_path();
  817. if (!path)
  818. return -ENOMEM;
  819. ret = find_free_extent(trans, root, 0, 0, 0, (u64)-1, 0, &ins, 0, 0, 0);
  820. if (ret) {
  821. btrfs_free_path(path);
  822. return ret;
  823. }
  824. ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
  825. if (ret < 0)
  826. return ret;
  827. BUG_ON(ret);
  828. ei = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]), path->slots[0],
  829. struct btrfs_extent_item);
  830. BUG_ON(ei->refs == 0);
  831. refs = btrfs_extent_refs(ei) - 1;
  832. btrfs_set_extent_refs(ei, refs);
  833. btrfs_mark_buffer_dirty(path->nodes[0]);
  834. if (refs == 0) {
  835. u64 super_blocks_used;
  836. if (pin) {
  837. ret = pin_down_block(root, blocknr, 0);
  838. BUG_ON(ret);
  839. }
  840. super_blocks_used = btrfs_super_blocks_used(&info->super_copy);
  841. btrfs_set_super_blocks_used(&info->super_copy,
  842. super_blocks_used - num_blocks);
  843. ret = btrfs_del_item(trans, extent_root, path);
  844. if (ret) {
  845. return ret;
  846. }
  847. ret = update_block_group(trans, root, blocknr, num_blocks, 0,
  848. mark_free, 0);
  849. BUG_ON(ret);
  850. }
  851. btrfs_free_path(path);
  852. finish_current_insert(trans, extent_root);
  853. return ret;
  854. }
  855. /*
  856. * find all the blocks marked as pending in the radix tree and remove
  857. * them from the extent map
  858. */
  859. static int del_pending_extents(struct btrfs_trans_handle *trans, struct
  860. btrfs_root *extent_root)
  861. {
  862. int ret;
  863. int wret;
  864. int err = 0;
  865. unsigned long gang[4];
  866. int i;
  867. struct radix_tree_root *pending_radix;
  868. struct radix_tree_root *pinned_radix;
  869. struct btrfs_block_group_cache *cache;
  870. pending_radix = &extent_root->fs_info->pending_del_radix;
  871. pinned_radix = &extent_root->fs_info->pinned_radix;
  872. while(1) {
  873. ret = find_first_radix_bit(pending_radix, gang, 0,
  874. ARRAY_SIZE(gang));
  875. if (!ret)
  876. break;
  877. for (i = 0; i < ret; i++) {
  878. wret = set_radix_bit(pinned_radix, gang[i]);
  879. if (wret == 0) {
  880. cache =
  881. btrfs_lookup_block_group(extent_root->fs_info,
  882. gang[i]);
  883. if (cache)
  884. cache->pinned++;
  885. }
  886. if (wret < 0) {
  887. printk(KERN_CRIT "set_radix_bit, err %d\n",
  888. wret);
  889. BUG_ON(wret < 0);
  890. }
  891. wret = clear_radix_bit(pending_radix, gang[i]);
  892. BUG_ON(wret);
  893. wret = __free_extent(trans, extent_root,
  894. gang[i], 1, 0, 0);
  895. if (wret)
  896. err = wret;
  897. }
  898. }
  899. return err;
  900. }
  901. /*
  902. * remove an extent from the root, returns 0 on success
  903. */
  904. int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
  905. *root, u64 blocknr, u64 num_blocks, int pin)
  906. {
  907. struct btrfs_root *extent_root = root->fs_info->extent_root;
  908. int pending_ret;
  909. int ret;
  910. if (root == extent_root) {
  911. pin_down_block(root, blocknr, 1);
  912. return 0;
  913. }
  914. ret = __free_extent(trans, root, blocknr, num_blocks, pin, pin == 0);
  915. pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
  916. return ret ? ret : pending_ret;
  917. }
  918. /*
  919. * walks the btree of allocated extents and find a hole of a given size.
  920. * The key ins is changed to record the hole:
  921. * ins->objectid == block start
  922. * ins->flags = BTRFS_EXTENT_ITEM_KEY
  923. * ins->offset == number of blocks
  924. * Any available blocks before search_start are skipped.
  925. */
/*
 * walks the btree of allocated extents and find a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 *
 * num_blocks == 0 means "fill the extent-tree preallocation array"
 * (fill_prealloc mode): instead of returning a single hole, contiguous
 * free blocks are stashed in info->extent_tree_prealloc[].
 *
 * Search strategy: start from a block group (hinted or from the
 * transaction), scan the extent tree for a gap, then re-check the
 * candidate against the pinned radix, pending extent-tree inserts,
 * the prealloc array and the caller's exclude range.  On conflict the
 * search restarts at new_group; after wrapping once, a full scan is
 * done before giving up with -ENOSPC.
 */
static int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
			    *orig_root, u64 num_blocks, u64 empty_size,
			    u64 search_start, u64 search_end, u64 hint_block,
			    struct btrfs_key *ins, u64 exclude_start,
			    u64 exclude_nr, int data)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;
	u64 hole_size = 0;
	int slot = 0;
	u64 last_block = 0;	/* end of the last extent item seen */
	u64 test_block;
	u64 orig_search_start = search_start;
	int start_found;
	struct btrfs_leaf *l;
	struct btrfs_root * root = orig_root->fs_info->extent_root;
	struct btrfs_fs_info *info = root->fs_info;
	int total_needed = num_blocks;
	int total_found = 0;
	int fill_prealloc = 0;
	int level;
	struct btrfs_block_group_cache *block_group;
	int full_scan = 0;
	int wrapped = 0;
	u64 limit;

	ins->flags = 0;
	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);

	level = btrfs_header_level(btrfs_buffer_header(root->node));
	if (num_blocks == 0) {
		/* prealloc mode: need enough blocks for a worst-case
		 * tree modification (6 per level) */
		fill_prealloc = 1;
		num_blocks = 1;
		total_needed = (min(level + 1, BTRFS_MAX_LEVEL)) * 6;
	}
	if (fill_prealloc) {
		u64 first;
		int nr = info->extent_tree_prealloc_nr;
		first = info->extent_tree_prealloc[nr - 1];
		/* existing prealloc array is still good enough: reuse it */
		if (info->extent_tree_prealloc_nr >= total_needed &&
		    first >= search_start) {
			ins->objectid = info->extent_tree_prealloc[0];
			ins->offset = 1;
			return 0;
		}
		info->extent_tree_prealloc_nr = 0;
	}
	if (search_end == (u64)-1)
		search_end = btrfs_super_total_blocks(&info->super_copy);
	/* pick the block group to start scanning from */
	if (hint_block) {
		block_group = btrfs_lookup_block_group(info, hint_block);
		block_group = btrfs_find_block_group(root, block_group,
						     hint_block, data, 1);
	} else {
		block_group = btrfs_find_block_group(root,
						     trans->block_group, 0,
						     data, 1);
	}
	total_needed += empty_size;
	path = btrfs_alloc_path();

check_failed:
	if (!block_group->data)
		search_start = find_search_start(root, &block_group,
						 search_start, total_needed);
	else if (!full_scan)
		search_start = max(block_group->last_alloc, search_start);

	btrfs_init_path(path);
	ins->objectid = search_start;
	ins->offset = 0;
	start_found = 0;

	ret = btrfs_search_slot(trans, root, ins, path, 0, 0);
	if (ret < 0)
		goto error;

	/* step back to the item at or before search_start */
	if (path->slots[0] > 0) {
		path->slots[0]--;
	}
	l = btrfs_buffer_leaf(path->nodes[0]);
	btrfs_disk_key_to_cpu(&key, &l->items[path->slots[0]].key);
	/*
	 * a rare case, go back one key if we hit a block group item
	 * instead of an extent item
	 */
	if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY &&
	    key.objectid + key.offset >= search_start) {
		ins->objectid = key.objectid;
		ins->offset = key.offset - 1;
		btrfs_release_path(root, path);
		ret = btrfs_search_slot(trans, root, ins, path, 0, 0);
		if (ret < 0)
			goto error;
		if (path->slots[0] > 0) {
			path->slots[0]--;
		}
	}

	/* scan forward through the extent tree looking for a hole */
	while (1) {
		l = btrfs_buffer_leaf(path->nodes[0]);
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(&l->header)) {
			if (fill_prealloc) {
				/* crossing leaves breaks contiguity of the
				 * prealloc run: start collecting again */
				info->extent_tree_prealloc_nr = 0;
				total_found = 0;
			}
			/* NOTE(review): 'limit' is computed here but never
			 * read anywhere in this function — looks vestigial */
			if (start_found)
				limit = last_block +
					(block_group->key.offset >> 1);
			else
				limit = search_start +
					(block_group->key.offset >> 1);
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			/* ran off the end of the tree: everything from here
			 * to search_end is free */
			if (!start_found) {
				ins->objectid = search_start;
				ins->offset = search_end - search_start;
				start_found = 1;
				goto check_pending;
			}
			ins->objectid = last_block > search_start ?
					last_block : search_start;
			ins->offset = search_end - ins->objectid;
			goto check_pending;
		}
		btrfs_disk_key_to_cpu(&key, &l->items[slot].key);
		/* gap between the previous extent and this key? */
		if (key.objectid >= search_start && key.objectid > last_block &&
		    start_found) {
			if (last_block < search_start)
				last_block = search_start;
			hole_size = key.objectid - last_block;
			if (hole_size >= num_blocks) {
				ins->objectid = last_block;
				ins->offset = hole_size;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
			goto next;
		start_found = 1;
		last_block = key.objectid + key.offset;
		/* stepped past this block group: move on to the next one */
		if (!full_scan && last_block >= block_group->key.objectid +
		    block_group->key.offset) {
			btrfs_release_path(root, path);
			search_start = block_group->key.objectid +
				block_group->key.offset * 2;
			goto new_group;
		}
next:
		path->slots[0]++;
		cond_resched();
	}

check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(root, path);
	BUG_ON(ins->objectid < search_start);
	if (ins->objectid + num_blocks >= search_end) {
		if (full_scan) {
			ret = -ENOSPC;
			goto error;
		}
		/* wrap around once, then fall back to a full scan */
		search_start = orig_search_start;
		if (wrapped) {
			if (!full_scan)
				total_needed -= empty_size;
			full_scan = 1;
		} else
			wrapped = 1;
		goto new_group;
	}
	/* reject candidates overlapping pinned blocks */
	for (test_block = ins->objectid;
	     test_block < ins->objectid + num_blocks; test_block++) {
		if (test_radix_bit(&info->pinned_radix, test_block)) {
			search_start = test_block + 1;
			goto new_group;
		}
	}
	/* reject candidates overlapping extent-tree blocks whose extent
	 * items have not been inserted yet */
	if (!fill_prealloc && info->extent_tree_insert_nr) {
		u64 last =
		    info->extent_tree_insert[info->extent_tree_insert_nr - 1];
		if (ins->objectid + num_blocks >
		    info->extent_tree_insert[0] &&
		    ins->objectid <= last) {
			search_start = last + 1;
			WARN_ON(!full_scan);
			goto new_group;
		}
	}
	/* reject candidates overlapping the current prealloc array
	 * (stored in descending order: [0] is highest) */
	if (!fill_prealloc && info->extent_tree_prealloc_nr) {
		u64 first =
		    info->extent_tree_prealloc[info->extent_tree_prealloc_nr - 1];
		if (ins->objectid + num_blocks > first &&
		    ins->objectid <= info->extent_tree_prealloc[0]) {
			search_start = info->extent_tree_prealloc[0] + 1;
			goto new_group;
		}
	}
	/* reject candidates inside the caller's exclusion window */
	if (exclude_nr > 0 && (ins->objectid + num_blocks > exclude_start &&
	    ins->objectid < exclude_start + exclude_nr)) {
		search_start = exclude_start + exclude_nr;
		goto new_group;
	}
	if (fill_prealloc) {
		int nr;
		test_block = ins->objectid;
		/* discard a prealloc run too far from the new blocks to
		 * share a leaf with them */
		if (test_block - info->extent_tree_prealloc[total_needed - 1] >=
		    leaf_range(root)) {
			total_found = 0;
			info->extent_tree_prealloc_nr = total_found;
		}
		/* fill the array back-to-front so [0] ends up highest */
		while(test_block < ins->objectid + ins->offset &&
		      total_found < total_needed) {
			nr = total_needed - total_found - 1;
			BUG_ON(nr < 0);
			info->extent_tree_prealloc[nr] = test_block;
			total_found++;
			test_block++;
		}
		if (total_found < total_needed) {
			search_start = test_block;
			goto new_group;
		}
		info->extent_tree_prealloc_nr = total_found;
	}
	/* remember where this metadata allocation landed */
	if (!data) {
		block_group = btrfs_lookup_block_group(info, ins->objectid);
		if (block_group) {
			if (fill_prealloc)
				block_group->last_prealloc =
				     info->extent_tree_prealloc[total_needed-1];
			else
				trans->block_group = block_group;
		}
	}
	ins->offset = num_blocks;
	btrfs_free_path(path);
	return 0;

new_group:
	if (search_start + num_blocks >= search_end) {
		search_start = orig_search_start;
		if (full_scan) {
			ret = -ENOSPC;
			goto error;
		}
		if (wrapped) {
			if (!full_scan)
				total_needed -= empty_size;
			full_scan = 1;
		} else
			wrapped = 1;
	}
	block_group = btrfs_lookup_block_group(info, search_start);
	cond_resched();
	if (!full_scan)
		block_group = btrfs_find_block_group(root, block_group,
						     search_start, data, 0);
	goto check_failed;

error:
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	return ret;
}
  1188. /*
  1189. * finds a free extent and does all the dirty work required for allocation
  1190. * returns the key for the extent through ins, and a tree buffer for
  1191. * the first block of the extent through buf.
  1192. *
  1193. * returns 0 if everything worked, non-zero otherwise.
  1194. */
/*
 * finds a free extent and does all the dirty work required for allocation
 * returns the key for the extent through ins, and a tree buffer for
 * the first block of the extent through buf.
 *
 * returns 0 if everything worked, non-zero otherwise.
 *
 * Allocations for the extent tree itself come from the preallocation
 * array (no tree search); all others do a real find_free_extent plus a
 * prealloc refill, ordered so extent-tree blocks stay near the data or
 * metadata they describe.
 */
int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, u64 owner,
		       u64 num_blocks, u64 empty_size, u64 hint_block,
		       u64 search_end, struct btrfs_key *ins, int data)
{
	int ret;
	int pending_ret;
	u64 super_blocks_used;
	u64 search_start = 0;
	u64 exclude_start = 0;
	u64 exclude_nr = 0;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct btrfs_extent_item extent_item;
	struct btrfs_key prealloc_key;

	btrfs_set_extent_refs(&extent_item, 1);
	btrfs_set_extent_owner(&extent_item, owner);

	if (root == extent_root) {
		/*
		 * extent tree blocks must come from the prealloc array:
		 * searching the extent tree while modifying it would recurse
		 */
		int nr;
		BUG_ON(info->extent_tree_prealloc_nr == 0);
		BUG_ON(num_blocks != 1);
		ins->offset = 1;
		info->extent_tree_prealloc_nr--;
		nr = info->extent_tree_prealloc_nr;
		ins->objectid = info->extent_tree_prealloc[nr];
		/* record it so its extent item is inserted later by
		 * finish_current_insert */
		info->extent_tree_insert[info->extent_tree_insert_nr++] =
			ins->objectid;
		ret = update_block_group(trans, root,
					 ins->objectid, ins->offset, 1, 0, 0);
		WARN_ON(info->extent_tree_insert_nr >
			ARRAY_SIZE(info->extent_tree_insert));
		BUG_ON(ret);
		return 0;
	}
	/*
	 * if we're doing a data allocation, preallocate room in the
	 * extent tree first. This way the extent tree blocks end up
	 * in the correct block group.
	 */
	if (data) {
		ret = find_free_extent(trans, root, 0, 0, 0,
				       search_end, 0, &prealloc_key, 0, 0, 0);
		BUG_ON(ret);
		if (ret)
			return ret;
		/* keep the real allocation out of the prealloc range */
		exclude_nr = info->extent_tree_prealloc_nr;
		exclude_start = info->extent_tree_prealloc[exclude_nr - 1];
	}
	/* do the real allocation */
	ret = find_free_extent(trans, root, num_blocks, empty_size,
			       search_start, search_end, hint_block, ins,
			       exclude_start, exclude_nr, data);
	BUG_ON(ret);
	if (ret)
		return ret;
	/*
	 * if we're doing a metadata allocation, preallocate space in the
	 * extent tree second. This way, we don't create a tiny hole
	 * in the allocation map between any unused preallocation blocks
	 * and the metadata block we're actually allocating. On disk,
	 * it'll go:
	 * [block we've allocated], [used prealloc 1], [ unused prealloc ]
	 * The unused prealloc will get reused the next time around.
	 */
	if (!data) {
		exclude_start = ins->objectid;
		exclude_nr = ins->offset;
		hint_block = exclude_start + exclude_nr;
		ret = find_free_extent(trans, root, 0, 0, search_start,
				       search_end, hint_block,
				       &prealloc_key, exclude_start,
				       exclude_nr, 0);
		BUG_ON(ret);
		if (ret)
			return ret;
	}

	super_blocks_used = btrfs_super_blocks_used(&info->super_copy);
	btrfs_set_super_blocks_used(&info->super_copy, super_blocks_used +
				    num_blocks);
	ret = btrfs_insert_item(trans, extent_root, ins, &extent_item,
				sizeof(extent_item));
	BUG_ON(ret);

	/* flush deferred extent-tree inserts and pending deletions */
	finish_current_insert(trans, extent_root);
	pending_ret = del_pending_extents(trans, extent_root);
	if (ret) {
		return ret;
	}
	if (pending_ret) {
		return pending_ret;
	}
	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0,
				 data);
	BUG_ON(ret);
	return 0;
}
  1290. /*
  1291. * helper function to allocate a block for a given tree
  1292. * returns the tree buffer or NULL.
  1293. */
  1294. struct buffer_head *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
  1295. struct btrfs_root *root, u64 hint,
  1296. u64 empty_size)
  1297. {
  1298. struct btrfs_key ins;
  1299. int ret;
  1300. struct buffer_head *buf;
  1301. ret = btrfs_alloc_extent(trans, root, root->root_key.objectid,
  1302. 1, empty_size, hint,
  1303. (unsigned long)-1, &ins, 0);
  1304. if (ret) {
  1305. BUG_ON(ret > 0);
  1306. return ERR_PTR(ret);
  1307. }
  1308. buf = btrfs_find_create_tree_block(root, ins.objectid);
  1309. if (!buf) {
  1310. btrfs_free_extent(trans, root, ins.objectid, 1, 0);
  1311. return ERR_PTR(-ENOMEM);
  1312. }
  1313. WARN_ON(buffer_dirty(buf));
  1314. set_buffer_uptodate(buf);
  1315. set_buffer_checked(buf);
  1316. set_radix_bit(&trans->transaction->dirty_pages, buf->b_page->index);
  1317. return buf;
  1318. }
  1319. static int drop_leaf_ref(struct btrfs_trans_handle *trans,
  1320. struct btrfs_root *root, struct buffer_head *cur)
  1321. {
  1322. struct btrfs_disk_key *key;
  1323. struct btrfs_leaf *leaf;
  1324. struct btrfs_file_extent_item *fi;
  1325. int i;
  1326. int nritems;
  1327. int ret;
  1328. BUG_ON(!btrfs_is_leaf(btrfs_buffer_node(cur)));
  1329. leaf = btrfs_buffer_leaf(cur);
  1330. nritems = btrfs_header_nritems(&leaf->header);
  1331. for (i = 0; i < nritems; i++) {
  1332. u64 disk_blocknr;
  1333. key = &leaf->items[i].key;
  1334. if (btrfs_disk_key_type(key) != BTRFS_EXTENT_DATA_KEY)
  1335. continue;
  1336. fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
  1337. if (btrfs_file_extent_type(fi) == BTRFS_FILE_EXTENT_INLINE)
  1338. continue;
  1339. /*
  1340. * FIXME make sure to insert a trans record that
  1341. * repeats the snapshot del on crash
  1342. */
  1343. disk_blocknr = btrfs_file_extent_disk_blocknr(fi);
  1344. if (disk_blocknr == 0)
  1345. continue;
  1346. ret = btrfs_free_extent(trans, root, disk_blocknr,
  1347. btrfs_file_extent_disk_num_blocks(fi),
  1348. 0);
  1349. BUG_ON(ret);
  1350. }
  1351. return 0;
  1352. }
  1353. static void reada_walk_down(struct btrfs_root *root,
  1354. struct btrfs_node *node)
  1355. {
  1356. int i;
  1357. u32 nritems;
  1358. u64 blocknr;
  1359. int ret;
  1360. u32 refs;
  1361. nritems = btrfs_header_nritems(&node->header);
  1362. for (i = 0; i < nritems; i++) {
  1363. blocknr = btrfs_node_blockptr(node, i);
  1364. ret = lookup_extent_ref(NULL, root, blocknr, 1, &refs);
  1365. BUG_ON(ret);
  1366. if (refs != 1)
  1367. continue;
  1368. ret = readahead_tree_block(root, blocknr);
  1369. if (ret)
  1370. break;
  1371. }
  1372. }
  1373. /*
  1374. * helper function for drop_snapshot, this walks down the tree dropping ref
  1375. * counts as it goes.
  1376. */
/*
 * helper function for drop_snapshot, this walks down the tree dropping ref
 * counts as it goes.
 *
 * On return, *level points at the lowest level still held in 'path'
 * whose slots have not all been processed; the block at the original
 * *level (or the deepest reached) has had its extent freed and its
 * buffer released.  Always returns 0.
 */
static int walk_down_tree(struct btrfs_trans_handle *trans, struct btrfs_root
			  *root, struct btrfs_path *path, int *level)
{
	struct buffer_head *next;
	struct buffer_head *cur;
	u64 blocknr;
	int ret;
	u32 refs;

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);
	ret = lookup_extent_ref(trans, root, bh_blocknr(path->nodes[*level]),
				1, &refs);
	BUG_ON(ret);
	/* shared subtree: just drop our ref below, don't descend */
	if (refs > 1)
		goto out;
	/*
	 * walk down to the last node level and free all the leaves
	 */
	while(*level >= 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];
		/* entering a fresh node: prefetch its children */
		if (*level > 0 && path->slots[*level] == 0)
			reada_walk_down(root, btrfs_buffer_node(cur));
		if (btrfs_header_level(btrfs_buffer_header(cur)) != *level)
			WARN_ON(1);
		/* all slots in this node done: let walk_up_tree handle it */
		if (path->slots[*level] >=
		    btrfs_header_nritems(btrfs_buffer_header(cur)))
			break;
		if (*level == 0) {
			ret = drop_leaf_ref(trans, root, cur);
			BUG_ON(ret);
			break;
		}
		blocknr = btrfs_node_blockptr(btrfs_buffer_node(cur),
					      path->slots[*level]);
		ret = lookup_extent_ref(trans, root, blocknr, 1, &refs);
		BUG_ON(ret);
		if (refs != 1) {
			/* child is shared: drop one ref and move on
			 * without descending */
			path->slots[*level]++;
			ret = btrfs_free_extent(trans, root, blocknr, 1, 1);
			BUG_ON(ret);
			continue;
		}
		/* sole owner: descend into the child
		 * NOTE(review): read_tree_block result is not checked for
		 * NULL/error here — confirm it cannot fail in this path */
		next = read_tree_block(root, blocknr);
		WARN_ON(*level <= 0);
		if (path->nodes[*level-1])
			btrfs_block_release(root, path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(btrfs_buffer_header(next));
		path->slots[*level] = 0;
	}
out:
	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);
	/* drop our own reference on the block we stopped at */
	ret = btrfs_free_extent(trans, root,
				bh_blocknr(path->nodes[*level]), 1, 1);
	btrfs_block_release(root, path->nodes[*level]);
	path->nodes[*level] = NULL;
	*level += 1;
	BUG_ON(ret);
	return 0;
}
  1440. /*
  1441. * helper for dropping snapshots. This walks back up the tree in the path
  1442. * to find the first node higher up where we haven't yet gone through
  1443. * all the slots
  1444. */
  1445. static int walk_up_tree(struct btrfs_trans_handle *trans, struct btrfs_root
  1446. *root, struct btrfs_path *path, int *level)
  1447. {
  1448. int i;
  1449. int slot;
  1450. int ret;
  1451. struct btrfs_root_item *root_item = &root->root_item;
  1452. for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
  1453. slot = path->slots[i];
  1454. if (slot < btrfs_header_nritems(
  1455. btrfs_buffer_header(path->nodes[i])) - 1) {
  1456. struct btrfs_node *node;
  1457. node = btrfs_buffer_node(path->nodes[i]);
  1458. path->slots[i]++;
  1459. *level = i;
  1460. WARN_ON(*level == 0);
  1461. memcpy(&root_item->drop_progress,
  1462. &node->ptrs[path->slots[i]].key,
  1463. sizeof(root_item->drop_progress));
  1464. root_item->drop_level = i;
  1465. return 0;
  1466. } else {
  1467. ret = btrfs_free_extent(trans, root,
  1468. bh_blocknr(path->nodes[*level]),
  1469. 1, 1);
  1470. BUG_ON(ret);
  1471. btrfs_block_release(root, path->nodes[*level]);
  1472. path->nodes[*level] = NULL;
  1473. *level = i + 1;
  1474. }
  1475. }
  1476. return 1;
  1477. }
  1478. /*
  1479. * drop the reference count on the tree rooted at 'snap'. This traverses
  1480. * the tree freeing any blocks that have a ref count of zero after being
  1481. * decremented.
  1482. */
/*
 * drop the reference count on the tree rooted at 'snap'. This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented.
 *
 * The walk is resumable: progress is stored in root_item->drop_progress /
 * drop_level, and after a bounded number of down/up passes the function
 * bails out with -EAGAIN so the caller can commit and call again.
 */
int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
			*root)
{
	int ret = 0;
	int wret;
	int level;
	struct btrfs_path *path;
	int i;
	int orig_level;
	int num_walks = 0;
	struct btrfs_root_item *root_item = &root->root_item;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	level = btrfs_header_level(btrfs_buffer_header(root->node));
	orig_level = level;
	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		/* fresh drop: start from the root block */
		path->nodes[level] = root->node;
		path->slots[level] = 0;
	} else {
		/* resuming: re-search down to the recorded position */
		struct btrfs_key key;
		struct btrfs_disk_key *found_key;
		struct btrfs_node *node;

		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		level = root_item->drop_level;
		path->lowest_level = level;
		wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (wret < 0) {
			ret = wret;
			goto out;
		}
		/* sanity check: the slot we landed on should hold the
		 * saved progress key */
		node = btrfs_buffer_node(path->nodes[level]);
		found_key = &node->ptrs[path->slots[level]].key;
		WARN_ON(memcmp(found_key, &root_item->drop_progress,
			       sizeof(*found_key)));
	}
	while(1) {
		wret = walk_down_tree(trans, root, path, &level);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;

		wret = walk_up_tree(trans, root, path, &level);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;
		num_walks++;
		if (num_walks > 2) {
			/* bound the work per call; the extra ref on
			 * root->node keeps it alive for the next pass */
			ret = -EAGAIN;
			get_bh(root->node);
			break;
		}
	}
	/* release whatever buffers the walk still holds in the path */
	for (i = 0; i <= orig_level; i++) {
		if (path->nodes[i]) {
			btrfs_block_release(root, path->nodes[i]);
			path->nodes[i] = 0;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}
  1546. static int free_block_group_radix(struct radix_tree_root *radix)
  1547. {
  1548. int ret;
  1549. struct btrfs_block_group_cache *cache[8];
  1550. int i;
  1551. while(1) {
  1552. ret = radix_tree_gang_lookup(radix, (void **)cache, 0,
  1553. ARRAY_SIZE(cache));
  1554. if (!ret)
  1555. break;
  1556. for (i = 0; i < ret; i++) {
  1557. radix_tree_delete(radix, cache[i]->key.objectid +
  1558. cache[i]->key.offset - 1);
  1559. kfree(cache[i]);
  1560. }
  1561. }
  1562. return 0;
  1563. }
  1564. int btrfs_free_block_groups(struct btrfs_fs_info *info)
  1565. {
  1566. int ret;
  1567. int ret2;
  1568. unsigned long gang[16];
  1569. int i;
  1570. ret = free_block_group_radix(&info->block_group_radix);
  1571. ret2 = free_block_group_radix(&info->block_group_data_radix);
  1572. if (ret)
  1573. return ret;
  1574. if (ret2)
  1575. return ret2;
  1576. while(1) {
  1577. ret = find_first_radix_bit(&info->extent_map_radix,
  1578. gang, 0, ARRAY_SIZE(gang));
  1579. if (!ret)
  1580. break;
  1581. for (i = 0; i < ret; i++) {
  1582. clear_radix_bit(&info->extent_map_radix, gang[i]);
  1583. }
  1584. }
  1585. return 0;
  1586. }
  1587. int btrfs_read_block_groups(struct btrfs_root *root)
  1588. {
  1589. struct btrfs_path *path;
  1590. int ret;
  1591. int err = 0;
  1592. struct btrfs_block_group_item *bi;
  1593. struct btrfs_block_group_cache *cache;
  1594. struct btrfs_fs_info *info = root->fs_info;
  1595. struct radix_tree_root *radix;
  1596. struct btrfs_key key;
  1597. struct btrfs_key found_key;
  1598. struct btrfs_leaf *leaf;
  1599. u64 group_size_blocks;
  1600. u64 used;
  1601. group_size_blocks = BTRFS_BLOCK_GROUP_SIZE >>
  1602. root->fs_info->sb->s_blocksize_bits;
  1603. root = info->extent_root;
  1604. key.objectid = 0;
  1605. key.offset = group_size_blocks;
  1606. key.flags = 0;
  1607. btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
  1608. path = btrfs_alloc_path();
  1609. if (!path)
  1610. return -ENOMEM;
  1611. while(1) {
  1612. ret = btrfs_search_slot(NULL, info->extent_root,
  1613. &key, path, 0, 0);
  1614. if (ret != 0) {
  1615. err = ret;
  1616. break;
  1617. }
  1618. leaf = btrfs_buffer_leaf(path->nodes[0]);
  1619. btrfs_disk_key_to_cpu(&found_key,
  1620. &leaf->items[path->slots[0]].key);
  1621. cache = kmalloc(sizeof(*cache), GFP_NOFS);
  1622. if (!cache) {
  1623. err = -1;
  1624. break;
  1625. }
  1626. bi = btrfs_item_ptr(leaf, path->slots[0],
  1627. struct btrfs_block_group_item);
  1628. if (bi->flags & BTRFS_BLOCK_GROUP_DATA) {
  1629. radix = &info->block_group_data_radix;
  1630. cache->data = 1;
  1631. } else {
  1632. radix = &info->block_group_radix;
  1633. cache->data = 0;
  1634. }
  1635. memcpy(&cache->item, bi, sizeof(*bi));
  1636. memcpy(&cache->key, &found_key, sizeof(found_key));
  1637. cache->last_alloc = cache->key.objectid;
  1638. cache->first_free = cache->key.objectid;
  1639. cache->last_prealloc = cache->key.objectid;
  1640. cache->pinned = 0;
  1641. cache->cached = 0;
  1642. cache->radix = radix;
  1643. key.objectid = found_key.objectid + found_key.offset;
  1644. btrfs_release_path(root, path);
  1645. ret = radix_tree_insert(radix, found_key.objectid +
  1646. found_key.offset - 1,
  1647. (void *)cache);
  1648. BUG_ON(ret);
  1649. used = btrfs_block_group_used(bi);
  1650. if (used < div_factor(key.offset, 8)) {
  1651. radix_tree_tag_set(radix, found_key.objectid +
  1652. found_key.offset - 1,
  1653. BTRFS_BLOCK_GROUP_AVAIL);
  1654. }
  1655. if (key.objectid >=
  1656. btrfs_super_total_blocks(&info->super_copy))
  1657. break;
  1658. }
  1659. btrfs_free_path(path);
  1660. return 0;
  1661. }