backref.c

/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"

/*
 * this structure records all encountered refs on the way up to the root
 */
struct __prelim_ref {
	struct list_head list;
	u64 root_id;
	struct btrfs_key key;
	int level;
	int count;
	u64 parent;
	u64 wanted_disk_byte;
};
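
/*
 * allocate a new __prelim_ref, fill it from the given arguments and append
 * it to the list at head. allocation is atomic because callers may hold the
 * delayed refs spinlock.
 */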
static int __add_prelim_ref(struct list_head *head, u64 root_id,
			    struct btrfs_key *key, int level, u64 parent,
			    u64 wanted_disk_byte, int count)
{
	struct __prelim_ref *ref;

	/* in case we're adding delayed refs, we're holding the refs spinlock */
	ref = kmalloc(sizeof(*ref), GFP_ATOMIC);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key = *key;
	else
		memset(&ref->key, 0, sizeof(ref->key));

	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	list_add_tail(&ref->list, head);

	return 0;
}
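
/*
 * add the leaf or node eb to the parents ulist. for leaves, also scan the
 * following leaves for further EXTENT_DATA items that reference
 * wanted_disk_byte and add those leaves as well.
 */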
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct extent_buffer *eb, int level,
			   u64 wanted_objectid, u64 wanted_disk_byte)
{
	int ret;
	int slot;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 disk_byte;

add_parent:
	ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
	if (ret < 0)
		return ret;

	if (level != 0)
		return 0;

	/*
	 * if the current leaf is full of EXTENT_DATA items, we must
	 * check the next one to see whether it holds a reference as well.
	 * ref->count cannot be used to skip this check.
	 * repeat this until we don't find any additional EXTENT_DATA items.
	 */
	while (1) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			return ret;
		if (ret)
			return 0;

		eb = path->nodes[0];
		for (slot = 0; slot < btrfs_header_nritems(eb); ++slot) {
			btrfs_item_key_to_cpu(eb, &key, slot);
			if (key.objectid != wanted_objectid ||
			    key.type != BTRFS_EXTENT_DATA_KEY)
				return 0;
			fi = btrfs_item_ptr(eb, slot,
					    struct btrfs_file_extent_item);
			disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
			if (disk_byte == wanted_disk_byte)
				goto add_parent;
		}
	}

	return 0;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				  int search_commit_root,
				  struct __prelim_ref *ref,
				  struct ulist *parents)
{
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_key root_key;
	struct btrfs_key key = {0};
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->search_commit_root = !!search_commit_root;

	root_key.objectid = ref->root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out;
	}

	rcu_read_lock();
	root_level = btrfs_header_level(root->node);
	rcu_read_unlock();

	if (root_level + 1 == level)
		goto out;

	path->lowest_level = level;
	ret = btrfs_search_slot(NULL, root, &ref->key, path, 0, 0);
	pr_debug("search slot in root %llu (level %d, ref count %d) returned "
		 "%d for key (%llu %u %llu)\n",
		 (unsigned long long)ref->root_id, level, ref->count, ret,
		 (unsigned long long)ref->key.objectid, ref->key.type,
		 (unsigned long long)ref->key.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	if (!eb) {
		WARN_ON(1);
		ret = 1;
		goto out;
	}

	if (level == 0) {
		if (ret == 1 && path->slots[0] >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				goto out;
			eb = path->nodes[0];
		}

		btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
	}

	/* the last two parameters will only be used for level == 0 */
	ret = add_all_parents(root, path, parents, eb, level, key.objectid,
			      ref->wanted_disk_byte);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * resolve all indirect backrefs from the list
 */
static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				   int search_commit_root,
				   struct list_head *head)
{
	int err;
	int ret = 0;
	struct __prelim_ref *ref;
	struct __prelim_ref *ref_safe;
	struct __prelim_ref *new_ref;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * _safe allows us to insert directly after the current item without
	 * iterating over the newly inserted items.
	 * we're also allowed to re-assign ref during iteration.
	 */
	list_for_each_entry_safe(ref, ref_safe, head, list) {
		if (ref->parent)	/* already direct */
			continue;
		if (ref->count == 0)
			continue;
		err = __resolve_indirect_ref(fs_info, search_commit_root,
					     ref, parents);
		if (err) {
			if (ret == 0)
				ret = err;
			continue;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;

		/* additional parents require new refs being added here */
		while ((node = ulist_next(parents, &uiter))) {
			new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS);
			if (!new_ref) {
				ret = -ENOMEM;
				break;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			list_add(&new_ref->list, &ref->list);
		}
		ulist_reinit(parents);
	}

	ulist_free(parents);
	return ret;
}

/*
 * merge two lists of backrefs and adjust counts accordingly
 *
 * mode = 1: merge identical keys, if key is set
 * mode = 2: merge identical parents
 */
static int __merge_refs(struct list_head *head, int mode)
{
	struct list_head *pos1;

	list_for_each(pos1, head) {
		struct list_head *n2;
		struct list_head *pos2;
		struct __prelim_ref *ref1;

		ref1 = list_entry(pos1, struct __prelim_ref, list);

		if (mode == 1 && ref1->key.type == 0)
			continue;
		for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
		     pos2 = n2, n2 = pos2->next) {
			struct __prelim_ref *ref2;

			ref2 = list_entry(pos2, struct __prelim_ref, list);

			if (mode == 1) {
				if (memcmp(&ref1->key, &ref2->key,
					   sizeof(ref1->key)) ||
				    ref1->level != ref2->level ||
				    ref1->root_id != ref2->root_id)
					continue;
				ref1->count += ref2->count;
			} else {
				if (ref1->parent != ref2->parent)
					continue;
				ref1->count += ref2->count;
			}
			list_del(&ref2->list);
			kfree(ref2);
		}
	}
	return 0;
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to seq to the list
 */
static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
			      struct btrfs_key *info_key,
			      struct list_head *prefs)
{
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct rb_node *n = &head->node.rb_node;
	int sgn;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(info_key, &extent_op->key);

	while ((n = rb_prev(n))) {
		struct btrfs_delayed_ref_node *node;
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				rb_node);
		if (node->bytenr != head->node.bytenr)
			break;
		WARN_ON(node->is_head);

		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			sgn = 1;
			break;
		case BTRFS_DROP_DELAYED_REF:
			sgn = -1;
			break;
		default:
			BUG_ON(1);
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, info_key,
					       ref->level + 1, 0, node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, info_key,
					       ref->level + 1, ref->parent,
					       node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;
			struct btrfs_key key;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
					       node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;
			struct btrfs_key key;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0,
					       ref->parent, node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
	}

	return 0;
}

/*
 * add all inline backrefs for bytenr to the list
 */
static int __add_inline_refs(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path, u64 bytenr,
			     struct btrfs_key *info_key, int *info_level,
			     struct list_head *prefs)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0] - 1;

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;
		struct btrfs_disk_key disk_key;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		btrfs_tree_block_key(leaf, info, &disk_key);
		btrfs_disk_key_to_cpu(info_key, &disk_key);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, info_key,
					       *info_level + 1, offset,
					       bytenr, 1);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
					       bytenr, count);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, offset, info_key,
					       *info_level + 1, 0, bytenr, 1);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0, bytenr,
					       count);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 */
static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
			    struct btrfs_path *path, u64 bytenr,
			    struct btrfs_key *info_key, int info_level,
			    struct list_head *prefs)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, info_key,
					       info_level + 1, key.offset,
					       bytenr, 1);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
					       bytenr, count);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, key.offset, info_key,
					       info_level + 1, 0, bytenr, 1);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count);
			break;
		}
		default:
			WARN_ON(1);
		}
		BUG_ON(ret);
	}

	return ret;
}

/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 seq, struct ulist *refs, struct ulist *roots)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_key info_key = { 0 };
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	int search_commit_root = (trans == BTRFS_BACKREF_SEARCH_COMMIT_ROOT);
	struct list_head prefs_delayed;
	struct list_head prefs;
	struct __prelim_ref *ref;

	INIT_LIST_HEAD(&prefs);
	INIT_LIST_HEAD(&prefs_delayed);

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->search_commit_root = !!search_commit_root;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	if (trans != BTRFS_BACKREF_SEARCH_COMMIT_ROOT) {
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(trans, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				atomic_inc(&head->node.refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(&head->node);
				goto again;
			}
			ret = __add_delayed_refs(head, seq, &info_key,
						 &prefs_delayed);
			if (ret) {
				spin_unlock(&delayed_refs->lock);
				goto out;
			}
		}
		spin_unlock(&delayed_refs->lock);
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		leaf = path->nodes[0];
		slot = path->slots[0] - 1;
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    key.type == BTRFS_EXTENT_ITEM_KEY) {
			ret = __add_inline_refs(fs_info, path, bytenr,
						&info_key, &info_level, &prefs);
			if (ret)
				goto out;
			ret = __add_keyed_refs(fs_info, path, bytenr, &info_key,
					       info_level, &prefs);
			if (ret)
				goto out;
		}
	}
	btrfs_release_path(path);

	/*
	 * when adding the delayed refs above, the info_key might not have
	 * been known yet. Go over the list and replace the missing keys
	 */
	list_for_each_entry(ref, &prefs_delayed, list) {
		if ((ref->key.offset | ref->key.type | ref->key.objectid) == 0)
			memcpy(&ref->key, &info_key, sizeof(ref->key));
	}
	list_splice_init(&prefs_delayed, &prefs);

	ret = __merge_refs(&prefs, 1);
	if (ret)
		goto out;

	ret = __resolve_indirect_refs(fs_info, search_commit_root, &prefs);
	if (ret)
		goto out;

	ret = __merge_refs(&prefs, 2);
	if (ret)
		goto out;

	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		if (ref->count < 0)
			WARN_ON(1);
		if (ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			BUG_ON(ret < 0);
		}
		if (ref->count && ref->parent) {
			ret = ulist_add(refs, ref->parent, 0, GFP_NOFS);
			BUG_ON(ret < 0);
		}
		kfree(ref);
	}

out:
	if (head)
		mutex_unlock(&head->mutex);
	btrfs_free_path(path);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		kfree(ref);
	}
	while (!list_empty(&prefs_delayed)) {
		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
				       list);
		list_del(&ref->list);
		kfree(ref);
	}

	return ret;
}

/*
 * Finds all leaves with a reference to the specified combination of bytenr
 * and offset. key_list_head will point to a list of corresponding keys
 * (caller must free each list element). The leaves will be stored in the
 * leafs ulist, which must be freed with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 num_bytes, u64 seq, struct ulist **leafs)
{
	struct ulist *tmp;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ret = find_parent_nodes(trans, fs_info, bytenr, seq, *leafs, tmp);
	ulist_free(tmp);

	if (ret < 0 && ret != -ENOENT) {
		ulist_free(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 seq, struct ulist **roots)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, seq,
					tmp, *roots);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
	}

	ulist_free(tmp);
	return 0;
}
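
/*
 * search fs_root for the first item at or after (inum, key_type, ioff) and
 * position the path at the result. returns 0 and fills found_key if an item
 * with matching objectid and type was found, 1 if there is no such item and
 * <0 on error.
 */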
static int __inode_info(u64 inum, u64 ioff, u8 key_type,
			struct btrfs_root *fs_root, struct btrfs_path *path,
			struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	key.type = key_type;
	key.objectid = inum;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type || found_key->objectid != key.objectid)
		return 1;

	return 0;
}

/*
 * this makes the path point to (inum INODE_ITEM ioff)
 */
int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
		    struct btrfs_path *path)
{
	struct btrfs_key key;
	return __inode_info(inum, ioff, BTRFS_INODE_ITEM_KEY, fs_root, path,
			    &key);
}
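
/*
 * this makes the path point to (inum INODE_REF ioff) and returns the
 * corresponding key in found_key
 */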
static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
			  struct btrfs_path *path,
			  struct btrfs_key *found_key)
{
	return __inode_info(inum, ioff, BTRFS_INODE_REF_KEY, fs_root, path,
			    found_key);
}

/*
 * this iterates to turn a btrfs_inode_ref into a full filesystem path.
 * elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible for
 * providing "size" bytes in "dest". the dest buffer will be filled backwards.
 * finally, the start point of the resulting string is returned. this pointer
 * is within dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			  struct btrfs_inode_ref *iref,
			  struct extent_buffer *eb_in, u64 parent,
			  char *dest, u32 size)
{
	u32 len;
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = size - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		len = btrfs_inode_ref_name_len(eb, iref);
		bytes_left -= len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   (unsigned long)(iref + 1), len);
		if (eb != eb_in) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;
		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			atomic_inc(&eb->refs);
			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		}
		btrfs_release_path(path);

		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}

/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key)
{
	int ret;
	u64 flags;
	u32 item_size;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	ret = btrfs_previous_item(fs_info->extent_root, path,
				  0, BTRFS_EXTENT_ITEM_KEY);
	if (ret < 0)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type != BTRFS_EXTENT_ITEM_KEY ||
	    found_key->objectid > logical ||
	    found_key->objectid + found_key->offset <= logical) {
		pr_debug("logical %llu is not within any extent\n",
			 (unsigned long long)logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	pr_debug("logical %llu is at position %llu within the extent (%llu "
		 "EXTENT_ITEM %llu) flags %#llx size %u\n",
		 (unsigned long long)logical,
		 (unsigned long long)(logical - found_key->objectid),
		 (unsigned long long)found_key->objectid,
		 (unsigned long long)found_key->offset,
		 (unsigned long long)flags, item_size);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return BTRFS_EXTENT_FLAG_TREE_BLOCK;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		return BTRFS_EXTENT_FLAG_DATA;

	return -EIO;
}

/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * __get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
				   struct btrfs_extent_item *ei, u32 item_size,
				   struct btrfs_extent_inline_ref **out_eiref,
				   int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			info = (struct btrfs_tree_block_info *)(ei + 1);
			*out_eiref =
				(struct btrfs_extent_inline_ref *)(info + 1);
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((void *)*ptr >= (void *)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}

/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see __get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_extent_item *ei, u32 item_size,
			    u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_tree_block_info *info;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
					      &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	info = (struct btrfs_tree_block_info *)(ei + 1);
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
	*out_level = btrfs_tree_block_level(eb, info);

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}
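
/*
 * scan the leaf at 'logical' for EXTENT_DATA items that point back to the
 * extent item at orig_extent_item_objectid and call iterate() for each match
 * with the inode number, the file offset corresponding to extent_item_pos and
 * the root the leaf was found in.
 */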
static int iterate_leaf_refs(struct btrfs_fs_info *fs_info, u64 logical,
			     u64 orig_extent_item_objectid,
			     u64 extent_item_pos, u64 root,
			     iterate_extent_inodes_t *iterate, void *ctx)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb;
	int slot;
	int nritems;
	int ret = 0;
	int extent_type;
	u64 data_offset;
	u64 data_len;

	eb = read_tree_block(fs_info->tree_root, logical,
			     fs_info->tree_root->leafsize, 0);
	if (!eb)
		return -EIO;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != orig_extent_item_objectid)
			continue;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			continue;

		pr_debug("ref for %llu resolved, key (%llu EXTENT_DATA %llu), "
			 "root %llu\n", orig_extent_item_objectid,
			 key.objectid, key.offset, root);
		ret = iterate(key.objectid,
			      key.offset + (extent_item_pos - data_offset),
			      root, ctx);
		if (ret) {
			pr_debug("stopping iteration because ret=%d\n", ret);
			break;
		}
	}

	free_extent_buffer(eb);

	return ret;
}

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
			  u64 extent_item_objectid, u64 extent_item_pos,
			  int search_commit_root,
			  iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	struct list_head data_refs = LIST_HEAD_INIT(data_refs);
	struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
	struct btrfs_trans_handle *trans;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list seq_elem;
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;

	pr_debug("resolving all inodes for extent %llu\n",
		 extent_item_objectid);

	if (search_commit_root) {
		trans = BTRFS_BACKREF_SEARCH_COMMIT_ROOT;
	} else {
		trans = btrfs_join_transaction(fs_info->extent_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		btrfs_get_delayed_seq(delayed_refs, &seq_elem);
		spin_unlock(&delayed_refs->lock);
	}

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   extent_item_pos, seq_elem.seq,
				   &refs);

	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = btrfs_find_all_roots(trans, fs_info, ref_node->val, -1,
					   seq_elem.seq, &roots);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			pr_debug("root %llu references leaf %llu\n",
				 root_node->val, ref_node->val);
			ret = iterate_leaf_refs(fs_info, ref_node->val,
						extent_item_objectid,
						extent_item_pos, root_node->val,
						iterate, ctx);
		}
	}

	ulist_free(refs);
	ulist_free(roots);
out:
	if (!search_commit_root) {
		btrfs_put_delayed_seq(delayed_refs, &seq_elem);
		btrfs_end_transaction(trans, fs_info->extent_root);
	}

	return ret;
}
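
/*
 * resolve the extent item that covers 'logical' and call iterate() for every
 * inode that references it. returns -EINVAL if the address belongs to a tree
 * block rather than a data extent.
 */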
int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	u64 extent_item_pos;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path,
				  &found_key);
	btrfs_release_path(path);
	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = -EINVAL;
	if (ret < 0)
		return ret;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
				    extent_item_pos, search_commit_root,
				    iterate, ctx);

	return ret;
}
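
/*
 * loop over all INODE_REF items for inum and call iterate() for each ref,
 * passing the parent directory's inode number, the btrfs_inode_ref and the
 * leaf it was found in. iteration stops when iterate() returns non-zero.
 */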
static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path,
			 iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		path->leave_spinning = 1;
		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
				     &found_key);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		atomic_inc(&eb->refs);
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		item = btrfs_item_nr(eb, slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			pr_debug("following ref at offset %u for inode %llu in "
				 "tree %llu\n", cur,
				 (unsigned long long)found_key.objectid,
				 (unsigned long long)fs_root->objectid);
			ret = iterate(parent, iref, eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}

/*
 * returns 0 if the path could be dumped (probably truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
			ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = iref_to_path(ipath->fs_root, ipath->btrfs_path, iref, eb,
			      inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		pr_debug("path resolved: %s\n", fspath);
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		pr_debug("missed path, not enough space. missing bytes: %lu, "
			 "constructed so far: %s\n",
			 (unsigned long)(fspath_min - fspath), fspath_min);
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

/*
 * this dumps all file system paths to the inode into the ipath struct,
 * provided it has been created large enough. each path is zero-terminated
 * and accessed from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}
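
/*
 * allocate a btrfs_data_container of at least total_bytes (but never less
 * than the size of the container itself) and initialize its counters. the
 * space remaining after the container header is recorded in bytes_left; if
 * total_bytes is too small to even hold the container, the shortfall is
 * recorded in bytes_missing.
 */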
struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kmalloc(alloc_bytes, GFP_NOFS);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct inode_fs_paths).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
				  struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return (void *)fspath;

	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
	if (!ifp) {
		kfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}
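
/*
 * free the data container and the ipath struct allocated by init_ipath()
 */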
void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	kfree(ipath->fspath);
	kfree(ipath);
}