backref.c

/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include "ctree.h"
#include "disk-io.h"
#include "backref.h"

struct __data_ref {
	struct list_head list;
	u64 inum;
	u64 root;
	u64 extent_data_item_offset;
};

struct __shared_ref {
	struct list_head list;
	u64 disk_byte;
};

static int __inode_info(u64 inum, u64 ioff, u8 key_type,
			struct btrfs_root *fs_root, struct btrfs_path *path,
			struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	key.type = key_type;
	key.objectid = inum;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type || found_key->objectid != key.objectid)
		return 1;

	return 0;
}

/*
 * this makes the path point to (inum INODE_ITEM ioff)
 */
int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
			struct btrfs_path *path)
{
	struct btrfs_key key;
	return __inode_info(inum, ioff, BTRFS_INODE_ITEM_KEY, fs_root, path,
				&key);
}

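/*
 * Usage sketch (illustrative, not part of the original file): callers supply
 * their own path and must release it when done; "ii" is a hypothetical local:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = inode_item_info(inum, 0, fs_root, path);
 *	if (!ret) {
 *		eb = path->nodes[0];
 *		ii = btrfs_item_ptr(eb, path->slots[0],
 *					struct btrfs_inode_item);
 *		... read fields, e.g. btrfs_inode_size(eb, ii) ...
 *	}
 *	btrfs_free_path(path);
 */
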
static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
				struct btrfs_path *path,
				struct btrfs_key *found_key)
{
	return __inode_info(inum, ioff, BTRFS_INODE_REF_KEY, fs_root, path,
				found_key);
}

/*
 * this iterates to turn a btrfs_inode_ref into a full filesystem path.
 * elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible for
 * providing "size" bytes in "dest". the dest buffer will be filled backwards.
 * finally, the start point of the resulting string is returned. normally,
 * this pointer is within dest.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
				struct btrfs_inode_ref *iref,
				struct extent_buffer *eb_in, u64 parent,
				char *dest, u32 size)
{
	u32 len;
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = size - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	while (1) {
		len = btrfs_inode_ref_name_len(eb, iref);
		bytes_left -= len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
						(unsigned long)(iref + 1), len);
		if (eb != eb_in)
			free_extent_buffer(eb);
		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
		if (ret)
			break;
		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in)
			atomic_inc(&eb->refs);
		btrfs_release_path(path);

		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}

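/*
 * Usage sketch (illustrative, not part of the original file): checking for
 * truncation as the comment above demands. "buf" and "bufsize" are
 * hypothetical caller-provided values:
 *
 *	char *start = iref_to_path(fs_root, path, iref, eb, parent,
 *					buf, bufsize);
 *	if (IS_ERR(start))
 *		return PTR_ERR(start);
 *	if (start < buf) {
 *		needed = bufsize + (buf - start);
 *		... enlarge the buffer to "needed" bytes and retry ...
 *	}
 */
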
/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key)
{
	int ret;
	u64 flags;
	u32 item_size;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	ret = btrfs_previous_item(fs_info->extent_root, path,
					0, BTRFS_EXTENT_ITEM_KEY);
	if (ret < 0)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type != BTRFS_EXTENT_ITEM_KEY ||
	    found_key->objectid > logical ||
	    found_key->objectid + found_key->offset <= logical)
		return -ENOENT;

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return BTRFS_EXTENT_FLAG_TREE_BLOCK;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		return BTRFS_EXTENT_FLAG_DATA;

	return -EIO;
}

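/*
 * Usage sketch (illustrative, not part of the original file): the positive
 * return value doubles as the extent's type, so callers typically branch on
 * the flag before touching the item:
 *
 *	ret = extent_from_logical(fs_info, logical, path, &found_key);
 *	if (ret < 0)
 *		goto out;
 *	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
 *		... metadata extent, see tree_backref_for_extent() ...
 *	else
 *		... data extent, see iterate_extent_inodes() ...
 */
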
/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * __get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
				struct btrfs_extent_item *ei, u32 item_size,
				struct btrfs_extent_inline_ref **out_eiref,
				int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			info = (struct btrfs_tree_block_info *)(ei + 1);
			*out_eiref =
				(struct btrfs_extent_inline_ref *)(info + 1);
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((void *)*ptr >= (void *)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}

/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see __get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
				struct btrfs_extent_item *ei, u32 item_size,
				u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_tree_block_info *info;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
						&eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	info = (struct btrfs_tree_block_info *)(ei + 1);
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
	*out_level = btrfs_tree_block_level(eb, info);

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}

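/*
 * Usage sketch (illustrative, not part of the original file): ptr is the
 * iteration cursor described above, so a caller walks all tree block backrefs
 * of one extent item roughly like this:
 *
 *	unsigned long ptr = 0;
 *	u64 root;
 *	u8 level;
 *
 *	do {
 *		ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
 *						&root, &level);
 *		if (ret < 0)
 *			break;
 *		if (ret == 0)
 *			... one backref: the extent is referenced by tree
 *			    "root" at level "level" ...
 *	} while (ret == 0);
 */
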
static int __data_list_add(struct list_head *head, u64 inum,
				u64 extent_data_item_offset, u64 root)
{
	struct __data_ref *ref;

	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	ref->inum = inum;
	ref->extent_data_item_offset = extent_data_item_offset;
	ref->root = root;
	list_add_tail(&ref->list, head);

	return 0;
}

static int __data_list_add_eb(struct list_head *head, struct extent_buffer *eb,
				struct btrfs_extent_data_ref *dref)
{
	return __data_list_add(head, btrfs_extent_data_ref_objectid(eb, dref),
				btrfs_extent_data_ref_offset(eb, dref),
				btrfs_extent_data_ref_root(eb, dref));
}

static int __shared_list_add(struct list_head *head, u64 disk_byte)
{
	struct __shared_ref *ref;

	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	ref->disk_byte = disk_byte;
	list_add_tail(&ref->list, head);

	return 0;
}

static int __iter_shared_inline_ref_inodes(struct btrfs_fs_info *fs_info,
						u64 logical, u64 inum,
						u64 extent_data_item_offset,
						u64 extent_offset,
						struct btrfs_path *path,
						struct list_head *data_refs,
						iterate_extent_inodes_t *iterate,
						void *ctx)
{
	u64 ref_root;
	u32 item_size;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *eiref;
	struct __data_ref *ref;
	int ret;
	int type;
	int last;
	unsigned long ptr = 0;

	WARN_ON(!list_empty(data_refs));
	ret = extent_from_logical(fs_info, logical, path, &key);
	if (ret & BTRFS_EXTENT_FLAG_DATA)
		ret = -EIO;
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	ret = 0;
	ref_root = 0;
	/*
	 * as done in iterate_extent_inodes, we first build a list of refs to
	 * iterate, then free the path and then iterate them to avoid deadlocks.
	 */
	do {
		last = __get_extent_inline_ref(&ptr, eb, ei, item_size,
						&eiref, &type);
		if (last < 0) {
			ret = last;
			goto out;
		}
		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY) {
			ref_root = btrfs_extent_inline_ref_offset(eb, eiref);
			ret = __data_list_add(data_refs, inum,
						extent_data_item_offset,
						ref_root);
		}
	} while (!ret && !last);

	btrfs_release_path(path);

	if (ref_root == 0) {
		printk(KERN_ERR "btrfs: failed to find tree block ref "
			"for shared data backref %llu\n", logical);
		WARN_ON(1);
		ret = -EIO;
	}

out:
	while (!list_empty(data_refs)) {
		ref = list_first_entry(data_refs, struct __data_ref, list);
		list_del(&ref->list);
		if (!ret)
			ret = iterate(ref->inum, extent_offset +
					ref->extent_data_item_offset,
					ref->root, ctx);
		kfree(ref);
	}

	return ret;
}

static int __iter_shared_inline_ref(struct btrfs_fs_info *fs_info,
					u64 logical, u64 orig_extent_item_objectid,
					u64 extent_offset, struct btrfs_path *path,
					struct list_head *data_refs,
					iterate_extent_inodes_t *iterate,
					void *ctx)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb;
	int slot;
	int nritems;
	int ret;
	int found = 0;

	eb = read_tree_block(fs_info->tree_root, logical,
				fs_info->tree_root->leafsize, 0);
	if (!eb)
		return -EIO;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		if (!fi) {
			free_extent_buffer(eb);
			return -EIO;
		}
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != orig_extent_item_objectid) {
			if (found)
				break;
			else
				continue;
		}
		++found;
		ret = __iter_shared_inline_ref_inodes(fs_info, logical,
							key.objectid,
							key.offset,
							extent_offset, path,
							data_refs,
							iterate, ctx);
		if (ret)
			break;
	}

	if (!found) {
		printk(KERN_ERR "btrfs: failed to follow shared data backref "
			"to parent %llu\n", logical);
		WARN_ON(1);
		ret = -EIO;
	}

	free_extent_buffer(eb);
	return ret;
}

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters. will use the path given as a parameter and return it
 * released.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				u64 extent_item_objectid,
				u64 extent_offset,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	unsigned long ptr = 0;
	int last;
	int ret;
	int type;
	u64 logical;
	u32 item_size;
	struct btrfs_extent_inline_ref *eiref;
	struct btrfs_extent_data_ref *dref;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	struct list_head data_refs = LIST_HEAD_INIT(data_refs);
	struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
	struct __data_ref *ref_d;
	struct __shared_ref *ref_s;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	/* first we iterate the inline refs, ... */
	do {
		last = __get_extent_inline_ref(&ptr, eb, ei, item_size,
						&eiref, &type);
		if (last == -ENOENT) {
			ret = 0;
			break;
		}
		if (last < 0) {
			ret = last;
			break;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			dref = (struct btrfs_extent_data_ref *)(&eiref->offset);
			ret = __data_list_add_eb(&data_refs, eb, dref);
		} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
			logical = btrfs_extent_inline_ref_offset(eb, eiref);
			ret = __shared_list_add(&shared_refs, logical);
		}
	} while (!ret && !last);

	/* ... then we proceed to in-tree references and ... */
	while (!ret) {
		++path->slots[0];
		/* valid slots are 0 .. nritems - 1; move on at nritems */
		if (path->slots[0] >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(fs_info->extent_root, path);
			if (ret) {
				if (ret == 1)
					ret = 0; /* we're done */
				break;
			}
			eb = path->nodes[0];
		}
		btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
		if (key.objectid != extent_item_objectid)
			break;
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
			dref = btrfs_item_ptr(eb, path->slots[0],
						struct btrfs_extent_data_ref);
			ret = __data_list_add_eb(&data_refs, eb, dref);
		} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
			ret = __shared_list_add(&shared_refs, key.offset);
		}
	}

	btrfs_release_path(path);

	/*
	 * ... only at the very end we can process the refs we found. this is
	 * because the iterator function we call is allowed to make tree lookups
	 * and we have to avoid deadlocks. additionally, we need more tree
	 * lookups ourselves for shared data refs.
	 */
	while (!list_empty(&data_refs)) {
		ref_d = list_first_entry(&data_refs, struct __data_ref, list);
		list_del(&ref_d->list);
		if (!ret)
			ret = iterate(ref_d->inum, extent_offset +
					ref_d->extent_data_item_offset,
					ref_d->root, ctx);
		kfree(ref_d);
	}

	while (!list_empty(&shared_refs)) {
		ref_s = list_first_entry(&shared_refs, struct __shared_ref,
					list);
		list_del(&ref_s->list);
		if (!ret)
			ret = __iter_shared_inline_ref(fs_info,
							ref_s->disk_byte,
							extent_item_objectid,
							extent_offset, path,
							&data_refs,
							iterate, ctx);
		kfree(ref_s);
	}

	return ret;
}

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	u64 offset;
	struct btrfs_key found_key;

	ret = extent_from_logical(fs_info, logical, path,
					&found_key);
	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = -EINVAL;
	if (ret < 0)
		return ret;

	offset = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, path, found_key.objectid,
					offset, iterate, ctx);

	return ret;
}

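/*
 * Usage sketch (illustrative, not part of the original file): a caller-defined
 * callback matching iterate_extent_inodes_t; the names below are hypothetical:
 *
 *	static int count_inodes(u64 inum, u64 offset, u64 root, void *ctx)
 *	{
 *		u64 *count = ctx;
 *
 *		++*count;
 *		return 0;	(a non-zero value would stop the iteration)
 *	}
 *
 *	u64 count = 0;
 *	ret = iterate_inodes_from_logical(logical, fs_info, path,
 *						count_inodes, &count);
 */
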
static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
				struct btrfs_path *path,
				iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (1) {
		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
					&found_key);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		atomic_inc(&eb->refs);
		btrfs_release_path(path);

		item = btrfs_item_nr(eb, slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			ret = iterate(parent, iref, eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		/* drop the extra reference taken above, exactly once */
		free_extent_buffer(eb);
		if (ret)
			break;
	}

	btrfs_release_path(path);

	return ret;
}

/*
 * returns 0 if the path could be dumped (probably truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
				struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
			ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = iref_to_path(ipath->fs_root, ipath->btrfs_path, iref, eb,
				inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->val[i] = (u64)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

/*
 * this dumps all file system paths to the inode into the ipath struct, provided
 * it has been created large enough. each path is zero-terminated and accessed
 * from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
				inode_to_path, ipath);
}

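/*
 * Usage sketch (illustrative, not part of the original file): the full
 * allocate - query - free cycle; the 4096-byte size is an arbitrary example:
 *
 *	struct inode_fs_paths *ipath;
 *
 *	ipath = init_ipath(4096, fs_root, path);
 *	if (IS_ERR(ipath))
 *		return PTR_ERR(ipath);
 *	ret = paths_from_inode(inum, ipath);
 *	for (i = 0; !ret && i < ipath->fspath->elem_cnt; ++i)
 *		... ipath->fspath->val[i] holds one 0-terminated path ...
 *	if (ipath->fspath->elem_missed)
 *		... buffer was too small, bytes_missing says by how much ...
 *	free_ipath(ipath);
 */
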
/*
 * allocates a buffer to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct btrfs_data_container).
 * the returned pointer must be freed with kfree() in the end (init_ipath()
 * callers free it via free_ipath() instead).
 */
struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kmalloc(alloc_bytes, GFP_NOFS);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct btrfs_data_container).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
					struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return (void *)fspath;

	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
	if (!ifp) {
		kfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	/* also free the data container allocated in init_ipath() */
	kfree(ipath->fspath);
	kfree(ipath);
}