
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * extent_map.c
 *
 * Block/Cluster mapping functions
 *
 * Copyright (C) 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License, version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fiemap.h>

#define MLOG_MASK_PREFIX ML_EXTENT_MAP
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "inode.h"
#include "super.h"
#include "symlink.h"

#include "buffer_head_io.h"

/*
 * The extent caching implementation is intentionally trivial.
 *
 * We only cache a small number of extents stored directly on the
 * inode, so linear order operations are acceptable. If we ever want
 * to increase the size of the extent map, then these algorithms must
 * get smarter.
 */
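
/*
 * For reference, a sketch of the structures this file manipulates,
 * as declared in extent_map.h (see that header for the authoritative
 * definitions; this is only a reader's aid):
 *
 *	struct ocfs2_extent_map_item {
 *		unsigned int		ei_cpos;	(logical start, in clusters)
 *		unsigned int		ei_phys;	(physical start, in clusters)
 *		unsigned int		ei_clusters;	(length, in clusters)
 *		unsigned int		ei_flags;
 *		struct list_head	ei_list;	(MRU list linkage)
 *	};
 *
 *	struct ocfs2_extent_map {
 *		struct list_head	em_list;	(most recently used first)
 *		unsigned int		em_num_items;	(bounded by OCFS2_MAX_EXTENT_MAP_ITEMS)
 *	};
 */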

void ocfs2_extent_map_init(struct inode *inode)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	oi->ip_extent_map.em_num_items = 0;
	INIT_LIST_HEAD(&oi->ip_extent_map.em_list);
}

static void __ocfs2_extent_map_lookup(struct ocfs2_extent_map *em,
				      unsigned int cpos,
				      struct ocfs2_extent_map_item **ret_emi)
{
	unsigned int range;
	struct ocfs2_extent_map_item *emi;

	*ret_emi = NULL;

	list_for_each_entry(emi, &em->em_list, ei_list) {
		range = emi->ei_cpos + emi->ei_clusters;

		if (cpos >= emi->ei_cpos && cpos < range) {
			list_move(&emi->ei_list, &em->em_list);

			*ret_emi = emi;
			break;
		}
	}
}
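
/*
 * The list_move() above implements the MRU policy: a cache holding
 * items A, B, C (most recent first) that hits on B is reordered to
 * B, A, C, so the eviction in ocfs2_extent_map_insert_rec() always
 * claims the list tail. A miss leaves *ret_emi NULL and the order
 * unchanged.
 */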

static int ocfs2_extent_map_lookup(struct inode *inode, unsigned int cpos,
				   unsigned int *phys, unsigned int *len,
				   unsigned int *flags)
{
	unsigned int coff;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_extent_map_item *emi;

	spin_lock(&oi->ip_lock);

	__ocfs2_extent_map_lookup(&oi->ip_extent_map, cpos, &emi);
	if (emi) {
		coff = cpos - emi->ei_cpos;
		*phys = emi->ei_phys + coff;
		if (len)
			*len = emi->ei_clusters - coff;
		if (flags)
			*flags = emi->ei_flags;
	}

	spin_unlock(&oi->ip_lock);

	if (emi == NULL)
		return -ENOENT;

	return 0;
}
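
/*
 * Worked example: a cached item (ei_cpos = 10, ei_phys = 100,
 * ei_clusters = 5) answers a lookup of cpos 12 with coff = 2, so
 * *phys = 102 and *len = 3 - the physical cluster plus the length
 * remaining from the queried cluster to the end of the extent.
 */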

/*
 * Forget about all clusters equal to or greater than cpos.
 */
void ocfs2_extent_map_trunc(struct inode *inode, unsigned int cpos)
{
	struct ocfs2_extent_map_item *emi, *n;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_extent_map *em = &oi->ip_extent_map;
	LIST_HEAD(tmp_list);
	unsigned int range;

	spin_lock(&oi->ip_lock);

	list_for_each_entry_safe(emi, n, &em->em_list, ei_list) {
		if (emi->ei_cpos >= cpos) {
			/* Full truncate of this record. */
			list_move(&emi->ei_list, &tmp_list);
			BUG_ON(em->em_num_items == 0);
			em->em_num_items--;
			continue;
		}

		range = emi->ei_cpos + emi->ei_clusters;
		if (range > cpos) {
			/* Partial truncate */
			emi->ei_clusters = cpos - emi->ei_cpos;
		}
	}

	spin_unlock(&oi->ip_lock);

	list_for_each_entry_safe(emi, n, &tmp_list, ei_list) {
		list_del(&emi->ei_list);
		kfree(emi);
	}
}
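
/*
 * Example: truncating at cpos 20 removes an item covering clusters
 * [25, 30) outright, while an item covering [15, 25) is clipped to
 * 5 clusters, i.e. [15, 20). Items are unhooked onto tmp_list under
 * ip_lock but freed only after the lock is dropped, keeping the
 * critical section short.
 */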

/*
 * Is any part of emi2 contained within emi1
 */
static int ocfs2_ei_is_contained(struct ocfs2_extent_map_item *emi1,
				 struct ocfs2_extent_map_item *emi2)
{
	unsigned int range1, range2;

	/*
	 * Check if logical start of emi2 is inside emi1
	 */
	range1 = emi1->ei_cpos + emi1->ei_clusters;
	if (emi2->ei_cpos >= emi1->ei_cpos && emi2->ei_cpos < range1)
		return 1;

	/*
	 * Check if logical end of emi2 is inside emi1
	 */
	range2 = emi2->ei_cpos + emi2->ei_clusters;
	if (range2 > emi1->ei_cpos && range2 <= range1)
		return 1;

	return 0;
}
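
/*
 * Example: with emi1 covering clusters [10, 30), emi2 = [25, 35)
 * hits the first test and emi2 = [5, 15) hits the second, while
 * [0, 10) matches neither. Note that emi2 spanning emi1 entirely
 * (e.g. [5, 35)) is caught by neither endpoint test, which is why
 * the merge code below checks containment in both directions.
 */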

static void ocfs2_copy_emi_fields(struct ocfs2_extent_map_item *dest,
				  struct ocfs2_extent_map_item *src)
{
	dest->ei_cpos = src->ei_cpos;
	dest->ei_phys = src->ei_phys;
	dest->ei_clusters = src->ei_clusters;
	dest->ei_flags = src->ei_flags;
}

/*
 * Try to merge emi with ins. Returns 1 if merge succeeds, zero
 * otherwise.
 */
static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi,
					 struct ocfs2_extent_map_item *ins)
{
	/*
	 * Handle contiguousness
	 */
	if (ins->ei_phys == (emi->ei_phys + emi->ei_clusters) &&
	    ins->ei_cpos == (emi->ei_cpos + emi->ei_clusters) &&
	    ins->ei_flags == emi->ei_flags) {
		emi->ei_clusters += ins->ei_clusters;
		return 1;
	} else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys &&
		   (ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos &&
		   ins->ei_flags == emi->ei_flags) {
		emi->ei_phys = ins->ei_phys;
		emi->ei_cpos = ins->ei_cpos;
		emi->ei_clusters += ins->ei_clusters;
		return 1;
	}

	/*
	 * Overlapping extents - this shouldn't happen unless we've
	 * split an extent to change its flags. That is exceedingly
	 * rare, so there's no sense in trying to optimize it yet.
	 */
	if (ocfs2_ei_is_contained(emi, ins) ||
	    ocfs2_ei_is_contained(ins, emi)) {
		ocfs2_copy_emi_fields(emi, ins);
		return 1;
	}

	/* No merge was possible. */
	return 0;
}
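
/*
 * Example of the contiguousness cases above: with emi = (cpos 0,
 * phys 100, 4 clusters) and ins = (cpos 4, phys 104, 2 clusters),
 * the first branch extends emi to 6 clusters. If ins instead ends
 * exactly where emi begins, the second branch absorbs it by moving
 * emi's start back. Both require matching flags, since e_flags
 * (e.g. unwritten) must be uniform within a single cached item.
 */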

/*
 * In order to reduce complexity on the caller, this insert function
 * is intentionally liberal in what it will accept.
 *
 * The only rule is that the truncate call *must* be used whenever
 * records have been deleted. This avoids inserting overlapping
 * records with different physical mappings.
 */
void ocfs2_extent_map_insert_rec(struct inode *inode,
				 struct ocfs2_extent_rec *rec)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_extent_map *em = &oi->ip_extent_map;
	struct ocfs2_extent_map_item *emi, *new_emi = NULL;
	struct ocfs2_extent_map_item ins;

	ins.ei_cpos = le32_to_cpu(rec->e_cpos);
	ins.ei_phys = ocfs2_blocks_to_clusters(inode->i_sb,
					       le64_to_cpu(rec->e_blkno));
	ins.ei_clusters = le16_to_cpu(rec->e_leaf_clusters);
	ins.ei_flags = rec->e_flags;

search:
	spin_lock(&oi->ip_lock);

	list_for_each_entry(emi, &em->em_list, ei_list) {
		if (ocfs2_try_to_merge_extent_map(emi, &ins)) {
			list_move(&emi->ei_list, &em->em_list);
			spin_unlock(&oi->ip_lock);
			goto out;
		}
	}

	/*
	 * No item could be merged.
	 *
	 * Either allocate and add a new item, or overwrite the least
	 * recently inserted.
	 */

	if (em->em_num_items < OCFS2_MAX_EXTENT_MAP_ITEMS) {
		if (new_emi == NULL) {
			spin_unlock(&oi->ip_lock);

			new_emi = kmalloc(sizeof(*new_emi), GFP_NOFS);
			if (new_emi == NULL)
				goto out;

			goto search;
		}

		ocfs2_copy_emi_fields(new_emi, &ins);
		list_add(&new_emi->ei_list, &em->em_list);
		em->em_num_items++;
		new_emi = NULL;
	} else {
		BUG_ON(list_empty(&em->em_list) || em->em_num_items == 0);
		emi = list_entry(em->em_list.prev,
				 struct ocfs2_extent_map_item, ei_list);
		list_move(&emi->ei_list, &em->em_list);
		ocfs2_copy_emi_fields(emi, &ins);
	}

	spin_unlock(&oi->ip_lock);

out:
	if (new_emi)
		kfree(new_emi);
}
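
/*
 * A note on the search/retry dance above: kmalloc(GFP_NOFS) may
 * sleep, so the spinlock is dropped before allocating and the list
 * is walked a second time under the lock, in case another thread
 * inserted a mergeable item in the window. If the retry merges, the
 * preallocated item is simply freed at "out"; a failed allocation is
 * silently dropped, since the cache is best-effort.
 */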

static int ocfs2_last_eb_is_empty(struct inode *inode,
				  struct ocfs2_dinode *di)
{
	int ret, next_free;
	u64 last_eb_blk = le64_to_cpu(di->i_last_eb_blk);
	struct buffer_head *eb_bh = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *el;

	ret = ocfs2_read_extent_block(INODE_CACHE(inode), last_eb_blk, &eb_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	eb = (struct ocfs2_extent_block *) eb_bh->b_data;
	el = &eb->h_list;

	if (el->l_tree_depth) {
		ocfs2_error(inode->i_sb,
			    "Inode %lu has non zero tree depth in "
			    "leaf block %llu\n", inode->i_ino,
			    (unsigned long long)eb_bh->b_blocknr);
		ret = -EROFS;
		goto out;
	}

	next_free = le16_to_cpu(el->l_next_free_rec);

	if (next_free == 0 ||
	    (next_free == 1 && ocfs2_is_empty_extent(&el->l_recs[0])))
		ret = 1;

out:
	brelse(eb_bh);
	return ret;
}

/*
 * Return the 1st index within el which contains an extent start
 * larger than v_cluster.
 */
static int ocfs2_search_for_hole_index(struct ocfs2_extent_list *el,
				       u32 v_cluster)
{
	int i;
	struct ocfs2_extent_rec *rec;

	for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
		rec = &el->l_recs[i];

		if (v_cluster < le32_to_cpu(rec->e_cpos))
			break;
	}

	return i;
}
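
/*
 * Example: for records starting at cpos 0, 16 and 48, a search for
 * v_cluster 20 returns index 2 (the record at 48 is the first whose
 * start exceeds 20). Searching past every start returns
 * l_next_free_rec, which callers treat as "no extent follows".
 */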

/*
 * Figure out the size of a hole which starts at v_cluster within the given
 * extent list.
 *
 * If there is no more allocation past v_cluster, we return the maximum
 * cluster size minus v_cluster.
 *
 * If we have in-inode extents, then el points to the dinode list and
 * eb_bh is NULL. Otherwise, eb_bh should point to the extent block
 * containing el.
 */
int ocfs2_figure_hole_clusters(struct ocfs2_caching_info *ci,
			       struct ocfs2_extent_list *el,
			       struct buffer_head *eb_bh,
			       u32 v_cluster,
			       u32 *num_clusters)
{
	int ret, i;
	struct buffer_head *next_eb_bh = NULL;
	struct ocfs2_extent_block *eb, *next_eb;

	i = ocfs2_search_for_hole_index(el, v_cluster);

	if (i == le16_to_cpu(el->l_next_free_rec) && eb_bh) {
		eb = (struct ocfs2_extent_block *)eb_bh->b_data;

		/*
		 * Check the next leaf for any extents.
		 */
		if (le64_to_cpu(eb->h_next_leaf_blk) == 0ULL)
			goto no_more_extents;

		ret = ocfs2_read_extent_block(ci,
					      le64_to_cpu(eb->h_next_leaf_blk),
					      &next_eb_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		next_eb = (struct ocfs2_extent_block *)next_eb_bh->b_data;
		el = &next_eb->h_list;

		i = ocfs2_search_for_hole_index(el, v_cluster);
	}

no_more_extents:
	if (i == le16_to_cpu(el->l_next_free_rec)) {
		/*
		 * We're at the end of our existing allocation. Just
		 * return the maximum number of clusters we could
		 * possibly allocate.
		 */
		*num_clusters = UINT_MAX - v_cluster;
	} else {
		*num_clusters = le32_to_cpu(el->l_recs[i].e_cpos) - v_cluster;
	}

	ret = 0;

out:
	brelse(next_eb_bh);
	return ret;
}
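
/*
 * Example: a hole at v_cluster 20 with the next extent starting at
 * cpos 48 yields *num_clusters = 28. If no allocation follows at
 * all, the hole is reported as running to the end of the
 * addressable range, UINT_MAX - 20.
 */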

static int ocfs2_get_clusters_nocache(struct inode *inode,
				      struct buffer_head *di_bh,
				      u32 v_cluster, unsigned int *hole_len,
				      struct ocfs2_extent_rec *ret_rec,
				      unsigned int *is_last)
{
	int i, ret, tree_height, len;
	struct ocfs2_dinode *di;
	struct ocfs2_extent_block *uninitialized_var(eb);
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_rec *rec;
	struct buffer_head *eb_bh = NULL;

	memset(ret_rec, 0, sizeof(*ret_rec));
	if (is_last)
		*is_last = 0;

	di = (struct ocfs2_dinode *) di_bh->b_data;
	el = &di->id2.i_list;
	tree_height = le16_to_cpu(el->l_tree_depth);

	if (tree_height > 0) {
		ret = ocfs2_find_leaf(INODE_CACHE(inode), el, v_cluster,
				      &eb_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;

		if (el->l_tree_depth) {
			ocfs2_error(inode->i_sb,
				    "Inode %lu has non zero tree depth in "
				    "leaf block %llu\n", inode->i_ino,
				    (unsigned long long)eb_bh->b_blocknr);
			ret = -EROFS;
			goto out;
		}
	}

	i = ocfs2_search_extent_list(el, v_cluster);
	if (i == -1) {
		/*
		 * Holes can be larger than the maximum size of an
		 * extent, so we return their lengths in a separate
		 * field.
		 */
		if (hole_len) {
			ret = ocfs2_figure_hole_clusters(INODE_CACHE(inode),
							 el, eb_bh,
							 v_cluster, &len);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			*hole_len = len;
		}
		goto out_hole;
	}

	rec = &el->l_recs[i];

	BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos));

	if (!rec->e_blkno) {
		ocfs2_error(inode->i_sb, "Inode %lu has bad extent "
			    "record (%u, %u, 0)", inode->i_ino,
			    le32_to_cpu(rec->e_cpos),
			    ocfs2_rec_clusters(el, rec));
		ret = -EROFS;
		goto out;
	}

	*ret_rec = *rec;

	/*
	 * Checking for last extent is potentially expensive - we
	 * might have to look at the next leaf over to see if it's
	 * empty.
	 *
	 * The first two checks are to see whether the caller even
	 * cares for this information, and if the extent is at least
	 * the last in its list.
	 *
	 * If those hold true, then the extent is last if any of the
	 * additional conditions hold true:
	 *  - Extent list is in-inode
	 *  - Extent list is right-most
	 *  - Extent list is 2nd to rightmost, with empty right-most
	 */
	if (is_last) {
		if (i == (le16_to_cpu(el->l_next_free_rec) - 1)) {
			if (tree_height == 0)
				*is_last = 1;
			else if (eb->h_blkno == di->i_last_eb_blk)
				*is_last = 1;
			else if (eb->h_next_leaf_blk == di->i_last_eb_blk) {
				ret = ocfs2_last_eb_is_empty(inode, di);
				if (ret < 0) {
					mlog_errno(ret);
					goto out;
				}
				if (ret == 1)
					*is_last = 1;
			}
		}
	}

out_hole:
	ret = 0;
out:
	brelse(eb_bh);
	return ret;
}

static void ocfs2_relative_extent_offsets(struct super_block *sb,
					  u32 v_cluster,
					  struct ocfs2_extent_rec *rec,
					  u32 *p_cluster, u32 *num_clusters)
{
	u32 coff = v_cluster - le32_to_cpu(rec->e_cpos);

	*p_cluster = ocfs2_blocks_to_clusters(sb, le64_to_cpu(rec->e_blkno));
	*p_cluster = *p_cluster + coff;

	if (num_clusters)
		*num_clusters = le16_to_cpu(rec->e_leaf_clusters) - coff;
}
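
/*
 * Example: for a record with e_cpos = 8, e_blkno translating to
 * physical cluster 200 and e_leaf_clusters = 16, a query at
 * v_cluster 12 gives coff = 4, so *p_cluster = 204 and
 * *num_clusters = 12, the clusters left in the record.
 */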

int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster,
			     u32 *p_cluster, u32 *num_clusters,
			     struct ocfs2_extent_list *el,
			     unsigned int *extent_flags)
{
	int ret = 0, i;
	struct buffer_head *eb_bh = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_rec *rec;
	u32 coff;

	if (el->l_tree_depth) {
		ret = ocfs2_find_leaf(INODE_CACHE(inode), el, v_cluster,
				      &eb_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;

		if (el->l_tree_depth) {
			ocfs2_error(inode->i_sb,
				    "Inode %lu has non zero tree depth in "
				    "xattr leaf block %llu\n", inode->i_ino,
				    (unsigned long long)eb_bh->b_blocknr);
			ret = -EROFS;
			goto out;
		}
	}

	i = ocfs2_search_extent_list(el, v_cluster);
	if (i == -1) {
		ret = -EROFS;
		mlog_errno(ret);
		goto out;
	} else {
		rec = &el->l_recs[i];
		BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos));

		if (!rec->e_blkno) {
			ocfs2_error(inode->i_sb, "Inode %lu has bad extent "
				    "record (%u, %u, 0) in xattr", inode->i_ino,
				    le32_to_cpu(rec->e_cpos),
				    ocfs2_rec_clusters(el, rec));
			ret = -EROFS;
			goto out;
		}
		coff = v_cluster - le32_to_cpu(rec->e_cpos);
		*p_cluster = ocfs2_blocks_to_clusters(inode->i_sb,
						      le64_to_cpu(rec->e_blkno));
		*p_cluster = *p_cluster + coff;
		if (num_clusters)
			*num_clusters = ocfs2_rec_clusters(el, rec) - coff;

		if (extent_flags)
			*extent_flags = rec->e_flags;
	}
out:
	if (eb_bh)
		brelse(eb_bh);
	return ret;
}

int ocfs2_get_clusters(struct inode *inode, u32 v_cluster,
		       u32 *p_cluster, u32 *num_clusters,
		       unsigned int *extent_flags)
{
	int ret;
	unsigned int uninitialized_var(hole_len), flags = 0;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_extent_rec rec;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = -ERANGE;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_extent_map_lookup(inode, v_cluster, p_cluster,
				      num_clusters, extent_flags);
	if (ret == 0)
		goto out;

	ret = ocfs2_read_inode_block(inode, &di_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_get_clusters_nocache(inode, di_bh, v_cluster, &hole_len,
					 &rec, NULL);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (rec.e_blkno == 0ULL) {
		/*
		 * A hole was found. Return some canned values that
		 * callers can key on. If asked for, num_clusters will
		 * be populated with the size of the hole.
		 */
		*p_cluster = 0;
		if (num_clusters) {
			*num_clusters = hole_len;
		}
	} else {
		ocfs2_relative_extent_offsets(inode->i_sb, v_cluster, &rec,
					      p_cluster, num_clusters);
		flags = rec.e_flags;

		ocfs2_extent_map_insert_rec(inode, &rec);
	}

	if (extent_flags)
		*extent_flags = flags;

out:
	brelse(di_bh);
	return ret;
}
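
/*
 * ocfs2_get_clusters() is the common lookup path: a hit in the MRU
 * cache avoids disk I/O entirely, a miss falls back to walking the
 * on-disk tree via ocfs2_get_clusters_nocache(), and any real
 * extent found there is seeded back into the cache for next time.
 */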

/*
 * This expects alloc_sem to be held. The allocation cannot change at
 * all while the map is in the process of being updated.
 */
int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno,
				u64 *ret_count, unsigned int *extent_flags)
{
	int ret;
	int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
	u32 cpos, num_clusters, p_cluster;
	u64 boff = 0;

	cpos = ocfs2_blocks_to_clusters(inode->i_sb, v_blkno);

	ret = ocfs2_get_clusters(inode, cpos, &p_cluster, &num_clusters,
				 extent_flags);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * p_cluster == 0 indicates a hole.
	 */
	if (p_cluster) {
		boff = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
		boff += (v_blkno & (u64)(bpc - 1));
	}

	*p_blkno = boff;

	if (ret_count) {
		*ret_count = ocfs2_clusters_to_blocks(inode->i_sb, num_clusters);
		*ret_count -= v_blkno & (u64)(bpc - 1);
	}

out:
	return ret;
}
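
/*
 * Example of the block arithmetic, assuming 4 blocks per cluster
 * (bpc = 4): v_blkno 10 lives in cpos 2; if that maps to p_cluster
 * 50 over num_clusters = 3, then boff = 50 * 4 + (10 & 3) = 202 and
 * *ret_count = 3 * 4 - 2 = 10 contiguous blocks from v_blkno on.
 */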

/*
 * The name ocfs2_fiemap_inline() is a little misleading: it handles
 * fiemap not only for inline-data files but also for fast symlinks,
 * since the two are no different as far as extent mapping goes.
 */
static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
			       struct fiemap_extent_info *fieinfo,
			       u64 map_start)
{
	int ret;
	unsigned int id_count;
	struct ocfs2_dinode *di;
	u64 phys;
	u32 flags = FIEMAP_EXTENT_DATA_INLINE|FIEMAP_EXTENT_LAST;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	di = (struct ocfs2_dinode *)di_bh->b_data;
	if (ocfs2_inode_is_fast_symlink(inode))
		id_count = ocfs2_fast_symlink_chars(inode->i_sb);
	else
		id_count = le16_to_cpu(di->id2.i_data.id_count);

	if (map_start < id_count) {
		phys = oi->ip_blkno << inode->i_sb->s_blocksize_bits;
		if (ocfs2_inode_is_fast_symlink(inode))
			phys += offsetof(struct ocfs2_dinode, id2.i_symlink);
		else
			phys += offsetof(struct ocfs2_dinode,
					 id2.i_data.id_data);

		ret = fiemap_fill_next_extent(fieinfo, 0, phys, id_count,
					      flags);
		if (ret < 0)
			return ret;
	}

	return 0;
}

#define OCFS2_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC)

int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 map_start, u64 map_len)
{
	int ret, is_last;
	u32 mapping_end, cpos;
	unsigned int hole_size;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	u64 len_bytes, phys_bytes, virt_bytes;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_extent_rec rec;

	ret = fiemap_check_flags(fieinfo, OCFS2_FIEMAP_FLAGS);
	if (ret)
		return ret;

	ret = ocfs2_inode_lock(inode, &di_bh, 0);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	down_read(&OCFS2_I(inode)->ip_alloc_sem);

	/*
	 * Handle inline-data and fast symlink separately.
	 */
	if ((OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) ||
	    ocfs2_inode_is_fast_symlink(inode)) {
		ret = ocfs2_fiemap_inline(inode, di_bh, fieinfo, map_start);
		goto out_unlock;
	}

	cpos = map_start >> osb->s_clustersize_bits;
	mapping_end = ocfs2_clusters_for_bytes(inode->i_sb,
					       map_start + map_len);
	mapping_end -= cpos;
	is_last = 0;
	while (cpos < mapping_end && !is_last) {
		u32 fe_flags;

		ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos,
						 &hole_size, &rec, &is_last);
		if (ret) {
			mlog_errno(ret);
			/*
			 * Bail through out_unlock, not out, so the
			 * error path drops ip_alloc_sem and the
			 * cluster lock taken above.
			 */
			goto out_unlock;
		}

		if (rec.e_blkno == 0ULL) {
			cpos += hole_size;
			continue;
		}

		fe_flags = 0;
		if (rec.e_flags & OCFS2_EXT_UNWRITTEN)
			fe_flags |= FIEMAP_EXTENT_UNWRITTEN;
		if (rec.e_flags & OCFS2_EXT_REFCOUNTED)
			fe_flags |= FIEMAP_EXTENT_SHARED;
		if (is_last)
			fe_flags |= FIEMAP_EXTENT_LAST;
		len_bytes = (u64)le16_to_cpu(rec.e_leaf_clusters) << osb->s_clustersize_bits;
		phys_bytes = le64_to_cpu(rec.e_blkno) << osb->sb->s_blocksize_bits;
		virt_bytes = (u64)le32_to_cpu(rec.e_cpos) << osb->s_clustersize_bits;

		ret = fiemap_fill_next_extent(fieinfo, virt_bytes, phys_bytes,
					      len_bytes, fe_flags);
		if (ret)
			break;

		cpos = le32_to_cpu(rec.e_cpos) + le16_to_cpu(rec.e_leaf_clusters);
	}

	if (ret > 0)
		ret = 0;

out_unlock:
	brelse(di_bh);

	up_read(&OCFS2_I(inode)->ip_alloc_sem);

	ocfs2_inode_unlock(inode, 0);

out:
	return ret;
}

int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
			   struct buffer_head *bhs[], int flags,
			   int (*validate)(struct super_block *sb,
					   struct buffer_head *bh))
{
	int rc = 0;
	u64 p_block, p_count;
	int i, count, done = 0;

	mlog_entry("(inode = %p, v_block = %llu, nr = %d, bhs = %p, "
		   "flags = %x, validate = %p)\n",
		   inode, (unsigned long long)v_block, nr, bhs, flags,
		   validate);

	if (((v_block + nr - 1) << inode->i_sb->s_blocksize_bits) >=
	    i_size_read(inode)) {
		BUG_ON(!(flags & OCFS2_BH_READAHEAD));
		goto out;
	}

	while (done < nr) {
		down_read(&OCFS2_I(inode)->ip_alloc_sem);
		rc = ocfs2_extent_map_get_blocks(inode, v_block + done,
						 &p_block, &p_count, NULL);
		up_read(&OCFS2_I(inode)->ip_alloc_sem);
		if (rc) {
			mlog_errno(rc);
			break;
		}

		if (!p_block) {
			rc = -EIO;
			mlog(ML_ERROR,
			     "Inode #%llu contains a hole at offset %llu\n",
			     (unsigned long long)OCFS2_I(inode)->ip_blkno,
			     (unsigned long long)(v_block + done) <<
			     inode->i_sb->s_blocksize_bits);
			break;
		}

		count = nr - done;
		if (p_count < count)
			count = p_count;

		/*
		 * If the caller passed us bhs, they should have come
		 * from a previous readahead call to this function. Thus,
		 * they should have the right b_blocknr.
		 */
		for (i = 0; i < count; i++) {
			if (!bhs[done + i])
				continue;
			BUG_ON(bhs[done + i]->b_blocknr != (p_block + i));
		}

		rc = ocfs2_read_blocks(INODE_CACHE(inode), p_block, count,
				       bhs + done, flags, validate);
		if (rc) {
			mlog_errno(rc);
			break;
		}

		done += count;
	}

out:
	mlog_exit(rc);
	return rc;
}
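
/*
 * A minimal usage sketch (illustrative only; the validator name is
 * borrowed from dir.c, where ocfs2_read_dir_block() calls this
 * function in roughly this shape):
 *
 *	struct buffer_head *bh = NULL;
 *	int err;
 *
 *	err = ocfs2_read_virt_blocks(inode, v_block, 1, &bh, 0,
 *				     ocfs2_validate_dir_block);
 *	if (!err) {
 *		... use bh->b_data ...
 *		brelse(bh);
 *	}
 *
 * Each iteration of the loop above maps one contiguous physical run
 * and reads it in a single ocfs2_read_blocks() call, so nr virtual
 * blocks cost at most one read per physical extent they span.
 */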