/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * move_extents.c
 *
 * Copyright (C) 2011 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/mount.h>
#include <linux/swap.h>

#include <cluster/masklog.h>

#include "ocfs2.h"
#include "ocfs2_ioctl.h"

#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "inode.h"
#include "journal.h"
#include "suballoc.h"
#include "uptodate.h"
#include "super.h"
#include "dir.h"
#include "buffer_head_io.h"
#include "sysfile.h"
#include "refcounttree.h"
#include "move_extents.h"
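
/*
 * Bookkeeping for one move/defrag request: the target inode and file,
 * the user-supplied range, the extent tree and allocation contexts used
 * while relocating clusters, and running totals reported back to
 * userspace.
 */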
struct ocfs2_move_extents_context {
	struct inode *inode;
	struct file *file;
	int auto_defrag;
	int partial;
	int credits;
	u32 new_phys_cpos;
	u32 clusters_moved;
	u64 refcount_loc;
	struct ocfs2_move_extents *range;
	struct ocfs2_extent_tree et;
	struct ocfs2_alloc_context *meta_ac;
	struct ocfs2_alloc_context *data_ac;
	struct ocfs2_cached_dealloc_ctxt dealloc;
};
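
/*
 * Relocate one extent: copy 'len' clusters of data from the old
 * physical location 'p_cpos' to 'new_p_cpos', rewrite the extent record
 * via a split, then release the old clusters through the refcount tree
 * or the truncate log.
 */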
static int __ocfs2_move_extent(handle_t *handle,
			       struct ocfs2_move_extents_context *context,
			       u32 cpos, u32 len, u32 p_cpos, u32 new_p_cpos,
			       int ext_flags)
{
	int ret = 0, index;
	struct inode *inode = context->inode;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_extent_rec *rec, replace_rec;
	struct ocfs2_path *path = NULL;
	struct ocfs2_extent_list *el;
	u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
	u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);

	ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos,
					       p_cpos, new_p_cpos, len);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	memset(&replace_rec, 0, sizeof(replace_rec));
	replace_rec.e_cpos = cpu_to_le32(cpos);
	replace_rec.e_leaf_clusters = cpu_to_le16(len);
	replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(inode->i_sb,
								   new_p_cpos));

	path = ocfs2_new_path_from_et(&context->et);
	if (!path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_path(INODE_CACHE(inode), path, cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	el = path_leaf_el(path);

	index = ocfs2_search_extent_list(el, cpos);
	if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
		ocfs2_error(inode->i_sb,
			    "Inode %llu has an extent at cpos %u which can no "
			    "longer be found.\n",
			    (unsigned long long)ino, cpos);
		ret = -EROFS;
		goto out;
	}

	rec = &el->l_recs[index];

	BUG_ON(ext_flags != rec->e_flags);
	/*
	 * After moving/defragging to the new location, the extent is no
	 * longer refcounted.
	 */
	replace_rec.e_flags = ext_flags & ~OCFS2_EXT_REFCOUNTED;

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
				      context->et.et_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_split_extent(handle, &context->et, path, index,
				 &replace_rec, context->meta_ac,
				 &context->dealloc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_journal_dirty(handle, context->et.et_root_bh);

	context->new_phys_cpos = new_p_cpos;

	/*
	 * Do we need to append the old clusters to the truncate log?
	 */
	if (old_blkno) {
		if (ext_flags & OCFS2_EXT_REFCOUNTED)
			ret = ocfs2_decrease_refcount(inode, handle,
					ocfs2_blocks_to_clusters(osb->sb,
								 old_blkno),
					len, context->meta_ac,
					&context->dealloc, 1);
		else
			ret = ocfs2_truncate_log_append(osb, handle,
							old_blkno, len);
	}

out:
	ocfs2_free_path(path);
	return ret;
}

/*
 * Lock the allocators, reserving the appropriate number of bits for
 * metadata blocks and data clusters.
 *
 * In some cases we don't need to reserve clusters; just pass a NULL
 * data_ac.
 */
static int ocfs2_lock_allocators_move_extents(struct inode *inode,
					struct ocfs2_extent_tree *et,
					u32 clusters_to_move,
					u32 extents_to_split,
					struct ocfs2_alloc_context **meta_ac,
					struct ocfs2_alloc_context **data_ac,
					int extra_blocks,
					int *credits)
{
	int ret, num_free_extents;
	unsigned int max_recs_needed = 2 * extents_to_split + clusters_to_move;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	num_free_extents = ocfs2_num_free_extents(osb, et);
	if (num_free_extents < 0) {
		ret = num_free_extents;
		mlog_errno(ret);
		goto out;
	}

	if (!num_free_extents ||
	    (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed))
		extra_blocks += ocfs2_extend_meta_needed(et->et_root_el);

	ret = ocfs2_reserve_new_metadata_blocks(osb, extra_blocks, meta_ac);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (data_ac) {
		ret = ocfs2_reserve_clusters(osb, clusters_to_move, data_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	*credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el,
					      clusters_to_move + 2);

	mlog(0, "reserve metadata_blocks: %d, data_clusters: %u, credits: %d\n",
	     extra_blocks, clusters_to_move, *credits);

out:
	if (ret) {
		if (*meta_ac) {
			ocfs2_free_alloc_context(*meta_ac);
			*meta_ac = NULL;
		}
	}

	return ret;
}

/*
 * Use one journal handle to guarantee data consistency in case a crash
 * happens anywhere.
 */
static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
			       u32 cpos, u32 phys_cpos, u32 *len, int ext_flags)
{
	int ret, credits = 0, extra_blocks = 0, partial = context->partial;
	handle_t *handle;
	struct inode *inode = context->inode;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct inode *tl_inode = osb->osb_tl_inode;
	struct ocfs2_refcount_tree *ref_tree = NULL;
	u32 new_phys_cpos, new_len;
	u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);

	if ((ext_flags & OCFS2_EXT_REFCOUNTED) && *len) {

		BUG_ON(!(OCFS2_I(inode)->ip_dyn_features &
			 OCFS2_HAS_REFCOUNT_FL));

		BUG_ON(!context->refcount_loc);

		ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
					       &ref_tree, NULL);
		if (ret) {
			mlog_errno(ret);
			return ret;
		}

		ret = ocfs2_prepare_refcount_change_for_del(inode,
							context->refcount_loc,
							phys_blkno,
							*len,
							&credits,
							&extra_blocks);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_lock_allocators_move_extents(inode, &context->et, *len, 1,
						 &context->meta_ac,
						 &context->data_ac,
						 extra_blocks, &credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Should we use the allocation reservation strategy here?
	 *
	 * if (context->data_ac)
	 *	context->data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
	 */

	mutex_lock(&tl_inode->i_mutex);

	if (ocfs2_truncate_log_needs_flush(osb)) {
		ret = __ocfs2_flush_truncate_log(osb);
		if (ret < 0) {
			mlog_errno(ret);
			goto out_unlock_mutex;
		}
	}

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_unlock_mutex;
	}

	ret = __ocfs2_claim_clusters(handle, context->data_ac, 1, *len,
				     &new_phys_cpos, &new_len);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * Allowing partial extent moving has pros and cons: it makes the
	 * whole defragmentation less likely to fail; on the other hand,
	 * it may leave the fs even more fragmented after moving.  Let
	 * userspace make the decision here.
	 */
	if (new_len != *len) {
		mlog(0, "len_claimed: %u, len: %u\n", new_len, *len);
		if (!partial) {
			context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE;
			ret = -ENOSPC;
			goto out_commit;
		}
	}

	mlog(0, "cpos: %u, phys_cpos: %u, new_phys_cpos: %u\n", cpos,
	     phys_cpos, new_phys_cpos);

	ret = __ocfs2_move_extent(handle, context, cpos, new_len, phys_cpos,
				  new_phys_cpos, ext_flags);
	if (ret)
		mlog_errno(ret);

	if (partial && (new_len != *len))
		*len = new_len;

	/*
	 * Here we should write the new page out first if we are
	 * in write-back mode.
	 */
	ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, *len);
	if (ret)
		mlog_errno(ret);

out_commit:
	ocfs2_commit_trans(osb, handle);

out_unlock_mutex:
	mutex_unlock(&tl_inode->i_mutex);

	if (context->data_ac) {
		ocfs2_free_alloc_context(context->data_ac);
		context->data_ac = NULL;
	}

	if (context->meta_ac) {
		ocfs2_free_alloc_context(context->meta_ac);
		context->meta_ac = NULL;
	}

out:
	if (ref_tree)
		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);

	return ret;
}

/*
 * Find the victim alloc group, where #blkno fits.
 */
static int ocfs2_find_victim_alloc_group(struct inode *inode,
					 u64 vict_blkno,
					 int type, int slot,
					 int *vict_bit,
					 struct buffer_head **ret_bh)
{
	int ret, i, blocks_per_unit = 1;
	u64 blkno;
	char namebuf[40];

	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *ac_bh = NULL, *gd_bh = NULL;
	struct ocfs2_chain_list *cl;
	struct ocfs2_chain_rec *rec;
	struct ocfs2_dinode *ac_dinode;
	struct ocfs2_group_desc *bg;

	ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type, slot);
	ret = ocfs2_lookup_ino_from_name(osb->sys_root_inode, namebuf,
					 strlen(namebuf), &blkno);
	if (ret) {
		ret = -ENOENT;
		goto out;
	}

	ret = ocfs2_read_blocks_sync(osb, blkno, 1, &ac_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ac_dinode = (struct ocfs2_dinode *)ac_bh->b_data;
	cl = &(ac_dinode->id2.i_chain);
	rec = &(cl->cl_recs[0]);

	if (type == GLOBAL_BITMAP_SYSTEM_INODE)
		blocks_per_unit <<= (osb->s_clustersize_bits -
				     inode->i_sb->s_blocksize_bits);
	/*
	 * 'vict_blkno' is out of the valid range.
	 */
	if ((vict_blkno < le64_to_cpu(rec->c_blkno)) ||
	    (vict_blkno >= (le32_to_cpu(ac_dinode->id1.bitmap1.i_total) *
				blocks_per_unit))) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) {

		rec = &(cl->cl_recs[i]);
		if (!rec)
			continue;

		bg = NULL;

		do {
			if (!bg)
				blkno = le64_to_cpu(rec->c_blkno);
			else
				blkno = le64_to_cpu(bg->bg_next_group);

			if (gd_bh) {
				brelse(gd_bh);
				gd_bh = NULL;
			}

			ret = ocfs2_read_blocks_sync(osb, blkno, 1, &gd_bh);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			bg = (struct ocfs2_group_desc *)gd_bh->b_data;

			if (vict_blkno < (le64_to_cpu(bg->bg_blkno) +
						le16_to_cpu(bg->bg_bits))) {

				*ret_bh = gd_bh;
				*vict_bit = (vict_blkno - blkno) /
							blocks_per_unit;
				mlog(0, "find the victim group: #%llu, "
				     "total_bits: %u, vict_bit: %u\n",
				     blkno, le16_to_cpu(bg->bg_bits),
				     *vict_bit);
				goto out;
			}

		} while (le64_to_cpu(bg->bg_next_group));
	}

	ret = -EINVAL;
out:
	brelse(ac_bh);

	/*
	 * The caller has to release gd_bh properly.
	 */
	return ret;
}

/*
 * XXX: helper to validate and adjust the moving goal.
 */
static int ocfs2_validate_and_adjust_move_goal(struct inode *inode,
					       struct ocfs2_move_extents *range)
{
	int ret, goal_bit = 0;

	struct buffer_head *gd_bh = NULL;
	struct ocfs2_group_desc *bg;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int c_to_b = 1 << (osb->s_clustersize_bits -
			   inode->i_sb->s_blocksize_bits);

	/*
	 * Validate that the goal sits within the global_bitmap, and
	 * return the victim group descriptor.
	 */
	ret = ocfs2_find_victim_alloc_group(inode, range->me_goal,
					    GLOBAL_BITMAP_SYSTEM_INODE,
					    OCFS2_INVALID_SLOT,
					    &goal_bit, &gd_bh);
	if (ret)
		goto out;

	bg = (struct ocfs2_group_desc *)gd_bh->b_data;

	/*
	 * Make the goal cluster-aligned.
	 */
	if (range->me_goal % c_to_b)
		range->me_goal = range->me_goal / c_to_b * c_to_b;

	/*
	 * The moving goal is not allowed to start with a group descriptor
	 * block (blk #0); compromise to the next cluster.
	 */
	if (range->me_goal == le64_to_cpu(bg->bg_blkno))
		range->me_goal += c_to_b;

	/*
	 * The movement must not cross two groups.
	 */
	if ((le16_to_cpu(bg->bg_bits) - goal_bit) * osb->s_clustersize <
								range->me_len) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * More exact validations/adjustments will be performed later
	 * during the moving operation for each extent range.
	 */
	mlog(0, "extents get ready to be moved to #%llu block\n",
	     range->me_goal);

out:
	brelse(gd_bh);

	return ret;
}
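
/*
 * Scan the victim group's bitmap, starting at *goal_bit, for a run of
 * 'move_len' contiguous free bits.  If a used bit more than 'max_hop'
 * bits past the goal is hit first, *phys_cpos is set to 0 to signal
 * failure.  On success, *goal_bit and *phys_cpos point at the start of
 * the free run.
 */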
static void ocfs2_probe_alloc_group(struct inode *inode, struct buffer_head *bh,
				    int *goal_bit, u32 move_len, u32 max_hop,
				    u32 *phys_cpos)
{
	int i, used, last_free_bits = 0, base_bit = *goal_bit;
	struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
	u32 base_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
						 le64_to_cpu(gd->bg_blkno));

	for (i = base_bit; i < le16_to_cpu(gd->bg_bits); i++) {

		used = ocfs2_test_bit(i, (unsigned long *)gd->bg_bitmap);
		if (used) {
			/*
			 * We even tried searching for a free chunk by
			 * hopping as far as 'max_hop' distance, but still
			 * failed.
			 */
			if ((i - base_bit) > max_hop) {
				*phys_cpos = 0;
				break;
			}

			if (last_free_bits)
				last_free_bits = 0;

			continue;
		} else
			last_free_bits++;

		if (last_free_bits == move_len) {
			/* the free run ends at bit i; report its first bit */
			*goal_bit = i - move_len + 1;
			*phys_cpos = base_cpos + i - move_len + 1;
			break;
		}
	}

	mlog(0, "found phys_cpos: %u to fit the wanted moving.\n", *phys_cpos);
}
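
/*
 * Account 'num_bits' newly claimed bits in the allocator dinode and in
 * the free count of the chain record they came from.
 */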
static int ocfs2_alloc_dinode_update_counts(struct inode *inode,
					    handle_t *handle,
					    struct buffer_head *di_bh,
					    u32 num_bits,
					    u16 chain)
{
	int ret;
	u32 tmp_used;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
	struct ocfs2_chain_list *cl =
				(struct ocfs2_chain_list *) &di->id2.i_chain;

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	tmp_used = le32_to_cpu(di->id1.bitmap1.i_used);
	di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used);
	le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits);
	ocfs2_journal_dirty(handle, di_bh);

out:
	return ret;
}
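
/*
 * Mark 'num_bits' bits starting at 'bit_off' as used in the group
 * bitmap.  The cluster bitmap takes undo journal access so the bits can
 * be restored if the transaction aborts.
 */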
static inline int ocfs2_block_group_set_bits(handle_t *handle,
					     struct inode *alloc_inode,
					     struct ocfs2_group_desc *bg,
					     struct buffer_head *group_bh,
					     unsigned int bit_off,
					     unsigned int num_bits)
{
	int status;
	void *bitmap = bg->bg_bitmap;
	int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;

	/* All callers get the descriptor via
	 * ocfs2_read_group_descriptor().  Any corruption is a code bug. */
	BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
	BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits);

	mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off,
	     num_bits);

	if (ocfs2_is_cluster_bitmap(alloc_inode))
		journal_type = OCFS2_JOURNAL_ACCESS_UNDO;

	status = ocfs2_journal_access_gd(handle,
					 INODE_CACHE(alloc_inode),
					 group_bh,
					 journal_type);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	le16_add_cpu(&bg->bg_free_bits_count, -num_bits);
	if (le16_to_cpu(bg->bg_free_bits_count) > le16_to_cpu(bg->bg_bits)) {
		ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit"
			    " count %u but claims %u are freed. num_bits %d",
			    (unsigned long long)le64_to_cpu(bg->bg_blkno),
			    le16_to_cpu(bg->bg_bits),
			    le16_to_cpu(bg->bg_free_bits_count), num_bits);
		return -EROFS;
	}
	while (num_bits--)
		ocfs2_set_bit(bit_off++, bitmap);

	ocfs2_journal_dirty(handle, group_bh);

bail:
	return status;
}
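
/*
 * Move one extent to the validated goal: find the victim group for the
 * goal, probe it for a large-enough free run, relocate the data with
 * __ocfs2_move_extent(), then update the global bitmap counts by hand
 * since no data allocation context was reserved for this path.
 */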
static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
			     u32 cpos, u32 phys_cpos, u32 *new_phys_cpos,
			     u32 len, int ext_flags)
{
	int ret, credits = 0, extra_blocks = 0, goal_bit = 0;
	handle_t *handle;
	struct inode *inode = context->inode;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct inode *tl_inode = osb->osb_tl_inode;
	struct inode *gb_inode = NULL;
	struct buffer_head *gb_bh = NULL;
	struct buffer_head *gd_bh = NULL;
	struct ocfs2_group_desc *gd;
	struct ocfs2_refcount_tree *ref_tree = NULL;
	u32 move_max_hop = ocfs2_blocks_to_clusters(inode->i_sb,
						    context->range->me_threshold);
	u64 phys_blkno, new_phys_blkno;

	phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);

	if ((ext_flags & OCFS2_EXT_REFCOUNTED) && len) {

		BUG_ON(!(OCFS2_I(inode)->ip_dyn_features &
			 OCFS2_HAS_REFCOUNT_FL));

		BUG_ON(!context->refcount_loc);

		ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
					       &ref_tree, NULL);
		if (ret) {
			mlog_errno(ret);
			return ret;
		}

		ret = ocfs2_prepare_refcount_change_for_del(inode,
							context->refcount_loc,
							phys_blkno,
							len,
							&credits,
							&extra_blocks);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_lock_allocators_move_extents(inode, &context->et, len, 1,
						 &context->meta_ac,
						 NULL, extra_blocks, &credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Need to count 2 extra credits for the global_bitmap inode and
	 * its group descriptor.
	 */
	credits += OCFS2_INODE_UPDATE_CREDITS + 1;

	/*
	 * ocfs2_move_extent() didn't reserve any clusters in
	 * lock_allocators(), but we still need to lock the global_bitmap.
	 */
	gb_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE,
					       OCFS2_INVALID_SLOT);
	if (!gb_inode) {
		mlog(ML_ERROR, "unable to get global_bitmap inode\n");
		ret = -EIO;
		goto out;
	}

	mutex_lock(&gb_inode->i_mutex);

	ret = ocfs2_inode_lock(gb_inode, &gb_bh, 1);
	if (ret) {
		mlog_errno(ret);
		goto out_unlock_gb_mutex;
	}

	mutex_lock(&tl_inode->i_mutex);

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_unlock_tl_inode;
	}

	new_phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *new_phys_cpos);
	ret = ocfs2_find_victim_alloc_group(inode, new_phys_blkno,
					    GLOBAL_BITMAP_SYSTEM_INODE,
					    OCFS2_INVALID_SLOT,
					    &goal_bit, &gd_bh);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * Probe the victim cluster group to find a proper region to fit
	 * the wanted movement; it will even perform a best-effort attempt
	 * by compromising to a threshold around the goal.
	 */
	ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop,
				new_phys_cpos);
	if (!*new_phys_cpos) {
		ret = -ENOSPC;
		goto out_commit;
	}

	ret = __ocfs2_move_extent(handle, context, cpos, len, phys_cpos,
				  *new_phys_cpos, ext_flags);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	gd = (struct ocfs2_group_desc *)gd_bh->b_data;
	ret = ocfs2_alloc_dinode_update_counts(gb_inode, handle, gb_bh, len,
					       le16_to_cpu(gd->bg_chain));
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = ocfs2_block_group_set_bits(handle, gb_inode, gd, gd_bh,
					 goal_bit, len);
	if (ret)
		mlog_errno(ret);

	/*
	 * Here we should write the new page out first if we are
	 * in write-back mode.
	 */
	ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, len);
	if (ret)
		mlog_errno(ret);

out_commit:
	ocfs2_commit_trans(osb, handle);
	brelse(gd_bh);

out_unlock_tl_inode:
	mutex_unlock(&tl_inode->i_mutex);

	ocfs2_inode_unlock(gb_inode, 1);
out_unlock_gb_mutex:
	mutex_unlock(&gb_inode->i_mutex);
	brelse(gb_bh);
	iput(gb_inode);

out:
	if (context->meta_ac) {
		ocfs2_free_alloc_context(context->meta_ac);
		context->meta_ac = NULL;
	}

	if (ref_tree)
		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);

	return ret;
}

/*
 * Helper to calculate the defragging length in one run according to the
 * threshold.
 */
static void ocfs2_calc_extent_defrag_len(u32 *alloc_size, u32 *len_defraged,
					 u32 threshold, int *skip)
{
	if ((*alloc_size + *len_defraged) < threshold) {
		/*
		 * Proceed with defragmentation until we meet the threshold.
		 */
		*len_defraged += *alloc_size;
	} else if (*len_defraged == 0) {
		/*
		 * XXX: skip a large extent.
		 */
		*skip = 1;
	} else {
		/*
		 * Split this extent to coalesce with former pieces so as
		 * to reach the threshold.
		 *
		 * We're done here with one cycle of defragmentation in a
		 * size of 'threshold'; resetting 'len_defraged' forces a
		 * new defragmentation.
		 */
		*alloc_size = threshold - *len_defraged;
		*len_defraged = 0;
	}
}
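
/*
 * Walk the requested range extent by extent, dispatching each mapped
 * extent either to the defragmenter or to the goal-directed mover, and
 * accumulate the totals reported back through 'range'.
 */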
static int __ocfs2_move_extents_range(struct buffer_head *di_bh,
				      struct ocfs2_move_extents_context *context)
{
	int ret = 0, flags, do_defrag, skip = 0;
	u32 cpos, phys_cpos, move_start, len_to_move, alloc_size;
	u32 len_defraged = 0, defrag_thresh = 0, new_phys_cpos = 0;

	struct inode *inode = context->inode;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_move_extents *range = context->range;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if ((inode->i_size == 0) || (range->me_len == 0))
		return 0;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		return 0;

	context->refcount_loc = le64_to_cpu(di->i_refcount_loc);

	ocfs2_init_dinode_extent_tree(&context->et, INODE_CACHE(inode), di_bh);
	ocfs2_init_dealloc_ctxt(&context->dealloc);

	/*
	 * TO-DO XXX:
	 *
	 * - xattr extents.
	 */

	do_defrag = context->auto_defrag;

	/*
	 * Extent moving happens in units of clusters; for the sake of
	 * simplicity, we may ignore the two partial clusters that
	 * 'byte_start' and 'byte_start + len' fall within.
	 */
	move_start = ocfs2_clusters_for_bytes(osb->sb, range->me_start);
	len_to_move = (range->me_start + range->me_len) >>
						osb->s_clustersize_bits;
	if (len_to_move >= move_start)
		len_to_move -= move_start;
	else
		len_to_move = 0;

	if (do_defrag)
		defrag_thresh = range->me_threshold >> osb->s_clustersize_bits;
	else
		new_phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
							 range->me_goal);

	mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, clen: %u, "
	     "thresh: %u\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     (unsigned long long)range->me_start,
	     (unsigned long long)range->me_len,
	     move_start, len_to_move, defrag_thresh);

	cpos = move_start;
	while (len_to_move) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &alloc_size,
					 &flags);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (alloc_size > len_to_move)
			alloc_size = len_to_move;

		/*
		 * XXX: how to deal with a hole:
		 *
		 * - skip the hole of course
		 * - force a new defragmentation
		 */
		if (!phys_cpos) {
			if (do_defrag)
				len_defraged = 0;

			goto next;
		}

		if (do_defrag) {
			ocfs2_calc_extent_defrag_len(&alloc_size, &len_defraged,
						     defrag_thresh, &skip);
			/*
			 * Skip large extents.
			 */
			if (skip) {
				skip = 0;
				goto next;
			}

			mlog(0, "#Defrag: cpos: %u, phys_cpos: %u, "
			     "alloc_size: %u, len_defraged: %u\n",
			     cpos, phys_cpos, alloc_size, len_defraged);

			ret = ocfs2_defrag_extent(context, cpos, phys_cpos,
						  &alloc_size, flags);
		} else {
			ret = ocfs2_move_extent(context, cpos, phys_cpos,
						&new_phys_cpos, alloc_size,
						flags);

			new_phys_cpos += alloc_size;
		}

		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		context->clusters_moved += alloc_size;
next:
		cpos += alloc_size;
		len_to_move -= alloc_size;
	}

	range->me_flags |= OCFS2_MOVE_EXT_FL_COMPLETE;

out:
	range->me_moved_len = ocfs2_clusters_to_bytes(osb->sb,
						      context->clusters_moved);
	range->me_new_offset = ocfs2_clusters_to_bytes(osb->sb,
						       context->new_phys_cpos);

	ocfs2_schedule_truncate_log_flush(osb, 1);
	ocfs2_run_deallocs(osb, &context->dealloc);

	return ret;
}
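
/*
 * Take the locks a whole pass needs (i_mutex, rw lock, inode lock,
 * ip_alloc_sem), run the range walk, then update the inode ctime in a
 * small separate transaction.
 */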
static int ocfs2_move_extents(struct ocfs2_move_extents_context *context)
{
	int status;
	handle_t *handle;
	struct inode *inode = context->inode;
	struct ocfs2_dinode *di;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (!inode)
		return -ENOENT;

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	mutex_lock(&inode->i_mutex);

	/*
	 * This prevents concurrent writes from other nodes.
	 */
	status = ocfs2_rw_lock(inode, 1);
	if (status) {
		mlog_errno(status);
		goto out;
	}

	status = ocfs2_inode_lock(inode, &di_bh, 1);
	if (status) {
		mlog_errno(status);
		goto out_rw_unlock;
	}

	/*
	 * Remember that ip_xattr_sem also needs to be held if necessary.
	 */
	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	status = __ocfs2_move_extents_range(di_bh, context);

	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	if (status) {
		mlog_errno(status);
		goto out_inode_unlock;
	}

	/*
	 * We update the ctime for these changes.
	 */
	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_inode_unlock;
	}

	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status) {
		mlog_errno(status);
		goto out_commit;
	}

	di = (struct ocfs2_dinode *)di_bh->b_data;
	inode->i_ctime = CURRENT_TIME;
	di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);

	ocfs2_journal_dirty(handle, di_bh);

out_commit:
	ocfs2_commit_trans(osb, handle);

out_inode_unlock:
	brelse(di_bh);
	ocfs2_inode_unlock(inode, 1);
out_rw_unlock:
	ocfs2_rw_unlock(inode, 1);
out:
	mutex_unlock(&inode->i_mutex);

	return status;
}
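
/*
 * Entry point of the OCFS2_IOC_MOVE_EXT ioctl: copy in the request,
 * validate it, run the move or defrag, and always copy the (possibly
 * partial) progress back to userspace.
 */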
int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
{
	int status;

	struct inode *inode = filp->f_path.dentry->d_inode;
	struct ocfs2_move_extents range;
	struct ocfs2_move_extents_context *context = NULL;

	status = mnt_want_write(filp->f_path.mnt);
	if (status)
		return status;

	if ((!S_ISREG(inode->i_mode)) || !(filp->f_mode & FMODE_WRITE))
		goto out;

	if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
		status = -EPERM;
		goto out;
	}

	context = kzalloc(sizeof(struct ocfs2_move_extents_context), GFP_NOFS);
	if (!context) {
		status = -ENOMEM;
		mlog_errno(status);
		goto out;
	}

	context->inode = inode;
	context->file = filp;

	if (argp) {
		if (copy_from_user(&range, (struct ocfs2_move_extents *)argp,
				   sizeof(range))) {
			status = -EFAULT;
			goto out;
		}
	} else {
		status = -EINVAL;
		goto out;
	}

	if (range.me_start > i_size_read(inode))
		goto out;

	if (range.me_start + range.me_len > i_size_read(inode))
		range.me_len = i_size_read(inode) - range.me_start;

	context->range = &range;

	if (range.me_flags & OCFS2_MOVE_EXT_FL_AUTO_DEFRAG) {
		context->auto_defrag = 1;
		if (!range.me_threshold)
			/*
			 * The default threshold for defragmentation is 1M,
			 * since our maximum clustersize is also 1M.
			 * Any thoughts?
			 */
			range.me_threshold = 1024 * 1024;

		if (range.me_flags & OCFS2_MOVE_EXT_FL_PART_DEFRAG)
			context->partial = 1;
	} else {
		/*
		 * First make a best-effort attempt to validate and adjust
		 * the goal (physical address in blocks).  This can't
		 * guarantee that the later operation will always succeed,
		 * since the global_bitmap may change a bit over time.
		 */
		status = ocfs2_validate_and_adjust_move_goal(inode, &range);
		if (status)
			goto out;
	}

	status = ocfs2_move_extents(context);
	if (status)
		mlog_errno(status);
out:
	/*
	 * The movement/defragmentation may end up being partially
	 * completed; that's why we need to return the finished length and
	 * new_offset to userspace even if a failure happens somewhere.
	 */
	if (argp) {
		if (copy_to_user((struct ocfs2_move_extents *)argp, &range,
				 sizeof(range)))
			status = -EFAULT;
	}

	kfree(context);

	mnt_drop_write(filp->f_path.mnt);

	return status;
}