  1. /* -*- mode: c; c-basic-offset: 8; -*-
  2. * vim: noexpandtab sw=8 ts=8 sts=0:
  3. *
  4. * suballoc.c
  5. *
  6. * metadata alloc and free
  7. * Inspired by ext3 block groups.
  8. *
  9. * Copyright (C) 2002, 2004 Oracle. All rights reserved.
  10. *
  11. * This program is free software; you can redistribute it and/or
  12. * modify it under the terms of the GNU General Public
  13. * License as published by the Free Software Foundation; either
  14. * version 2 of the License, or (at your option) any later version.
  15. *
  16. * This program is distributed in the hope that it will be useful,
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  19. * General Public License for more details.
  20. *
  21. * You should have received a copy of the GNU General Public
  22. * License along with this program; if not, write to the
  23. * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  24. * Boston, MA 021110-1307, USA.
  25. */
  26. #include <linux/fs.h>
  27. #include <linux/types.h>
  28. #include <linux/slab.h>
  29. #include <linux/highmem.h>
  30. #define MLOG_MASK_PREFIX ML_DISK_ALLOC
  31. #include <cluster/masklog.h>
  32. #include "ocfs2.h"
  33. #include "alloc.h"
  34. #include "dlmglue.h"
  35. #include "inode.h"
  36. #include "journal.h"
  37. #include "localalloc.h"
  38. #include "suballoc.h"
  39. #include "super.h"
  40. #include "sysfile.h"
  41. #include "uptodate.h"
  42. #include "buffer_head_io.h"
/* Whether ocfs2_reserve_suballoc_bits() may grow an allocator file by
 * adding a new block group when it runs out of free bits. */
#define NOT_ALLOC_NEW_GROUP		0
#define ALLOC_NEW_GROUP			1

/* Cap on consecutive cross-slot inode steals before we recheck our
 * own slot's allocator (see ocfs2_reserve_new_inode()). */
#define OCFS2_MAX_INODES_TO_STEAL	1024

/* Forward declarations for the static helpers below. */
static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
static int ocfs2_block_group_fill(handle_t *handle,
				  struct inode *alloc_inode,
				  struct buffer_head *bg_bh,
				  u64 group_blkno,
				  u16 my_chain,
				  struct ocfs2_chain_list *cl);
static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
				   struct inode *alloc_inode,
				   struct buffer_head *bh,
				   u64 max_block);
static int ocfs2_cluster_group_search(struct inode *inode,
				      struct buffer_head *group_bh,
				      u32 bits_wanted, u32 min_bits,
				      u64 max_block,
				      u16 *bit_off, u16 *bits_found);
static int ocfs2_block_group_search(struct inode *inode,
				    struct buffer_head *group_bh,
				    u32 bits_wanted, u32 min_bits,
				    u64 max_block,
				    u16 *bit_off, u16 *bits_found);
static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
				     struct ocfs2_alloc_context *ac,
				     handle_t *handle,
				     u32 bits_wanted,
				     u32 min_bits,
				     u16 *bit_off,
				     unsigned int *num_bits,
				     u64 *bg_blkno);
static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
					 int nr);
static inline int ocfs2_block_group_set_bits(handle_t *handle,
					     struct inode *alloc_inode,
					     struct ocfs2_group_desc *bg,
					     struct buffer_head *group_bh,
					     unsigned int bit_off,
					     unsigned int num_bits);
static inline int ocfs2_block_group_clear_bits(handle_t *handle,
					       struct inode *alloc_inode,
					       struct ocfs2_group_desc *bg,
					       struct buffer_head *group_bh,
					       unsigned int bit_off,
					       unsigned int num_bits);
static int ocfs2_relink_block_group(handle_t *handle,
				    struct inode *alloc_inode,
				    struct buffer_head *fe_bh,
				    struct buffer_head *bg_bh,
				    struct buffer_head *prev_bg_bh,
				    u16 chain);
static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg,
						     u32 wanted);
static inline u32 ocfs2_desc_bitmap_to_cluster_off(struct inode *inode,
						   u64 bg_blkno,
						   u16 bg_bit_off);
static inline void ocfs2_block_to_cluster_group(struct inode *inode,
						u64 data_blkno,
						u64 *bg_blkno,
						u16 *bg_bit_off);
static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb,
					     u32 bits_wanted, u64 max_block,
					     struct ocfs2_alloc_context **ac);
/*
 * Release everything an alloc context holds: the allocator inode
 * (cluster lock, i_mutex, inode reference) and the cached dinode
 * buffer.  Fields are NULLed as they are released, so the context can
 * be reused for another reservation attempt (see the steal paths).
 */
void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac)
{
	struct inode *inode = ac->ac_inode;

	if (inode) {
		/* The local alloc path never took the cluster lock, so
		 * only drop it for the other allocator types. */
		if (ac->ac_which != OCFS2_AC_USE_LOCAL)
			ocfs2_inode_unlock(inode, 1);

		mutex_unlock(&inode->i_mutex);

		iput(inode);
		ac->ac_inode = NULL;
	}
	brelse(ac->ac_bh);
	ac->ac_bh = NULL;
}
/* Drop an alloc context's resources and free the context itself. */
void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
{
	ocfs2_free_ac_resource(ac);
	kfree(ac);
}
  127. static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)
  128. {
  129. return (u32)le16_to_cpu(cl->cl_cpg) * (u32)le16_to_cpu(cl->cl_bpc);
  130. }
/*
 * Report a corrupt group descriptor.  When 'clean_error' is set (the
 * resize/check path) we only log the problem; otherwise ocfs2_error()
 * is invoked, which flags the filesystem as errored.  Deliberately
 * captures 'sb' and 'clean_error' from the expansion site; #undef'd
 * right after the two validators below.
 */
#define do_error(fmt, ...)						\
	do{								\
		if (clean_error)					\
			mlog(ML_ERROR, fmt "\n", ##__VA_ARGS__);	\
		else							\
			ocfs2_error(sb, fmt, ##__VA_ARGS__);		\
	} while (0)
/*
 * Sanity-check the descriptor fields that can be verified without
 * looking at the owning chain allocator: signature, self block number,
 * fs generation, and internal bit counts.  Returns -EINVAL on
 * corruption; do_error() decides whether that also errors the fs.
 */
static int ocfs2_validate_gd_self(struct super_block *sb,
				  struct buffer_head *bh,
				  int clean_error)
{
	struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;

	if (!OCFS2_IS_VALID_GROUP_DESC(gd)) {
		do_error("Group descriptor #%llu has bad signature %.*s",
			 (unsigned long long)bh->b_blocknr, 7,
			 gd->bg_signature);
		return -EINVAL;
	}

	/* A descriptor must record the block it actually lives at. */
	if (le64_to_cpu(gd->bg_blkno) != bh->b_blocknr) {
		do_error("Group descriptor #%llu has an invalid bg_blkno "
			 "of %llu",
			 (unsigned long long)bh->b_blocknr,
			 (unsigned long long)le64_to_cpu(gd->bg_blkno));
		return -EINVAL;
	}

	/* Catches stale descriptors left over from an earlier mkfs. */
	if (le32_to_cpu(gd->bg_generation) != OCFS2_SB(sb)->fs_generation) {
		do_error("Group descriptor #%llu has an invalid "
			 "fs_generation of #%u",
			 (unsigned long long)bh->b_blocknr,
			 le32_to_cpu(gd->bg_generation));
		return -EINVAL;
	}

	/* The free count can never exceed the total bit count... */
	if (le16_to_cpu(gd->bg_free_bits_count) > le16_to_cpu(gd->bg_bits)) {
		do_error("Group descriptor #%llu has bit count %u but "
			 "claims that %u are free",
			 (unsigned long long)bh->b_blocknr,
			 le16_to_cpu(gd->bg_bits),
			 le16_to_cpu(gd->bg_free_bits_count));
		return -EINVAL;
	}

	/* ...and the bit count must fit in the on-disk bitmap bytes. */
	if (le16_to_cpu(gd->bg_bits) > (8 * le16_to_cpu(gd->bg_size))) {
		do_error("Group descriptor #%llu has bit count %u but "
			 "max bitmap bits of %u",
			 (unsigned long long)bh->b_blocknr,
			 le16_to_cpu(gd->bg_bits),
			 8 * le16_to_cpu(gd->bg_size));
		return -EINVAL;
	}

	return 0;
}
  181. static int ocfs2_validate_gd_parent(struct super_block *sb,
  182. struct ocfs2_dinode *di,
  183. struct buffer_head *bh,
  184. int clean_error)
  185. {
  186. unsigned int max_bits;
  187. struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
  188. if (di->i_blkno != gd->bg_parent_dinode) {
  189. do_error("Group descriptor #%llu has bad parent "
  190. "pointer (%llu, expected %llu)",
  191. (unsigned long long)bh->b_blocknr,
  192. (unsigned long long)le64_to_cpu(gd->bg_parent_dinode),
  193. (unsigned long long)le64_to_cpu(di->i_blkno));
  194. return -EINVAL;
  195. }
  196. max_bits = le16_to_cpu(di->id2.i_chain.cl_cpg) * le16_to_cpu(di->id2.i_chain.cl_bpc);
  197. if (le16_to_cpu(gd->bg_bits) > max_bits) {
  198. do_error("Group descriptor #%llu has bit count of %u",
  199. (unsigned long long)bh->b_blocknr,
  200. le16_to_cpu(gd->bg_bits));
  201. return -EINVAL;
  202. }
  203. if (le16_to_cpu(gd->bg_chain) >=
  204. le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) {
  205. do_error("Group descriptor #%llu has bad chain %u",
  206. (unsigned long long)bh->b_blocknr,
  207. le16_to_cpu(gd->bg_chain));
  208. return -EINVAL;
  209. }
  210. return 0;
  211. }
  212. #undef do_error
  213. /*
  214. * This version only prints errors. It does not fail the filesystem, and
  215. * exists only for resize.
  216. */
  217. int ocfs2_check_group_descriptor(struct super_block *sb,
  218. struct ocfs2_dinode *di,
  219. struct buffer_head *bh)
  220. {
  221. int rc;
  222. rc = ocfs2_validate_gd_self(sb, bh, 1);
  223. if (!rc)
  224. rc = ocfs2_validate_gd_parent(sb, di, bh, 1);
  225. return rc;
  226. }
  227. static int ocfs2_validate_group_descriptor(struct super_block *sb,
  228. struct buffer_head *bh)
  229. {
  230. mlog(0, "Validating group descriptor %llu\n",
  231. (unsigned long long)bh->b_blocknr);
  232. return ocfs2_validate_gd_self(sb, bh, 0);
  233. }
/*
 * Read and fully validate the group descriptor at 'gd_blkno' belonging
 * to the chain allocator 'di'.  Self-checks run inside the read via the
 * validate callback; the parent checks run here afterwards.  On success
 * *bh holds a referenced buffer (a new one is passed up only if the
 * caller supplied *bh == NULL).  On parent-validation failure the
 * reference obtained here is dropped.
 */
int ocfs2_read_group_descriptor(struct inode *inode, struct ocfs2_dinode *di,
				u64 gd_blkno, struct buffer_head **bh)
{
	int rc;
	struct buffer_head *tmp = *bh;

	rc = ocfs2_read_block(inode, gd_blkno, &tmp,
			      ocfs2_validate_group_descriptor);
	if (rc)
		goto out;

	rc = ocfs2_validate_gd_parent(inode->i_sb, di, tmp, 0);
	if (rc) {
		brelse(tmp);
		goto out;
	}

	/* If ocfs2_read_block() got us a new bh, pass it up. */
	if (!*bh)
		*bh = tmp;
out:
	return rc;
}
/*
 * Format a freshly allocated block group descriptor in 'bg_bh' at
 * block 'group_blkno' and point it at the current head of chain
 * 'my_chain'.  The caller supplies a running transaction; the buffer
 * gets JOURNAL_ACCESS_CREATE since it had no prior valid contents.
 * The caller is still responsible for linking the group into 'cl'.
 */
static int ocfs2_block_group_fill(handle_t *handle,
				  struct inode *alloc_inode,
				  struct buffer_head *bg_bh,
				  u64 group_blkno,
				  u16 my_chain,
				  struct ocfs2_chain_list *cl)
{
	int status = 0;
	struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
	struct super_block * sb = alloc_inode->i_sb;

	mlog_entry_void();

	if (((unsigned long long) bg_bh->b_blocknr) != group_blkno) {
		ocfs2_error(alloc_inode->i_sb, "group block (%llu) != "
			    "b_blocknr (%llu)",
			    (unsigned long long)group_blkno,
			    (unsigned long long) bg_bh->b_blocknr);
		status = -EIO;
		goto bail;
	}

	status = ocfs2_journal_access(handle,
				      alloc_inode,
				      bg_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	memset(bg, 0, sb->s_blocksize);
	strcpy(bg->bg_signature, OCFS2_GROUP_DESC_SIGNATURE);
	bg->bg_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
	bg->bg_size = cpu_to_le16(ocfs2_group_bitmap_size(sb));
	bg->bg_bits = cpu_to_le16(ocfs2_bits_per_group(cl));
	bg->bg_chain = cpu_to_le16(my_chain);
	/* Old chain head; both sides are little-endian, so the raw copy
	 * needs no conversion. */
	bg->bg_next_group = cl->cl_recs[my_chain].c_blkno;
	bg->bg_parent_dinode = cpu_to_le64(OCFS2_I(alloc_inode)->ip_blkno);
	bg->bg_blkno = cpu_to_le64(group_blkno);
	/* set the 1st bit in the bitmap to account for the descriptor block */
	ocfs2_set_bit(0, (unsigned long *)bg->bg_bitmap);
	bg->bg_free_bits_count = cpu_to_le16(le16_to_cpu(bg->bg_bits) - 1);

	status = ocfs2_journal_dirty(handle, bg_bh);
	if (status < 0)
		mlog_errno(status);

	/* There is no need to zero out or otherwise initialize the
	 * other blocks in a group - All valid FS metadata in a block
	 * group stores the superblock fs_generation value at
	 * allocation time. */
bail:
	mlog_exit(status);
	return status;
}
  304. static inline u16 ocfs2_find_smallest_chain(struct ocfs2_chain_list *cl)
  305. {
  306. u16 curr, best;
  307. best = curr = 0;
  308. while (curr < le16_to_cpu(cl->cl_count)) {
  309. if (le32_to_cpu(cl->cl_recs[best].c_total) >
  310. le32_to_cpu(cl->cl_recs[curr].c_total))
  311. best = curr;
  312. curr++;
  313. }
  314. return best;
  315. }
/*
 * Grow a suballocator file by one block group: reserve cl_cpg clusters,
 * format the first block of the new region as a group descriptor, and
 * splice the group onto the emptiest chain of the allocator dinode in
 * 'bh', updating the dinode's counters and size inside one transaction.
 *
 * We expect the block group allocator to already be locked.
 */
static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
				   struct inode *alloc_inode,
				   struct buffer_head *bh,
				   u64 max_block)
{
	int status, credits;
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data;
	struct ocfs2_chain_list *cl;
	struct ocfs2_alloc_context *ac = NULL;
	handle_t *handle = NULL;
	u32 bit_off, num_bits;
	u16 alloc_rec;
	u64 bg_blkno;
	struct buffer_head *bg_bh = NULL;
	struct ocfs2_group_desc *bg;

	/* This path never grows the global cluster bitmap. */
	BUG_ON(ocfs2_is_cluster_bitmap(alloc_inode));

	mlog_entry_void();

	cl = &fe->id2.i_chain;
	status = ocfs2_reserve_clusters_with_limit(osb,
						   le16_to_cpu(cl->cl_cpg),
						   max_block, &ac);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	credits = ocfs2_calc_group_alloc_credits(osb->sb,
						 le16_to_cpu(cl->cl_cpg));
	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_claim_clusters(osb,
				      handle,
				      ac,
				      le16_to_cpu(cl->cl_cpg),
				      &bit_off,
				      &num_bits);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	alloc_rec = ocfs2_find_smallest_chain(cl);

	/* setup the group */
	bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
	mlog(0, "new descriptor, record %u, at block %llu\n",
	     alloc_rec, (unsigned long long)bg_blkno);

	bg_bh = sb_getblk(osb->sb, bg_blkno);
	if (!bg_bh) {
		status = -EIO;
		mlog_errno(status);
		goto bail;
	}
	/* Brand-new block: mark it uptodate without reading from disk. */
	ocfs2_set_new_buffer_uptodate(alloc_inode, bg_bh);

	status = ocfs2_block_group_fill(handle,
					alloc_inode,
					bg_bh,
					bg_blkno,
					alloc_rec,
					cl);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	bg = (struct ocfs2_group_desc *) bg_bh->b_data;

	status = ocfs2_journal_access(handle, alloc_inode,
				      bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	/* Account the new group's bits in its chain record... */
	le32_add_cpu(&cl->cl_recs[alloc_rec].c_free,
		     le16_to_cpu(bg->bg_free_bits_count));
	le32_add_cpu(&cl->cl_recs[alloc_rec].c_total, le16_to_cpu(bg->bg_bits));
	cl->cl_recs[alloc_rec].c_blkno = cpu_to_le64(bg_blkno);
	if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count))
		le16_add_cpu(&cl->cl_next_free_rec, 1);

	/* ...and in the allocator-wide counters. */
	le32_add_cpu(&fe->id1.bitmap1.i_used, le16_to_cpu(bg->bg_bits) -
		     le16_to_cpu(bg->bg_free_bits_count));
	le32_add_cpu(&fe->id1.bitmap1.i_total, le16_to_cpu(bg->bg_bits));
	le32_add_cpu(&fe->i_clusters, le16_to_cpu(cl->cl_cpg));

	status = ocfs2_journal_dirty(handle, bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	/* Keep the in-memory inode in sync with the on-disk size. */
	spin_lock(&OCFS2_I(alloc_inode)->ip_lock);
	OCFS2_I(alloc_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	fe->i_size = cpu_to_le64(ocfs2_clusters_to_bytes(alloc_inode->i_sb,
					le32_to_cpu(fe->i_clusters)));
	spin_unlock(&OCFS2_I(alloc_inode)->ip_lock);
	i_size_write(alloc_inode, le64_to_cpu(fe->i_size));
	alloc_inode->i_blocks = ocfs2_inode_sector_count(alloc_inode);

	status = 0;
bail:
	if (handle)
		ocfs2_commit_trans(osb, handle);

	if (ac)
		ocfs2_free_alloc_context(ac);

	brelse(bg_bh);

	mlog_exit(status);
	return status;
}
/*
 * Lock the allocator system file of 'type' for 'slot' and ensure it
 * has at least ac->ac_bits_wanted free bits, growing it by one block
 * group if 'alloc_new_group' allows.  On success, ac owns the inode
 * (with i_mutex and cluster lock held) plus a reference on the dinode
 * bh; ocfs2_free_ac_resource() releases all of it.
 */
static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
				       struct ocfs2_alloc_context *ac,
				       int type,
				       u32 slot,
				       int alloc_new_group)
{
	int status;
	u32 bits_wanted = ac->ac_bits_wanted;
	struct inode *alloc_inode;
	struct buffer_head *bh = NULL;
	struct ocfs2_dinode *fe;
	u32 free_bits;

	mlog_entry_void();

	alloc_inode = ocfs2_get_system_file_inode(osb, type, slot);
	if (!alloc_inode) {
		mlog_errno(-EINVAL);
		return -EINVAL;
	}

	mutex_lock(&alloc_inode->i_mutex);

	status = ocfs2_inode_lock(alloc_inode, &bh, 1);
	if (status < 0) {
		mutex_unlock(&alloc_inode->i_mutex);
		iput(alloc_inode);

		mlog_errno(status);
		return status;
	}

	/* From here on, ocfs2_free_ac_resource() undoes the lock and
	 * reference on error paths. */
	ac->ac_inode = alloc_inode;
	ac->ac_alloc_slot = slot;

	fe = (struct ocfs2_dinode *) bh->b_data;

	/* The bh was validated by the inode read inside
	 * ocfs2_inode_lock(). Any corruption is a code bug. */
	BUG_ON(!OCFS2_IS_VALID_DINODE(fe));

	if (!(fe->i_flags & cpu_to_le32(OCFS2_CHAIN_FL))) {
		ocfs2_error(alloc_inode->i_sb, "Invalid chain allocator %llu",
			    (unsigned long long)le64_to_cpu(fe->i_blkno));
		status = -EIO;
		goto bail;
	}

	free_bits = le32_to_cpu(fe->id1.bitmap1.i_total) -
		le32_to_cpu(fe->id1.bitmap1.i_used);

	if (bits_wanted > free_bits) {
		/* cluster bitmap never grows */
		if (ocfs2_is_cluster_bitmap(alloc_inode)) {
			mlog(0, "Disk Full: wanted=%u, free_bits=%u\n",
			     bits_wanted, free_bits);
			status = -ENOSPC;
			goto bail;
		}

		if (alloc_new_group != ALLOC_NEW_GROUP) {
			mlog(0, "Alloc File %u Full: wanted=%u, free_bits=%u, "
			     "and we don't alloc a new group for it.\n",
			     slot, bits_wanted, free_bits);
			status = -ENOSPC;
			goto bail;
		}

		status = ocfs2_block_group_alloc(osb, alloc_inode, bh,
						 ac->ac_max_block);
		if (status < 0) {
			if (status != -ENOSPC)
				mlog_errno(status);
			goto bail;
		}
		atomic_inc(&osb->alloc_stats.bg_extends);

		/* You should never ask for this much metadata */
		BUG_ON(bits_wanted >
		       (le32_to_cpu(fe->id1.bitmap1.i_total)
			- le32_to_cpu(fe->id1.bitmap1.i_used)));
	}

	/* Extra reference: 'bh' itself is always released below. */
	get_bh(bh);
	ac->ac_bh = bh;
bail:
	brelse(bh);

	mlog_exit(status);
	return status;
}
  501. int ocfs2_reserve_new_metadata_blocks(struct ocfs2_super *osb,
  502. int blocks,
  503. struct ocfs2_alloc_context **ac)
  504. {
  505. int status;
  506. u32 slot;
  507. *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
  508. if (!(*ac)) {
  509. status = -ENOMEM;
  510. mlog_errno(status);
  511. goto bail;
  512. }
  513. (*ac)->ac_bits_wanted = blocks;
  514. (*ac)->ac_which = OCFS2_AC_USE_META;
  515. slot = osb->slot_num;
  516. (*ac)->ac_group_search = ocfs2_block_group_search;
  517. status = ocfs2_reserve_suballoc_bits(osb, (*ac),
  518. EXTENT_ALLOC_SYSTEM_INODE,
  519. slot, ALLOC_NEW_GROUP);
  520. if (status < 0) {
  521. if (status != -ENOSPC)
  522. mlog_errno(status);
  523. goto bail;
  524. }
  525. status = 0;
  526. bail:
  527. if ((status < 0) && *ac) {
  528. ocfs2_free_alloc_context(*ac);
  529. *ac = NULL;
  530. }
  531. mlog_exit(status);
  532. return status;
  533. }
/* Reserve as many metadata blocks as ocfs2_extend_meta_needed()
 * computes for a tree rooted at 'root_el'. */
int ocfs2_reserve_new_metadata(struct ocfs2_super *osb,
			       struct ocfs2_extent_list *root_el,
			       struct ocfs2_alloc_context **ac)
{
	return ocfs2_reserve_new_metadata_blocks(osb,
						 ocfs2_extend_meta_needed(root_el),
						 ac);
}
/*
 * Try to reserve an inode bit from another slot's inode allocator,
 * starting at the remembered steal slot (or the slot after ours) and
 * rotating through all slots.  A foreign allocator is never grown
 * (NOT_ALLOC_NEW_GROUP).  Returns -ENOSPC when every other slot is
 * full, or the first hard error encountered.
 */
static int ocfs2_steal_inode_from_other_nodes(struct ocfs2_super *osb,
					      struct ocfs2_alloc_context *ac)
{
	int i, status = -ENOSPC;
	s16 slot = ocfs2_get_inode_steal_slot(osb);

	/* Start to steal inodes from the first slot after ours. */
	if (slot == OCFS2_INVALID_SLOT)
		slot = osb->slot_num + 1;

	for (i = 0; i < osb->max_slots; i++, slot++) {
		if (slot == osb->max_slots)
			slot = 0;	/* wrap around */

		if (slot == osb->slot_num)
			continue;	/* never "steal" from ourselves */

		status = ocfs2_reserve_suballoc_bits(osb, ac,
						     INODE_ALLOC_SYSTEM_INODE,
						     slot, NOT_ALLOC_NEW_GROUP);
		if (status >= 0) {
			/* Remember the victim so the next steal starts here. */
			ocfs2_set_inode_steal_slot(osb, slot);
			break;
		}

		/* Drop this slot's lock/bh before probing the next one. */
		ocfs2_free_ac_resource(ac);
	}

	return status;
}
/*
 * Reserve one inode bit.  Normally we allocate from our own slot's
 * inode allocator; if it is full we fall back to stealing from other
 * slots, and keep stealing (up to OCFS2_MAX_INODES_TO_STEAL times)
 * before rechecking our own slot.  On failure *ac is freed and NULLed.
 */
int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
			    struct ocfs2_alloc_context **ac)
{
	int status;
	s16 slot = ocfs2_get_inode_steal_slot(osb);

	*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
	if (!(*ac)) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	(*ac)->ac_bits_wanted = 1;
	(*ac)->ac_which = OCFS2_AC_USE_INODE;

	(*ac)->ac_group_search = ocfs2_block_group_search;

	/*
	 * stat(2) can't handle i_ino > 32bits, so we tell the
	 * lower levels not to allocate us a block group past that
	 * limit. The 'inode64' mount option avoids this behavior.
	 */
	if (!(osb->s_mount_opt & OCFS2_MOUNT_INODE64))
		(*ac)->ac_max_block = (u32)~0U;

	/*
	 * slot is set when we successfully steal inode from other nodes.
	 * It is reset in 3 places:
	 * 1. when we flush the truncate log
	 * 2. when we complete local alloc recovery.
	 * 3. when we successfully allocate from our own slot.
	 * After it is set, we will go on stealing inodes until we find the
	 * need to check our slots to see whether there is some space for us.
	 */
	if (slot != OCFS2_INVALID_SLOT &&
	    atomic_read(&osb->s_num_inodes_stolen) < OCFS2_MAX_INODES_TO_STEAL)
		goto inode_steal;

	atomic_set(&osb->s_num_inodes_stolen, 0);
	status = ocfs2_reserve_suballoc_bits(osb, *ac,
					     INODE_ALLOC_SYSTEM_INODE,
					     osb->slot_num, ALLOC_NEW_GROUP);
	if (status >= 0) {
		status = 0;

		/*
		 * Some inodes must be freed by us, so try to allocate
		 * from our own next time.
		 */
		if (slot != OCFS2_INVALID_SLOT)
			ocfs2_init_inode_steal_slot(osb);
		goto bail;
	} else if (status < 0 && status != -ENOSPC) {
		mlog_errno(status);
		goto bail;
	}

	/* Our slot is full (-ENOSPC): release it and fall back to stealing. */
	ocfs2_free_ac_resource(*ac);

inode_steal:
	status = ocfs2_steal_inode_from_other_nodes(osb, *ac);
	atomic_inc(&osb->s_num_inodes_stolen);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	status = 0;
bail:
	if ((status < 0) && *ac) {
		ocfs2_free_alloc_context(*ac);
		*ac = NULL;
	}

	mlog_exit(status);
	return status;
}
  634. /* local alloc code has to do the same thing, so rather than do this
  635. * twice.. */
  636. int ocfs2_reserve_cluster_bitmap_bits(struct ocfs2_super *osb,
  637. struct ocfs2_alloc_context *ac)
  638. {
  639. int status;
  640. ac->ac_which = OCFS2_AC_USE_MAIN;
  641. ac->ac_group_search = ocfs2_cluster_group_search;
  642. status = ocfs2_reserve_suballoc_bits(osb, ac,
  643. GLOBAL_BITMAP_SYSTEM_INODE,
  644. OCFS2_INVALID_SLOT,
  645. ALLOC_NEW_GROUP);
  646. if (status < 0 && status != -ENOSPC) {
  647. mlog_errno(status);
  648. goto bail;
  649. }
  650. bail:
  651. return status;
  652. }
/* Callers don't need to care which bitmap (local alloc or main) to
 * use so we figure it out for them, but unfortunately this clutters
 * things a bit.
 *
 * 'max_block' caps how high on disk the reservation may land; callers
 * pass 0 when they have no limit (see ocfs2_reserve_clusters()).  A
 * local alloc window beyond the cap reports -EFBIG, which we convert
 * into a main-bitmap fallback.
 */
static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb,
					     u32 bits_wanted, u64 max_block,
					     struct ocfs2_alloc_context **ac)
{
	int status;

	mlog_entry_void();

	*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
	if (!(*ac)) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	(*ac)->ac_bits_wanted = bits_wanted;
	(*ac)->ac_max_block = max_block;

	status = -ENOSPC;
	if (ocfs2_alloc_should_use_local(osb, bits_wanted)) {
		status = ocfs2_reserve_local_alloc_bits(osb,
							bits_wanted,
							*ac);
		if (status == -EFBIG) {
			/* The local alloc window is outside ac_max_block.
			 * use the main bitmap. */
			status = -ENOSPC;
		} else if ((status < 0) && (status != -ENOSPC)) {
			mlog_errno(status);
			goto bail;
		}
	}

	/* -ENOSPC here means "local alloc unused or full", not disk full. */
	if (status == -ENOSPC) {
		status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
		if (status < 0) {
			if (status != -ENOSPC)
				mlog_errno(status);
			goto bail;
		}
	}

	status = 0;
bail:
	if ((status < 0) && *ac) {
		ocfs2_free_alloc_context(*ac);
		*ac = NULL;
	}

	mlog_exit(status);
	return status;
}
/* Reserve 'bits_wanted' clusters with no block-offset limit
 * (max_block == 0). */
int ocfs2_reserve_clusters(struct ocfs2_super *osb,
			   u32 bits_wanted,
			   struct ocfs2_alloc_context **ac)
{
	return ocfs2_reserve_clusters_with_limit(osb, bits_wanted, 0, ac);
}
/*
 * More or less lifted from ext3. I'll leave their description below:
 *
 * "For ext3 allocations, we must not reuse any blocks which are
 * allocated in the bitmap buffer's "last committed data" copy. This
 * prevents deletes from freeing up the page for reuse until we have
 * committed the delete transaction.
 *
 * If we didn't do this, then deleting something and reallocating it as
 * data would allow the old block to be overwritten before the
 * transaction committed (because we force data to disk before commit).
 * This would lead to corruption if we crashed between overwriting the
 * data and committing the delete.
 *
 * @@@ We may want to make this allocation behaviour conditional on
 * data-writes at some point, and disable it for metadata allocations or
 * sync-data inodes."
 *
 * Note: OCFS2 already does this differently for metadata vs data
 * allocations, as those bitmaps are separate and undo access is never
 * called on a metadata group descriptor.
 */
static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
					 int nr)
{
	struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;

	/* Set in the live bitmap: definitely not available. */
	if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap))
		return 0;

	/* No journaled committed-data copy exists, so the live bitmap
	 * is authoritative. */
	if (!buffer_jbd(bg_bh) || !bh2jh(bg_bh)->b_committed_data)
		return 1;

	/* Otherwise the bit must also be clear in the last committed
	 * copy before it can be handed out again. */
	bg = (struct ocfs2_group_desc *) bh2jh(bg_bh)->b_committed_data;
	return !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
}
/* Scan the group's bitmap for the best run of allocatable bits, up to
 * bits_wanted long.  On success *bit_off/*bits_found describe the
 * longest run seen; returns -ENOSPC if not a single allocatable bit
 * exists in this group. */
static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
					     struct buffer_head *bg_bh,
					     unsigned int bits_wanted,
					     unsigned int total_bits,
					     u16 *bit_off,
					     u16 *bits_found)
{
	void *bitmap;
	u16 best_offset, best_size;
	int offset, start, found, status = 0;
	struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;

	/* Callers got this descriptor from
	 * ocfs2_read_group_descriptor(). Any corruption is a code bug. */
	BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));

	/* found tracks the length of the current run; best_offset and
	 * best_size remember the longest run seen so far. */
	found = start = best_offset = best_size = 0;
	bitmap = bg->bg_bitmap;

	while((offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start)) != -1) {
		if (offset == total_bits)
			break;

		if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) {
			/* We found a zero, but we can't use it as it
			 * hasn't been put to disk yet! */
			found = 0;
			start = offset + 1;
		} else if (offset == start) {
			/* we found a zero */
			found++;
			/* move start to the next bit to test */
			start++;
		} else {
			/* got a zero after some ones */
			found = 1;
			start = offset + 1;
		}
		if (found > best_size) {
			best_size = found;
			best_offset = start - found;
		}
		/* we got everything we needed */
		if (found == bits_wanted) {
			/* mlog(0, "Found it all!\n"); */
			break;
		}
	}

	/* XXX: I think the first clause is equivalent to the second
	 * - jlbec */
	if (found == bits_wanted) {
		*bit_off = start - found;
		*bits_found = found;
	} else if (best_size) {
		*bit_off = best_offset;
		*bits_found = best_size;
	} else {
		status = -ENOSPC;
		/* No error log here -- see the comment above
		 * ocfs2_test_bg_bit_allocatable */
	}

	return status;
}
/* Mark num_bits bits starting at bit_off as in-use in the group
 * descriptor's bitmap and decrement its free count, under journal
 * protection.  Updating the allocator dinode's counters is the
 * caller's responsibility. */
static inline int ocfs2_block_group_set_bits(handle_t *handle,
					     struct inode *alloc_inode,
					     struct ocfs2_group_desc *bg,
					     struct buffer_head *group_bh,
					     unsigned int bit_off,
					     unsigned int num_bits)
{
	int status;
	void *bitmap = bg->bg_bitmap;
	int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;

	mlog_entry_void();

	/* All callers get the descriptor via
	 * ocfs2_read_group_descriptor(). Any corruption is a code bug. */
	BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
	BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits);

	mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off,
	     num_bits);

	/* Cluster bitmap groups take undo access -- see the comment
	 * above ocfs2_test_bg_bit_allocatable(). */
	if (ocfs2_is_cluster_bitmap(alloc_inode))
		journal_type = OCFS2_JOURNAL_ACCESS_UNDO;

	status = ocfs2_journal_access(handle,
				      alloc_inode,
				      group_bh,
				      journal_type);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	le16_add_cpu(&bg->bg_free_bits_count, -num_bits);

	while(num_bits--)
		ocfs2_set_bit(bit_off++, bitmap);

	status = ocfs2_journal_dirty(handle,
				     group_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

bail:
	mlog_exit(status);
	return status;
}
  839. /* find the one with the most empty bits */
  840. static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl)
  841. {
  842. u16 curr, best;
  843. BUG_ON(!cl->cl_next_free_rec);
  844. best = curr = 0;
  845. while (curr < le16_to_cpu(cl->cl_next_free_rec)) {
  846. if (le32_to_cpu(cl->cl_recs[curr].c_free) >
  847. le32_to_cpu(cl->cl_recs[best].c_free))
  848. best = curr;
  849. curr++;
  850. }
  851. BUG_ON(best >= le16_to_cpu(cl->cl_next_free_rec));
  852. return best;
  853. }
/* Move the group at bg_bh to the head of chain 'chain': its
 * predecessor inherits its next pointer, the group then points at the
 * old chain head, and the chain record finally points at the group.
 * All three buffers are journaled; on failure the in-memory fields
 * are restored from the values saved up front. */
static int ocfs2_relink_block_group(handle_t *handle,
				    struct inode *alloc_inode,
				    struct buffer_head *fe_bh,
				    struct buffer_head *bg_bh,
				    struct buffer_head *prev_bg_bh,
				    u16 chain)
{
	int status;
	/* there is a really tiny chance the journal calls could fail,
	 * but we wouldn't want inconsistent blocks in *any* case. */
	u64 fe_ptr, bg_ptr, prev_bg_ptr;
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
	struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
	struct ocfs2_group_desc *prev_bg = (struct ocfs2_group_desc *) prev_bg_bh->b_data;

	/* The caller got these descriptors from
	 * ocfs2_read_group_descriptor(). Any corruption is a code bug. */
	BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
	BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(prev_bg));

	mlog(0, "Suballoc %llu, chain %u, move group %llu to top, prev = %llu\n",
	     (unsigned long long)le64_to_cpu(fe->i_blkno), chain,
	     (unsigned long long)le64_to_cpu(bg->bg_blkno),
	     (unsigned long long)le64_to_cpu(prev_bg->bg_blkno));

	/* Snapshot all three pointers for the rollback path below. */
	fe_ptr = le64_to_cpu(fe->id2.i_chain.cl_recs[chain].c_blkno);
	bg_ptr = le64_to_cpu(bg->bg_next_group);
	prev_bg_ptr = le64_to_cpu(prev_bg->bg_next_group);

	/* Step 1: unlink bg by pointing its predecessor past it. */
	status = ocfs2_journal_access(handle, alloc_inode, prev_bg_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto out_rollback;
	}

	prev_bg->bg_next_group = bg->bg_next_group;

	status = ocfs2_journal_dirty(handle, prev_bg_bh);
	if (status < 0) {
		mlog_errno(status);
		goto out_rollback;
	}

	/* Step 2: make bg point at the current chain head. */
	status = ocfs2_journal_access(handle, alloc_inode, bg_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto out_rollback;
	}

	bg->bg_next_group = fe->id2.i_chain.cl_recs[chain].c_blkno;

	status = ocfs2_journal_dirty(handle, bg_bh);
	if (status < 0) {
		mlog_errno(status);
		goto out_rollback;
	}

	/* Step 3: install bg as the new chain head in the dinode. */
	status = ocfs2_journal_access(handle, alloc_inode, fe_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto out_rollback;
	}

	fe->id2.i_chain.cl_recs[chain].c_blkno = bg->bg_blkno;

	status = ocfs2_journal_dirty(handle, fe_bh);
	if (status < 0) {
		mlog_errno(status);
		goto out_rollback;
	}

	status = 0;
out_rollback:
	if (status < 0) {
		/* Restore the saved pointers so the in-memory state
		 * stays consistent even if some steps were applied. */
		fe->id2.i_chain.cl_recs[chain].c_blkno = cpu_to_le64(fe_ptr);
		bg->bg_next_group = cpu_to_le64(bg_ptr);
		prev_bg->bg_next_group = cpu_to_le64(prev_bg_ptr);
	}

	mlog_exit(status);
	return status;
}
  925. static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg,
  926. u32 wanted)
  927. {
  928. return le16_to_cpu(bg->bg_free_bits_count) > wanted;
  929. }
/* Search one cluster-bitmap group for bits_wanted free clusters.
 * return 0 on success, -ENOSPC to keep searching and any other < 0
 * value on error. */
static int ocfs2_cluster_group_search(struct inode *inode,
				      struct buffer_head *group_bh,
				      u32 bits_wanted, u32 min_bits,
				      u64 max_block,
				      u16 *bit_off, u16 *bits_found)
{
	int search = -ENOSPC;
	int ret;
	u64 blkoff;
	struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *) group_bh->b_data;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	u16 tmp_off, tmp_found;
	unsigned int max_bits, gd_cluster_off;

	/* This search variant is only for the global cluster bitmap. */
	BUG_ON(!ocfs2_is_cluster_bitmap(inode));

	if (gd->bg_free_bits_count) {
		max_bits = le16_to_cpu(gd->bg_bits);

		/* Tail groups in cluster bitmaps which aren't cpg
		 * aligned are prone to partial extention by a failed
		 * fs resize. If the file system resize never got to
		 * update the dinode cluster count, then we don't want
		 * to trust any clusters past it, regardless of what
		 * the group descriptor says. */
		gd_cluster_off = ocfs2_blocks_to_clusters(inode->i_sb,
							  le64_to_cpu(gd->bg_blkno));
		if ((gd_cluster_off + max_bits) >
		    OCFS2_I(inode)->ip_clusters) {
			/* Clamp the search to the dinode's idea of the
			 * bitmap size. */
			max_bits = OCFS2_I(inode)->ip_clusters - gd_cluster_off;
			mlog(0, "Desc %llu, bg_bits %u, clusters %u, use %u\n",
			     (unsigned long long)le64_to_cpu(gd->bg_blkno),
			     le16_to_cpu(gd->bg_bits),
			     OCFS2_I(inode)->ip_clusters, max_bits);
		}

		ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
							group_bh, bits_wanted,
							max_bits,
							&tmp_off, &tmp_found);
		if (ret)
			return ret;

		/* Reject a find which would extend past the caller's
		 * block limit. */
		if (max_block) {
			blkoff = ocfs2_clusters_to_blocks(inode->i_sb,
							  gd_cluster_off +
							  tmp_off + tmp_found);
			mlog(0, "Checking %llu against %llu\n",
			     (unsigned long long)blkoff,
			     (unsigned long long)max_block);
			if (blkoff > max_block)
				return -ENOSPC;
		}

		/* ocfs2_block_group_find_clear_bits() might
		 * return success, but we still want to return
		 * -ENOSPC unless it found the minimum number
		 * of bits. */
		if (min_bits <= tmp_found) {
			*bit_off = tmp_off;
			*bits_found = tmp_found;
			search = 0; /* success */
		} else if (tmp_found) {
			/*
			 * Don't show bits which we'll be returning
			 * for allocation to the local alloc bitmap.
			 */
			ocfs2_local_alloc_seen_free_bits(osb, tmp_found);
		}
	}

	return search;
}
  998. static int ocfs2_block_group_search(struct inode *inode,
  999. struct buffer_head *group_bh,
  1000. u32 bits_wanted, u32 min_bits,
  1001. u64 max_block,
  1002. u16 *bit_off, u16 *bits_found)
  1003. {
  1004. int ret = -ENOSPC;
  1005. u64 blkoff;
  1006. struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) group_bh->b_data;
  1007. BUG_ON(min_bits != 1);
  1008. BUG_ON(ocfs2_is_cluster_bitmap(inode));
  1009. if (bg->bg_free_bits_count) {
  1010. ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
  1011. group_bh, bits_wanted,
  1012. le16_to_cpu(bg->bg_bits),
  1013. bit_off, bits_found);
  1014. if (!ret && max_block) {
  1015. blkoff = le64_to_cpu(bg->bg_blkno) + *bit_off +
  1016. *bits_found;
  1017. mlog(0, "Checking %llu against %llu\n",
  1018. (unsigned long long)blkoff,
  1019. (unsigned long long)max_block);
  1020. if (blkoff > max_block)
  1021. ret = -ENOSPC;
  1022. }
  1023. }
  1024. return ret;
  1025. }
/* Account num_bits newly allocated bits against the allocator dinode:
 * bump bitmap1.i_used and shrink the free count of the chain they
 * came from, journaling the dinode buffer. */
static int ocfs2_alloc_dinode_update_counts(struct inode *inode,
					    handle_t *handle,
					    struct buffer_head *di_bh,
					    u32 num_bits,
					    u16 chain)
{
	int ret;
	u32 tmp_used;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
	struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &di->id2.i_chain;

	ret = ocfs2_journal_access(handle, inode, di_bh,
				   OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	tmp_used = le32_to_cpu(di->id1.bitmap1.i_used);
	di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used);
	le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits);

	ret = ocfs2_journal_dirty(handle, di_bh);
	if (ret < 0)
		mlog_errno(ret);

out:
	return ret;
}
/* Try to allocate from the single group at gd_blkno: run the
 * context's search callback, then update the dinode counts and set
 * the bits in the group.  *bits_left reports the group's remaining
 * free bits afterwards.  NOTE(review): the dinode counts are updated
 * before ocfs2_block_group_set_bits() and are not rolled back here if
 * that call fails -- presumably the aborted transaction covers it;
 * worth confirming. */
static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
				  handle_t *handle,
				  u32 bits_wanted,
				  u32 min_bits,
				  u16 *bit_off,
				  unsigned int *num_bits,
				  u64 gd_blkno,
				  u16 *bits_left)
{
	int ret;
	u16 found;
	struct buffer_head *group_bh = NULL;
	struct ocfs2_group_desc *gd;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)ac->ac_bh->b_data;
	struct inode *alloc_inode = ac->ac_inode;

	ret = ocfs2_read_group_descriptor(alloc_inode, di, gd_blkno,
					  &group_bh);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	gd = (struct ocfs2_group_desc *) group_bh->b_data;

	/* -ENOSPC from the search means "try elsewhere"; no log. */
	ret = ac->ac_group_search(alloc_inode, group_bh, bits_wanted, min_bits,
				  ac->ac_max_block, bit_off, &found);
	if (ret < 0) {
		if (ret != -ENOSPC)
			mlog_errno(ret);
		goto out;
	}

	*num_bits = found;

	ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh,
					       *num_bits,
					       le16_to_cpu(gd->bg_chain));
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_block_group_set_bits(handle, alloc_inode, gd, group_bh,
					 *bit_off, *num_bits);
	if (ret < 0)
		mlog_errno(ret);

	*bits_left = le16_to_cpu(gd->bg_free_bits_count);

out:
	brelse(group_bh);

	return ret;
}
/* Walk the groups on chain ac->ac_chain looking for one that can
 * satisfy the request; optionally relink the winning group to the
 * chain head, then claim the bits (dinode counters first, then the
 * group bitmap).  Returns -ENOSPC when the whole chain is exhausted. */
static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
			      handle_t *handle,
			      u32 bits_wanted,
			      u32 min_bits,
			      u16 *bit_off,
			      unsigned int *num_bits,
			      u64 *bg_blkno,
			      u16 *bits_left)
{
	int status;
	u16 chain, tmp_bits;
	u32 tmp_used;
	u64 next_group;
	struct inode *alloc_inode = ac->ac_inode;
	struct buffer_head *group_bh = NULL;
	struct buffer_head *prev_group_bh = NULL;
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) ac->ac_bh->b_data;
	struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
	struct ocfs2_group_desc *bg;

	chain = ac->ac_chain;
	mlog(0, "trying to alloc %u bits from chain %u, inode %llu\n",
	     bits_wanted, chain,
	     (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno);

	status = ocfs2_read_group_descriptor(alloc_inode, fe,
					     le64_to_cpu(cl->cl_recs[chain].c_blkno),
					     &group_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	bg = (struct ocfs2_group_desc *) group_bh->b_data;

	status = -ENOSPC;
	/* for now, the chain search is a bit simplistic. We just use
	 * the 1st group with any empty bits. */
	while ((status = ac->ac_group_search(alloc_inode, group_bh,
					     bits_wanted, min_bits,
					     ac->ac_max_block, bit_off,
					     &tmp_bits)) == -ENOSPC) {
		if (!bg->bg_next_group)
			break;

		/* Remember the buffer we just searched so a relink can
		 * use it as the predecessor of the winner. */
		brelse(prev_group_bh);
		prev_group_bh = NULL;

		next_group = le64_to_cpu(bg->bg_next_group);
		prev_group_bh = group_bh;
		group_bh = NULL;
		status = ocfs2_read_group_descriptor(alloc_inode, fe,
						     next_group, &group_bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
		bg = (struct ocfs2_group_desc *) group_bh->b_data;
	}
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	mlog(0, "alloc succeeds: we give %u bits from block group %llu\n",
	     tmp_bits, (unsigned long long)le64_to_cpu(bg->bg_blkno));

	*num_bits = tmp_bits;

	BUG_ON(*num_bits == 0);

	/*
	 * Keep track of previous block descriptor read. When
	 * we find a target, if we have read more than X
	 * number of descriptors, and the target is reasonably
	 * empty, relink him to top of his chain.
	 *
	 * We've read 0 extra blocks and only send one more to
	 * the transaction, yet the next guy to search has a
	 * much easier time.
	 *
	 * Do this *after* figuring out how many bits we're taking out
	 * of our target group.
	 */
	if (ac->ac_allow_chain_relink &&
	    (prev_group_bh) &&
	    (ocfs2_block_group_reasonably_empty(bg, *num_bits))) {
		status = ocfs2_relink_block_group(handle, alloc_inode,
						  ac->ac_bh, group_bh,
						  prev_group_bh, chain);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	/* Ok, claim our bits now: set the info on dinode, chainlist
	 * and then the group */
	status = ocfs2_journal_access(handle,
				      alloc_inode,
				      ac->ac_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
	fe->id1.bitmap1.i_used = cpu_to_le32(*num_bits + tmp_used);
	le32_add_cpu(&cl->cl_recs[chain].c_free, -(*num_bits));

	status = ocfs2_journal_dirty(handle,
				     ac->ac_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_block_group_set_bits(handle,
					    alloc_inode,
					    bg,
					    group_bh,
					    *bit_off,
					    *num_bits);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	mlog(0, "Allocated %u bits from suballocator %llu\n", *num_bits,
	     (unsigned long long)le64_to_cpu(fe->i_blkno));

	*bg_blkno = le64_to_cpu(bg->bg_blkno);
	*bits_left = le16_to_cpu(bg->bg_free_bits_count);
bail:
	brelse(group_bh);
	brelse(prev_group_bh);

	mlog_exit(status);
	return status;
}
/* will give out up to bits_wanted contiguous bits.
 *
 * Strategy: first retry the most recently used group (ac_last_group),
 * then the chain with the most free bits, then every remaining chain
 * in order.  On success the hint is refreshed for the next claim. */
static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
				     struct ocfs2_alloc_context *ac,
				     handle_t *handle,
				     u32 bits_wanted,
				     u32 min_bits,
				     u16 *bit_off,
				     unsigned int *num_bits,
				     u64 *bg_blkno)
{
	int status;
	u16 victim, i;
	u16 bits_left = 0;
	u64 hint_blkno = ac->ac_last_group;
	struct ocfs2_chain_list *cl;
	struct ocfs2_dinode *fe;

	mlog_entry_void();

	BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
	BUG_ON(bits_wanted > (ac->ac_bits_wanted - ac->ac_bits_given));
	BUG_ON(!ac->ac_bh);

	fe = (struct ocfs2_dinode *) ac->ac_bh->b_data;

	/* The bh was validated by the inode read during
	 * ocfs2_reserve_suballoc_bits(). Any corruption is a code bug. */
	BUG_ON(!OCFS2_IS_VALID_DINODE(fe));

	/* On-disk counter corruption is a hard error, not a bug in
	 * this code -- report and fail. */
	if (le32_to_cpu(fe->id1.bitmap1.i_used) >=
	    le32_to_cpu(fe->id1.bitmap1.i_total)) {
		ocfs2_error(osb->sb, "Chain allocator dinode %llu has %u used "
			    "bits but only %u total.",
			    (unsigned long long)le64_to_cpu(fe->i_blkno),
			    le32_to_cpu(fe->id1.bitmap1.i_used),
			    le32_to_cpu(fe->id1.bitmap1.i_total));
		status = -EIO;
		goto bail;
	}

	if (hint_blkno) {
		/* Attempt to short-circuit the usual search mechanism
		 * by jumping straight to the most recently used
		 * allocation group. This helps us mantain some
		 * contiguousness across allocations. */
		status = ocfs2_search_one_group(ac, handle, bits_wanted,
						min_bits, bit_off, num_bits,
						hint_blkno, &bits_left);
		if (!status) {
			/* Be careful to update *bg_blkno here as the
			 * caller is expecting it to be filled in, and
			 * ocfs2_search_one_group() won't do that for
			 * us. */
			*bg_blkno = hint_blkno;
			goto set_hint;
		}
		if (status < 0 && status != -ENOSPC) {
			mlog_errno(status);
			goto bail;
		}
	}

	cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;

	victim = ocfs2_find_victim_chain(cl);
	ac->ac_chain = victim;
	ac->ac_allow_chain_relink = 1;

	status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, bit_off,
				    num_bits, bg_blkno, &bits_left);
	if (!status)
		goto set_hint;
	if (status < 0 && status != -ENOSPC) {
		mlog_errno(status);
		goto bail;
	}

	mlog(0, "Search of victim chain %u came up with nothing, "
	     "trying all chains now.\n", victim);

	/* If we didn't pick a good victim, then just default to
	 * searching each chain in order. Don't allow chain relinking
	 * because we only calculate enough journal credits for one
	 * relink per alloc. */
	ac->ac_allow_chain_relink = 0;
	for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i ++) {
		if (i == victim)
			continue;
		if (!cl->cl_recs[i].c_free)
			continue;

		ac->ac_chain = i;
		status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
					    bit_off, num_bits, bg_blkno,
					    &bits_left);
		if (!status)
			break;
		if (status < 0 && status != -ENOSPC) {
			mlog_errno(status);
			goto bail;
		}
	}

set_hint:
	if (status != -ENOSPC) {
		/* If the next search of this group is not likely to
		 * yield a suitable extent, then we reset the last
		 * group hint so as to not waste a disk read */
		if (bits_left < min_bits)
			ac->ac_last_group = 0;
		else
			ac->ac_last_group = *bg_blkno;
	}

bail:
	mlog_exit(status);
	return status;
}
  1326. int ocfs2_claim_metadata(struct ocfs2_super *osb,
  1327. handle_t *handle,
  1328. struct ocfs2_alloc_context *ac,
  1329. u32 bits_wanted,
  1330. u16 *suballoc_bit_start,
  1331. unsigned int *num_bits,
  1332. u64 *blkno_start)
  1333. {
  1334. int status;
  1335. u64 bg_blkno;
  1336. BUG_ON(!ac);
  1337. BUG_ON(ac->ac_bits_wanted < (ac->ac_bits_given + bits_wanted));
  1338. BUG_ON(ac->ac_which != OCFS2_AC_USE_META);
  1339. status = ocfs2_claim_suballoc_bits(osb,
  1340. ac,
  1341. handle,
  1342. bits_wanted,
  1343. 1,
  1344. suballoc_bit_start,
  1345. num_bits,
  1346. &bg_blkno);
  1347. if (status < 0) {
  1348. mlog_errno(status);
  1349. goto bail;
  1350. }
  1351. atomic_inc(&osb->alloc_stats.bg_allocs);
  1352. *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
  1353. ac->ac_bits_given += (*num_bits);
  1354. status = 0;
  1355. bail:
  1356. mlog_exit(status);
  1357. return status;
  1358. }
  1359. int ocfs2_claim_new_inode(struct ocfs2_super *osb,
  1360. handle_t *handle,
  1361. struct ocfs2_alloc_context *ac,
  1362. u16 *suballoc_bit,
  1363. u64 *fe_blkno)
  1364. {
  1365. int status;
  1366. unsigned int num_bits;
  1367. u64 bg_blkno;
  1368. mlog_entry_void();
  1369. BUG_ON(!ac);
  1370. BUG_ON(ac->ac_bits_given != 0);
  1371. BUG_ON(ac->ac_bits_wanted != 1);
  1372. BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);
  1373. status = ocfs2_claim_suballoc_bits(osb,
  1374. ac,
  1375. handle,
  1376. 1,
  1377. 1,
  1378. suballoc_bit,
  1379. &num_bits,
  1380. &bg_blkno);
  1381. if (status < 0) {
  1382. mlog_errno(status);
  1383. goto bail;
  1384. }
  1385. atomic_inc(&osb->alloc_stats.bg_allocs);
  1386. BUG_ON(num_bits != 1);
  1387. *fe_blkno = bg_blkno + (u64) (*suballoc_bit);
  1388. ac->ac_bits_given++;
  1389. status = 0;
  1390. bail:
  1391. mlog_exit(status);
  1392. return status;
  1393. }
  1394. /* translate a group desc. blkno and it's bitmap offset into
  1395. * disk cluster offset. */
  1396. static inline u32 ocfs2_desc_bitmap_to_cluster_off(struct inode *inode,
  1397. u64 bg_blkno,
  1398. u16 bg_bit_off)
  1399. {
  1400. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  1401. u32 cluster = 0;
  1402. BUG_ON(!ocfs2_is_cluster_bitmap(inode));
  1403. if (bg_blkno != osb->first_cluster_group_blkno)
  1404. cluster = ocfs2_blocks_to_clusters(inode->i_sb, bg_blkno);
  1405. cluster += (u32) bg_bit_off;
  1406. return cluster;
  1407. }
  1408. /* given a cluster offset, calculate which block group it belongs to
  1409. * and return that block offset. */
  1410. u64 ocfs2_which_cluster_group(struct inode *inode, u32 cluster)
  1411. {
  1412. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  1413. u32 group_no;
  1414. BUG_ON(!ocfs2_is_cluster_bitmap(inode));
  1415. group_no = cluster / osb->bitmap_cpg;
  1416. if (!group_no)
  1417. return osb->first_cluster_group_blkno;
  1418. return ocfs2_clusters_to_blocks(inode->i_sb,
  1419. group_no * osb->bitmap_cpg);
  1420. }
  1421. /* given the block number of a cluster start, calculate which cluster
  1422. * group and descriptor bitmap offset that corresponds to. */
  1423. static inline void ocfs2_block_to_cluster_group(struct inode *inode,
  1424. u64 data_blkno,
  1425. u64 *bg_blkno,
  1426. u16 *bg_bit_off)
  1427. {
  1428. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  1429. u32 data_cluster = ocfs2_blocks_to_clusters(osb->sb, data_blkno);
  1430. BUG_ON(!ocfs2_is_cluster_bitmap(inode));
  1431. *bg_blkno = ocfs2_which_cluster_group(inode,
  1432. data_cluster);
  1433. if (*bg_blkno == osb->first_cluster_group_blkno)
  1434. *bg_bit_off = (u16) data_cluster;
  1435. else
  1436. *bg_bit_off = (u16) ocfs2_blocks_to_clusters(osb->sb,
  1437. data_blkno - *bg_blkno);
  1438. }
/*
 * min_bits - minimum contiguous chunk from this total allocation we
 * can handle. set to what we asked for originally for a full
 * contig. allocation, set to '1' to indicate we can deal with extents
 * of any size.
 */
int __ocfs2_claim_clusters(struct ocfs2_super *osb,
			   handle_t *handle,
			   struct ocfs2_alloc_context *ac,
			   u32 min_clusters,
			   u32 max_clusters,
			   u32 *cluster_start,
			   u32 *num_clusters)
{
	int status;
	unsigned int bits_wanted = max_clusters;
	u64 bg_blkno = 0;
	u16 bg_bit_off;

	mlog_entry_void();

	BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);

	BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL
	       && ac->ac_which != OCFS2_AC_USE_MAIN);

	/* The reservation step picked either the local alloc window or
	 * the main bitmap; honor that choice here. */
	if (ac->ac_which == OCFS2_AC_USE_LOCAL) {
		status = ocfs2_claim_local_alloc_bits(osb,
						      handle,
						      ac,
						      bits_wanted,
						      cluster_start,
						      num_clusters);
		if (!status)
			atomic_inc(&osb->alloc_stats.local_data);
	} else {
		if (min_clusters > (osb->bitmap_cpg - 1)) {
			/* The only paths asking for contiguousness
			 * should know about this already. */
			mlog(ML_ERROR, "minimum allocation requested %u exceeds "
			     "group bitmap size %u!\n", min_clusters,
			     osb->bitmap_cpg);
			status = -ENOSPC;
			goto bail;
		}
		/* clamp the current request down to a realistic size. */
		if (bits_wanted > (osb->bitmap_cpg - 1))
			bits_wanted = osb->bitmap_cpg - 1;

		status = ocfs2_claim_suballoc_bits(osb,
						   ac,
						   handle,
						   bits_wanted,
						   min_clusters,
						   &bg_bit_off,
						   num_clusters,
						   &bg_blkno);
		if (!status) {
			/* Convert the (group, bit) pair back to an
			 * absolute cluster offset for the caller. */
			*cluster_start =
				ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
								 bg_blkno,
								 bg_bit_off);
			atomic_inc(&osb->alloc_stats.bitmap_data);
		}
	}
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	ac->ac_bits_given += *num_clusters;

bail:
	mlog_exit(status);
	return status;
}
  1509. int ocfs2_claim_clusters(struct ocfs2_super *osb,
  1510. handle_t *handle,
  1511. struct ocfs2_alloc_context *ac,
  1512. u32 min_clusters,
  1513. u32 *cluster_start,
  1514. u32 *num_clusters)
  1515. {
  1516. unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given;
  1517. return __ocfs2_claim_clusters(osb, handle, ac, min_clusters,
  1518. bits_wanted, cluster_start, num_clusters);
  1519. }
  1520. static inline int ocfs2_block_group_clear_bits(handle_t *handle,
  1521. struct inode *alloc_inode,
  1522. struct ocfs2_group_desc *bg,
  1523. struct buffer_head *group_bh,
  1524. unsigned int bit_off,
  1525. unsigned int num_bits)
  1526. {
  1527. int status;
  1528. unsigned int tmp;
  1529. int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
  1530. struct ocfs2_group_desc *undo_bg = NULL;
  1531. mlog_entry_void();
  1532. /* The caller got this descriptor from
  1533. * ocfs2_read_group_descriptor(). Any corruption is a code bug. */
  1534. BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
  1535. mlog(0, "off = %u, num = %u\n", bit_off, num_bits);
  1536. if (ocfs2_is_cluster_bitmap(alloc_inode))
  1537. journal_type = OCFS2_JOURNAL_ACCESS_UNDO;
  1538. status = ocfs2_journal_access(handle, alloc_inode, group_bh,
  1539. journal_type);
  1540. if (status < 0) {
  1541. mlog_errno(status);
  1542. goto bail;
  1543. }
  1544. if (ocfs2_is_cluster_bitmap(alloc_inode))
  1545. undo_bg = (struct ocfs2_group_desc *) bh2jh(group_bh)->b_committed_data;
  1546. tmp = num_bits;
  1547. while(tmp--) {
  1548. ocfs2_clear_bit((bit_off + tmp),
  1549. (unsigned long *) bg->bg_bitmap);
  1550. if (ocfs2_is_cluster_bitmap(alloc_inode))
  1551. ocfs2_set_bit(bit_off + tmp,
  1552. (unsigned long *) undo_bg->bg_bitmap);
  1553. }
  1554. le16_add_cpu(&bg->bg_free_bits_count, num_bits);
  1555. status = ocfs2_journal_dirty(handle, group_bh);
  1556. if (status < 0)
  1557. mlog_errno(status);
  1558. bail:
  1559. return status;
  1560. }
/*
 * Free 'count' bits starting at 'start_bit' in the group at
 * 'bg_blkno': clear them in the group descriptor, then credit the
 * chain's free count and debit the dinode's used count.
 *
 * expects the suballoc inode to already be locked.
 */
int ocfs2_free_suballoc_bits(handle_t *handle,
			     struct inode *alloc_inode,
			     struct buffer_head *alloc_bh,
			     unsigned int start_bit,
			     u64 bg_blkno,
			     unsigned int count)
{
	int status = 0;
	u32 tmp_used;
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) alloc_bh->b_data;
	struct ocfs2_chain_list *cl = &fe->id2.i_chain;
	struct buffer_head *group_bh = NULL;
	struct ocfs2_group_desc *group;

	mlog_entry_void();

	/* The alloc_bh comes from ocfs2_free_dinode() or
	 * ocfs2_free_clusters(). The callers have all locked the
	 * allocator and gotten alloc_bh from the lock call. This
	 * validates the dinode buffer. Any corruption that has happended
	 * is a code bug. */
	BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
	BUG_ON((count + start_bit) > ocfs2_bits_per_group(cl));

	mlog(0, "%llu: freeing %u bits from group %llu, starting at %u\n",
	     (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno, count,
	     (unsigned long long)bg_blkno, start_bit);

	status = ocfs2_read_group_descriptor(alloc_inode, fe, bg_blkno,
					     &group_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	group = (struct ocfs2_group_desc *) group_bh->b_data;

	/* The range must fit entirely inside this group's bitmap. */
	BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits));

	status = ocfs2_block_group_clear_bits(handle, alloc_inode,
					      group, group_bh,
					      start_bit, count);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	/* Now reflect the free in the allocator dinode's counters. */
	status = ocfs2_journal_access(handle, alloc_inode, alloc_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	le32_add_cpu(&cl->cl_recs[le16_to_cpu(group->bg_chain)].c_free,
		     count);
	tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
	fe->id1.bitmap1.i_used = cpu_to_le32(tmp_used - count);

	status = ocfs2_journal_dirty(handle, alloc_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

bail:
	brelse(group_bh);

	mlog_exit(status);
	return status;
}
  1623. int ocfs2_free_dinode(handle_t *handle,
  1624. struct inode *inode_alloc_inode,
  1625. struct buffer_head *inode_alloc_bh,
  1626. struct ocfs2_dinode *di)
  1627. {
  1628. u64 blk = le64_to_cpu(di->i_blkno);
  1629. u16 bit = le16_to_cpu(di->i_suballoc_bit);
  1630. u64 bg_blkno = ocfs2_which_suballoc_group(blk, bit);
  1631. return ocfs2_free_suballoc_bits(handle, inode_alloc_inode,
  1632. inode_alloc_bh, bit, bg_blkno, 1);
  1633. }
  1634. int ocfs2_free_clusters(handle_t *handle,
  1635. struct inode *bitmap_inode,
  1636. struct buffer_head *bitmap_bh,
  1637. u64 start_blk,
  1638. unsigned int num_clusters)
  1639. {
  1640. int status;
  1641. u16 bg_start_bit;
  1642. u64 bg_blkno;
  1643. struct ocfs2_dinode *fe;
  1644. /* You can't ever have a contiguous set of clusters
  1645. * bigger than a block group bitmap so we never have to worry
  1646. * about looping on them. */
  1647. mlog_entry_void();
  1648. /* This is expensive. We can safely remove once this stuff has
  1649. * gotten tested really well. */
  1650. BUG_ON(start_blk != ocfs2_clusters_to_blocks(bitmap_inode->i_sb, ocfs2_blocks_to_clusters(bitmap_inode->i_sb, start_blk)));
  1651. fe = (struct ocfs2_dinode *) bitmap_bh->b_data;
  1652. ocfs2_block_to_cluster_group(bitmap_inode, start_blk, &bg_blkno,
  1653. &bg_start_bit);
  1654. mlog(0, "want to free %u clusters starting at block %llu\n",
  1655. num_clusters, (unsigned long long)start_blk);
  1656. mlog(0, "bg_blkno = %llu, bg_start_bit = %u\n",
  1657. (unsigned long long)bg_blkno, bg_start_bit);
  1658. status = ocfs2_free_suballoc_bits(handle, bitmap_inode, bitmap_bh,
  1659. bg_start_bit, bg_blkno,
  1660. num_clusters);
  1661. if (status < 0) {
  1662. mlog_errno(status);
  1663. goto out;
  1664. }
  1665. ocfs2_local_alloc_seen_free_bits(OCFS2_SB(bitmap_inode->i_sb),
  1666. num_clusters);
  1667. out:
  1668. mlog_exit(status);
  1669. return status;
  1670. }
  1671. static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg)
  1672. {
  1673. printk("Block Group:\n");
  1674. printk("bg_signature: %s\n", bg->bg_signature);
  1675. printk("bg_size: %u\n", bg->bg_size);
  1676. printk("bg_bits: %u\n", bg->bg_bits);
  1677. printk("bg_free_bits_count: %u\n", bg->bg_free_bits_count);
  1678. printk("bg_chain: %u\n", bg->bg_chain);
  1679. printk("bg_generation: %u\n", le32_to_cpu(bg->bg_generation));
  1680. printk("bg_next_group: %llu\n",
  1681. (unsigned long long)bg->bg_next_group);
  1682. printk("bg_parent_dinode: %llu\n",
  1683. (unsigned long long)bg->bg_parent_dinode);
  1684. printk("bg_blkno: %llu\n",
  1685. (unsigned long long)bg->bg_blkno);
  1686. }
  1687. static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe)
  1688. {
  1689. int i;
  1690. printk("Suballoc Inode %llu:\n", (unsigned long long)fe->i_blkno);
  1691. printk("i_signature: %s\n", fe->i_signature);
  1692. printk("i_size: %llu\n",
  1693. (unsigned long long)fe->i_size);
  1694. printk("i_clusters: %u\n", fe->i_clusters);
  1695. printk("i_generation: %u\n",
  1696. le32_to_cpu(fe->i_generation));
  1697. printk("id1.bitmap1.i_used: %u\n",
  1698. le32_to_cpu(fe->id1.bitmap1.i_used));
  1699. printk("id1.bitmap1.i_total: %u\n",
  1700. le32_to_cpu(fe->id1.bitmap1.i_total));
  1701. printk("id2.i_chain.cl_cpg: %u\n", fe->id2.i_chain.cl_cpg);
  1702. printk("id2.i_chain.cl_bpc: %u\n", fe->id2.i_chain.cl_bpc);
  1703. printk("id2.i_chain.cl_count: %u\n", fe->id2.i_chain.cl_count);
  1704. printk("id2.i_chain.cl_next_free_rec: %u\n",
  1705. fe->id2.i_chain.cl_next_free_rec);
  1706. for(i = 0; i < fe->id2.i_chain.cl_next_free_rec; i++) {
  1707. printk("fe->id2.i_chain.cl_recs[%d].c_free: %u\n", i,
  1708. fe->id2.i_chain.cl_recs[i].c_free);
  1709. printk("fe->id2.i_chain.cl_recs[%d].c_total: %u\n", i,
  1710. fe->id2.i_chain.cl_recs[i].c_total);
  1711. printk("fe->id2.i_chain.cl_recs[%d].c_blkno: %llu\n", i,
  1712. (unsigned long long)fe->id2.i_chain.cl_recs[i].c_blkno);
  1713. }
  1714. }
  1715. /*
  1716. * For a given allocation, determine which allocators will need to be
  1717. * accessed, and lock them, reserving the appropriate number of bits.
  1718. *
  1719. * Sparse file systems call this from ocfs2_write_begin_nolock()
  1720. * and ocfs2_allocate_unwritten_extents().
  1721. *
  1722. * File systems which don't support holes call this from
  1723. * ocfs2_extend_allocation().
  1724. */
  1725. int ocfs2_lock_allocators(struct inode *inode,
  1726. struct ocfs2_extent_tree *et,
  1727. u32 clusters_to_add, u32 extents_to_split,
  1728. struct ocfs2_alloc_context **data_ac,
  1729. struct ocfs2_alloc_context **meta_ac)
  1730. {
  1731. int ret = 0, num_free_extents;
  1732. unsigned int max_recs_needed = clusters_to_add + 2 * extents_to_split;
  1733. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  1734. *meta_ac = NULL;
  1735. if (data_ac)
  1736. *data_ac = NULL;
  1737. BUG_ON(clusters_to_add != 0 && data_ac == NULL);
  1738. num_free_extents = ocfs2_num_free_extents(osb, inode, et);
  1739. if (num_free_extents < 0) {
  1740. ret = num_free_extents;
  1741. mlog_errno(ret);
  1742. goto out;
  1743. }
  1744. /*
  1745. * Sparse allocation file systems need to be more conservative
  1746. * with reserving room for expansion - the actual allocation
  1747. * happens while we've got a journal handle open so re-taking
  1748. * a cluster lock (because we ran out of room for another
  1749. * extent) will violate ordering rules.
  1750. *
  1751. * Most of the time we'll only be seeing this 1 cluster at a time
  1752. * anyway.
  1753. *
  1754. * Always lock for any unwritten extents - we might want to
  1755. * add blocks during a split.
  1756. */
  1757. if (!num_free_extents ||
  1758. (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed)) {
  1759. ret = ocfs2_reserve_new_metadata(osb, et->et_root_el, meta_ac);
  1760. if (ret < 0) {
  1761. if (ret != -ENOSPC)
  1762. mlog_errno(ret);
  1763. goto out;
  1764. }
  1765. }
  1766. if (clusters_to_add == 0)
  1767. goto out;
  1768. ret = ocfs2_reserve_clusters(osb, clusters_to_add, data_ac);
  1769. if (ret < 0) {
  1770. if (ret != -ENOSPC)
  1771. mlog_errno(ret);
  1772. goto out;
  1773. }
  1774. out:
  1775. if (ret) {
  1776. if (*meta_ac) {
  1777. ocfs2_free_alloc_context(*meta_ac);
  1778. *meta_ac = NULL;
  1779. }
  1780. /*
  1781. * We cannot have an error and a non null *data_ac.
  1782. */
  1783. }
  1784. return ret;
  1785. }