/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * suballoc.c
 *
 * metadata alloc and free
 * Inspired by ext3 block groups.
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>

#define MLOG_MASK_PREFIX ML_DISK_ALLOC
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "dlmglue.h"
#include "inode.h"
#include "journal.h"
#include "localalloc.h"
#include "suballoc.h"
#include "super.h"
#include "sysfile.h"
#include "uptodate.h"

#include "buffer_head_io.h"

#define NOT_ALLOC_NEW_GROUP		0
#define ALLOC_NEW_GROUP			1

#define OCFS2_MAX_INODES_TO_STEAL	1024

static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
static int ocfs2_block_group_fill(handle_t *handle,
		struct inode *alloc_inode,
		struct buffer_head *bg_bh,
		u64 group_blkno,
		u16 my_chain,
		struct ocfs2_chain_list *cl);
static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
		struct inode *alloc_inode,
		struct buffer_head *bh);

static int ocfs2_cluster_group_search(struct inode *inode,
		struct buffer_head *group_bh,
		u32 bits_wanted, u32 min_bits,
		u16 *bit_off, u16 *bits_found);
static int ocfs2_block_group_search(struct inode *inode,
		struct buffer_head *group_bh,
		u32 bits_wanted, u32 min_bits,
		u16 *bit_off, u16 *bits_found);
static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
		struct ocfs2_alloc_context *ac,
		handle_t *handle,
		u32 bits_wanted,
		u32 min_bits,
		u16 *bit_off,
		unsigned int *num_bits,
		u64 *bg_blkno);
static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
		int nr);
static inline int ocfs2_block_group_set_bits(handle_t *handle,
		struct inode *alloc_inode,
		struct ocfs2_group_desc *bg,
		struct buffer_head *group_bh,
		unsigned int bit_off,
		unsigned int num_bits);
static inline int ocfs2_block_group_clear_bits(handle_t *handle,
		struct inode *alloc_inode,
		struct ocfs2_group_desc *bg,
		struct buffer_head *group_bh,
		unsigned int bit_off,
		unsigned int num_bits);
static int ocfs2_relink_block_group(handle_t *handle,
		struct inode *alloc_inode,
		struct buffer_head *fe_bh,
		struct buffer_head *bg_bh,
		struct buffer_head *prev_bg_bh,
		u16 chain);
static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg,
		u32 wanted);
static inline u32 ocfs2_desc_bitmap_to_cluster_off(struct inode *inode,
		u64 bg_blkno,
		u16 bg_bit_off);
static inline void ocfs2_block_to_cluster_group(struct inode *inode,
		u64 data_blkno,
		u64 *bg_blkno,
		u16 *bg_bit_off);
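
/*
 * Teardown helper for an allocation context: drop the cluster lock on
 * the suballocator inode (local alloc reservations never took one),
 * release its i_mutex, put the inode, and release the cached
 * allocator dinode buffer.
 */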
void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac)
{
	struct inode *inode = ac->ac_inode;

	if (inode) {
		if (ac->ac_which != OCFS2_AC_USE_LOCAL)
			ocfs2_inode_unlock(inode, 1);

		mutex_unlock(&inode->i_mutex);

		iput(inode);
		ac->ac_inode = NULL;
	}
	if (ac->ac_bh) {
		brelse(ac->ac_bh);
		ac->ac_bh = NULL;
	}
}

void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
{
	ocfs2_free_ac_resource(ac);
	kfree(ac);
}

static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)
{
	return (u32)le16_to_cpu(cl->cl_cpg) * (u32)le16_to_cpu(cl->cl_bpc);
}

/* somewhat more expensive than our other checks, so use sparingly. */
int ocfs2_check_group_descriptor(struct super_block *sb,
		struct ocfs2_dinode *di,
		struct ocfs2_group_desc *gd)
{
	unsigned int max_bits;

	if (!OCFS2_IS_VALID_GROUP_DESC(gd)) {
		OCFS2_RO_ON_INVALID_GROUP_DESC(sb, gd);
		return -EIO;
	}

	if (di->i_blkno != gd->bg_parent_dinode) {
		ocfs2_error(sb, "Group descriptor # %llu has bad parent "
				"pointer (%llu, expected %llu)",
				(unsigned long long)le64_to_cpu(gd->bg_blkno),
				(unsigned long long)le64_to_cpu(gd->bg_parent_dinode),
				(unsigned long long)le64_to_cpu(di->i_blkno));
		return -EIO;
	}

	max_bits = le16_to_cpu(di->id2.i_chain.cl_cpg) * le16_to_cpu(di->id2.i_chain.cl_bpc);
	if (le16_to_cpu(gd->bg_bits) > max_bits) {
		ocfs2_error(sb, "Group descriptor # %llu has bit count of %u",
				(unsigned long long)le64_to_cpu(gd->bg_blkno),
				le16_to_cpu(gd->bg_bits));
		return -EIO;
	}

	if (le16_to_cpu(gd->bg_chain) >=
			le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) {
		ocfs2_error(sb, "Group descriptor # %llu has bad chain %u",
				(unsigned long long)le64_to_cpu(gd->bg_blkno),
				le16_to_cpu(gd->bg_chain));
		return -EIO;
	}

	if (le16_to_cpu(gd->bg_free_bits_count) > le16_to_cpu(gd->bg_bits)) {
		ocfs2_error(sb, "Group descriptor # %llu has bit count %u but "
				"claims that %u are free",
				(unsigned long long)le64_to_cpu(gd->bg_blkno),
				le16_to_cpu(gd->bg_bits),
				le16_to_cpu(gd->bg_free_bits_count));
		return -EIO;
	}

	if (le16_to_cpu(gd->bg_bits) > (8 * le16_to_cpu(gd->bg_size))) {
		ocfs2_error(sb, "Group descriptor # %llu has bit count %u but "
				"max bitmap bits of %u",
				(unsigned long long)le64_to_cpu(gd->bg_blkno),
				le16_to_cpu(gd->bg_bits),
				8 * le16_to_cpu(gd->bg_size));
		return -EIO;
	}

	return 0;
}

static int ocfs2_block_group_fill(handle_t *handle,
		struct inode *alloc_inode,
		struct buffer_head *bg_bh,
		u64 group_blkno,
		u16 my_chain,
		struct ocfs2_chain_list *cl)
{
	int status = 0;
	struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
	struct super_block * sb = alloc_inode->i_sb;

	mlog_entry_void();

	if (((unsigned long long) bg_bh->b_blocknr) != group_blkno) {
		ocfs2_error(alloc_inode->i_sb, "group block (%llu) != "
				"b_blocknr (%llu)",
				(unsigned long long)group_blkno,
				(unsigned long long) bg_bh->b_blocknr);
		status = -EIO;
		goto bail;
	}

	status = ocfs2_journal_access(handle,
			alloc_inode,
			bg_bh,
			OCFS2_JOURNAL_ACCESS_CREATE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	memset(bg, 0, sb->s_blocksize);
	strcpy(bg->bg_signature, OCFS2_GROUP_DESC_SIGNATURE);
	bg->bg_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
	bg->bg_size = cpu_to_le16(ocfs2_group_bitmap_size(sb));
	bg->bg_bits = cpu_to_le16(ocfs2_bits_per_group(cl));
	bg->bg_chain = cpu_to_le16(my_chain);
	bg->bg_next_group = cl->cl_recs[my_chain].c_blkno;
	bg->bg_parent_dinode = cpu_to_le64(OCFS2_I(alloc_inode)->ip_blkno);
	bg->bg_blkno = cpu_to_le64(group_blkno);
	/* set the 1st bit in the bitmap to account for the descriptor block */
	ocfs2_set_bit(0, (unsigned long *)bg->bg_bitmap);
	bg->bg_free_bits_count = cpu_to_le16(le16_to_cpu(bg->bg_bits) - 1);

	status = ocfs2_journal_dirty(handle, bg_bh);
	if (status < 0)
		mlog_errno(status);

	/* There is no need to zero out or otherwise initialize the
	 * other blocks in a group - All valid FS metadata in a block
	 * group stores the superblock fs_generation value at
	 * allocation time. */

bail:
	mlog_exit(status);
	return status;
}

static inline u16 ocfs2_find_smallest_chain(struct ocfs2_chain_list *cl)
{
	u16 curr, best;

	best = curr = 0;
	while (curr < le16_to_cpu(cl->cl_count)) {
		if (le32_to_cpu(cl->cl_recs[best].c_total) >
				le32_to_cpu(cl->cl_recs[curr].c_total))
			best = curr;
		curr++;
	}
	return best;
}
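
/*
 * A new block group is hung off the chain that currently has the
 * fewest total bits (ocfs2_find_smallest_chain() above), which keeps
 * the chains in a suballocator roughly balanced as it grows.
 */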

/*
 * We expect the block group allocator to already be locked.
 */
static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
		struct inode *alloc_inode,
		struct buffer_head *bh)
{
	int status, credits;
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data;
	struct ocfs2_chain_list *cl;
	struct ocfs2_alloc_context *ac = NULL;
	handle_t *handle = NULL;
	u32 bit_off, num_bits;
	u16 alloc_rec;
	u64 bg_blkno;
	struct buffer_head *bg_bh = NULL;
	struct ocfs2_group_desc *bg;

	BUG_ON(ocfs2_is_cluster_bitmap(alloc_inode));

	mlog_entry_void();

	cl = &fe->id2.i_chain;
	status = ocfs2_reserve_clusters(osb,
			le16_to_cpu(cl->cl_cpg),
			&ac);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	credits = ocfs2_calc_group_alloc_credits(osb->sb,
			le16_to_cpu(cl->cl_cpg));
	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_claim_clusters(osb,
			handle,
			ac,
			le16_to_cpu(cl->cl_cpg),
			&bit_off,
			&num_bits);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	alloc_rec = ocfs2_find_smallest_chain(cl);

	/* setup the group */
	bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
	mlog(0, "new descriptor, record %u, at block %llu\n",
	     alloc_rec, (unsigned long long)bg_blkno);

	bg_bh = sb_getblk(osb->sb, bg_blkno);
	if (!bg_bh) {
		status = -EIO;
		mlog_errno(status);
		goto bail;
	}
	ocfs2_set_new_buffer_uptodate(alloc_inode, bg_bh);

	status = ocfs2_block_group_fill(handle,
			alloc_inode,
			bg_bh,
			bg_blkno,
			alloc_rec,
			cl);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	bg = (struct ocfs2_group_desc *) bg_bh->b_data;

	status = ocfs2_journal_access(handle, alloc_inode,
			bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	le32_add_cpu(&cl->cl_recs[alloc_rec].c_free,
		     le16_to_cpu(bg->bg_free_bits_count));
	le32_add_cpu(&cl->cl_recs[alloc_rec].c_total, le16_to_cpu(bg->bg_bits));
	cl->cl_recs[alloc_rec].c_blkno = cpu_to_le64(bg_blkno);
	if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count))
		le16_add_cpu(&cl->cl_next_free_rec, 1);

	le32_add_cpu(&fe->id1.bitmap1.i_used, le16_to_cpu(bg->bg_bits) -
			le16_to_cpu(bg->bg_free_bits_count));
	le32_add_cpu(&fe->id1.bitmap1.i_total, le16_to_cpu(bg->bg_bits));
	le32_add_cpu(&fe->i_clusters, le16_to_cpu(cl->cl_cpg));

	status = ocfs2_journal_dirty(handle, bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	spin_lock(&OCFS2_I(alloc_inode)->ip_lock);
	OCFS2_I(alloc_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	fe->i_size = cpu_to_le64(ocfs2_clusters_to_bytes(alloc_inode->i_sb,
			le32_to_cpu(fe->i_clusters)));
	spin_unlock(&OCFS2_I(alloc_inode)->ip_lock);
	i_size_write(alloc_inode, le64_to_cpu(fe->i_size));
	alloc_inode->i_blocks = ocfs2_inode_sector_count(alloc_inode);

	status = 0;
bail:
	if (handle)
		ocfs2_commit_trans(osb, handle);

	if (ac)
		ocfs2_free_alloc_context(ac);

	if (bg_bh)
		brelse(bg_bh);

	mlog_exit(status);
	return status;
}

static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
		struct ocfs2_alloc_context *ac,
		int type,
		u32 slot,
		int alloc_new_group)
{
	int status;
	u32 bits_wanted = ac->ac_bits_wanted;
	struct inode *alloc_inode;
	struct buffer_head *bh = NULL;
	struct ocfs2_dinode *fe;
	u32 free_bits;

	mlog_entry_void();

	alloc_inode = ocfs2_get_system_file_inode(osb, type, slot);
	if (!alloc_inode) {
		mlog_errno(-EINVAL);
		return -EINVAL;
	}

	mutex_lock(&alloc_inode->i_mutex);

	status = ocfs2_inode_lock(alloc_inode, &bh, 1);
	if (status < 0) {
		mutex_unlock(&alloc_inode->i_mutex);
		iput(alloc_inode);

		mlog_errno(status);
		return status;
	}

	ac->ac_inode = alloc_inode;
	ac->ac_alloc_slot = slot;

	fe = (struct ocfs2_dinode *) bh->b_data;
	if (!OCFS2_IS_VALID_DINODE(fe)) {
		OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe);
		status = -EIO;
		goto bail;
	}
	if (!(fe->i_flags & cpu_to_le32(OCFS2_CHAIN_FL))) {
		ocfs2_error(alloc_inode->i_sb, "Invalid chain allocator %llu",
				(unsigned long long)le64_to_cpu(fe->i_blkno));
		status = -EIO;
		goto bail;
	}

	free_bits = le32_to_cpu(fe->id1.bitmap1.i_total) -
			le32_to_cpu(fe->id1.bitmap1.i_used);

	if (bits_wanted > free_bits) {
		/* cluster bitmap never grows */
		if (ocfs2_is_cluster_bitmap(alloc_inode)) {
			mlog(0, "Disk Full: wanted=%u, free_bits=%u\n",
			     bits_wanted, free_bits);
			status = -ENOSPC;
			goto bail;
		}

		if (alloc_new_group != ALLOC_NEW_GROUP) {
			mlog(0, "Alloc File %u Full: wanted=%u, free_bits=%u, "
			     "and we don't alloc a new group for it.\n",
			     slot, bits_wanted, free_bits);
			status = -ENOSPC;
			goto bail;
		}

		status = ocfs2_block_group_alloc(osb, alloc_inode, bh);
		if (status < 0) {
			if (status != -ENOSPC)
				mlog_errno(status);
			goto bail;
		}
		atomic_inc(&osb->alloc_stats.bg_extends);

		/* You should never ask for this much metadata */
		BUG_ON(bits_wanted >
		       (le32_to_cpu(fe->id1.bitmap1.i_total)
			- le32_to_cpu(fe->id1.bitmap1.i_used)));
	}

	get_bh(bh);
	ac->ac_bh = bh;
bail:
	if (bh)
		brelse(bh);

	mlog_exit(status);
	return status;
}

int ocfs2_reserve_new_metadata_blocks(struct ocfs2_super *osb,
		int blocks,
		struct ocfs2_alloc_context **ac)
{
	int status;
	u32 slot;

	*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
	if (!(*ac)) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	(*ac)->ac_bits_wanted = blocks;
	(*ac)->ac_which = OCFS2_AC_USE_META;
	slot = osb->slot_num;
	(*ac)->ac_group_search = ocfs2_block_group_search;

	status = ocfs2_reserve_suballoc_bits(osb, (*ac),
			EXTENT_ALLOC_SYSTEM_INODE,
			slot, ALLOC_NEW_GROUP);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	status = 0;
bail:
	if ((status < 0) && *ac) {
		ocfs2_free_alloc_context(*ac);
		*ac = NULL;
	}

	mlog_exit(status);
	return status;
}

int ocfs2_reserve_new_metadata(struct ocfs2_super *osb,
		struct ocfs2_extent_list *root_el,
		struct ocfs2_alloc_context **ac)
{
	return ocfs2_reserve_new_metadata_blocks(osb,
			ocfs2_extend_meta_needed(root_el),
			ac);
}
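
/*
 * Metadata reservations above always come from this node's own extent
 * allocator (EXTENT_ALLOC_SYSTEM_INODE in osb->slot_num), and that
 * allocator may grow a new block group when it is full.  Inode
 * reservations below may additionally "steal" from other slots' inode
 * allocators when our own slot runs dry.
 */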
static int ocfs2_steal_inode_from_other_nodes(struct ocfs2_super *osb,
		struct ocfs2_alloc_context *ac)
{
	int i, status = -ENOSPC;
	s16 slot = ocfs2_get_inode_steal_slot(osb);

	/* Start to steal inodes from the first slot after ours. */
	if (slot == OCFS2_INVALID_SLOT)
		slot = osb->slot_num + 1;

	for (i = 0; i < osb->max_slots; i++, slot++) {
		if (slot == osb->max_slots)
			slot = 0;

		if (slot == osb->slot_num)
			continue;

		status = ocfs2_reserve_suballoc_bits(osb, ac,
				INODE_ALLOC_SYSTEM_INODE,
				slot, NOT_ALLOC_NEW_GROUP);
		if (status >= 0) {
			ocfs2_set_inode_steal_slot(osb, slot);
			break;
		}

		ocfs2_free_ac_resource(ac);
	}

	return status;
}

int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
		struct ocfs2_alloc_context **ac)
{
	int status;
	s16 slot = ocfs2_get_inode_steal_slot(osb);

	*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
	if (!(*ac)) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	(*ac)->ac_bits_wanted = 1;
	(*ac)->ac_which = OCFS2_AC_USE_INODE;

	(*ac)->ac_group_search = ocfs2_block_group_search;

	/*
	 * slot is set when we successfully steal inode from other nodes.
	 * It is reset in 3 places:
	 * 1. when we flush the truncate log
	 * 2. when we complete local alloc recovery.
	 * 3. when we successfully allocate from our own slot.
	 * After it is set, we will go on stealing inodes until we find the
	 * need to check our slots to see whether there is some space for us.
	 */
	if (slot != OCFS2_INVALID_SLOT &&
	    atomic_read(&osb->s_num_inodes_stolen) < OCFS2_MAX_INODES_TO_STEAL)
		goto inode_steal;

	atomic_set(&osb->s_num_inodes_stolen, 0);
	status = ocfs2_reserve_suballoc_bits(osb, *ac,
			INODE_ALLOC_SYSTEM_INODE,
			osb->slot_num, ALLOC_NEW_GROUP);
	if (status >= 0) {
		status = 0;

		/*
		 * Some inodes must be freed by us, so try to allocate
		 * from our own next time.
		 */
		if (slot != OCFS2_INVALID_SLOT)
			ocfs2_init_inode_steal_slot(osb);
		goto bail;
	} else if (status < 0 && status != -ENOSPC) {
		mlog_errno(status);
		goto bail;
	}

	ocfs2_free_ac_resource(*ac);

inode_steal:
	status = ocfs2_steal_inode_from_other_nodes(osb, *ac);
	atomic_inc(&osb->s_num_inodes_stolen);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	status = 0;
bail:
	if ((status < 0) && *ac) {
		ocfs2_free_alloc_context(*ac);
		*ac = NULL;
	}

	mlog_exit(status);
	return status;
}

/* local alloc code has to do the same thing, so rather than do this
 * twice.. */
int ocfs2_reserve_cluster_bitmap_bits(struct ocfs2_super *osb,
		struct ocfs2_alloc_context *ac)
{
	int status;

	ac->ac_which = OCFS2_AC_USE_MAIN;
	ac->ac_group_search = ocfs2_cluster_group_search;

	status = ocfs2_reserve_suballoc_bits(osb, ac,
			GLOBAL_BITMAP_SYSTEM_INODE,
			OCFS2_INVALID_SLOT,
			ALLOC_NEW_GROUP);
	if (status < 0 && status != -ENOSPC) {
		mlog_errno(status);
		goto bail;
	}

bail:
	return status;
}

/* Callers don't need to care which bitmap (local alloc or main) to
 * use so we figure it out for them, but unfortunately this clutters
 * things a bit. */
int ocfs2_reserve_clusters(struct ocfs2_super *osb,
		u32 bits_wanted,
		struct ocfs2_alloc_context **ac)
{
	int status;

	mlog_entry_void();

	*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
	if (!(*ac)) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	(*ac)->ac_bits_wanted = bits_wanted;

	status = -ENOSPC;
	if (ocfs2_alloc_should_use_local(osb, bits_wanted)) {
		status = ocfs2_reserve_local_alloc_bits(osb,
				bits_wanted,
				*ac);
		if ((status < 0) && (status != -ENOSPC)) {
			mlog_errno(status);
			goto bail;
		}
	}

	if (status == -ENOSPC) {
		status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
		if (status < 0) {
			if (status != -ENOSPC)
				mlog_errno(status);
			goto bail;
		}
	}

	status = 0;
bail:
	if ((status < 0) && *ac) {
		ocfs2_free_alloc_context(*ac);
		*ac = NULL;
	}

	mlog_exit(status);
	return status;
}

/*
 * More or less lifted from ext3. I'll leave their description below:
 *
 * "For ext3 allocations, we must not reuse any blocks which are
 * allocated in the bitmap buffer's "last committed data" copy.  This
 * prevents deletes from freeing up the page for reuse until we have
 * committed the delete transaction.
 *
 * If we didn't do this, then deleting something and reallocating it as
 * data would allow the old block to be overwritten before the
 * transaction committed (because we force data to disk before commit).
 * This would lead to corruption if we crashed between overwriting the
 * data and committing the delete.
 *
 * @@@ We may want to make this allocation behaviour conditional on
 * data-writes at some point, and disable it for metadata allocations or
 * sync-data inodes."
 *
 * Note: OCFS2 already does this differently for metadata vs data
 * allocations, as those bitmaps are separate and undo access is never
 * called on a metadata group descriptor.
 */
static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
		int nr)
{
	struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;

	if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap))
		return 0;
	if (!buffer_jbd(bg_bh) || !bh2jh(bg_bh)->b_committed_data)
		return 1;

	bg = (struct ocfs2_group_desc *) bh2jh(bg_bh)->b_committed_data;
	return !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
}

static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
		struct buffer_head *bg_bh,
		unsigned int bits_wanted,
		unsigned int total_bits,
		u16 *bit_off,
		u16 *bits_found)
{
	void *bitmap;
	u16 best_offset, best_size;
	int offset, start, found, status = 0;
	struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;

	if (!OCFS2_IS_VALID_GROUP_DESC(bg)) {
		OCFS2_RO_ON_INVALID_GROUP_DESC(osb->sb, bg);
		return -EIO;
	}

	found = start = best_offset = best_size = 0;
	bitmap = bg->bg_bitmap;

	while((offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start)) != -1) {
		if (offset == total_bits)
			break;

		if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) {
			/* We found a zero, but we can't use it as it
			 * hasn't been put to disk yet! */
			found = 0;
			start = offset + 1;
		} else if (offset == start) {
			/* we found a zero */
			found++;
			/* move start to the next bit to test */
			start++;
		} else {
			/* got a zero after some ones */
			found = 1;
			start = offset + 1;
		}
		if (found > best_size) {
			best_size = found;
			best_offset = start - found;
		}
		/* we got everything we needed */
		if (found == bits_wanted) {
			/* mlog(0, "Found it all!\n"); */
			break;
		}
	}

	/* XXX: I think the first clause is equivalent to the second
	 * - jlbec */
	if (found == bits_wanted) {
		*bit_off = start - found;
		*bits_found = found;
	} else if (best_size) {
		*bit_off = best_offset;
		*bits_found = best_size;
	} else {
		status = -ENOSPC;
		/* No error log here -- see the comment above
		 * ocfs2_test_bg_bit_allocatable */
	}

	return status;
}
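
/*
 * Summary of the search above: a bit is only considered allocatable if
 * it is clear both in the live bitmap and in the journal's "last
 * committed" copy.  When a full run of bits_wanted free bits cannot be
 * found, the largest clear run seen (best_offset/best_size) is
 * returned instead, and -ENOSPC only when no usable bit exists at all.
 */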
static inline int ocfs2_block_group_set_bits(handle_t *handle,
		struct inode *alloc_inode,
		struct ocfs2_group_desc *bg,
		struct buffer_head *group_bh,
		unsigned int bit_off,
		unsigned int num_bits)
{
	int status;
	void *bitmap = bg->bg_bitmap;
	int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;

	mlog_entry_void();

	if (!OCFS2_IS_VALID_GROUP_DESC(bg)) {
		OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg);
		status = -EIO;
		goto bail;
	}
	BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits);

	mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off,
	     num_bits);

	if (ocfs2_is_cluster_bitmap(alloc_inode))
		journal_type = OCFS2_JOURNAL_ACCESS_UNDO;

	status = ocfs2_journal_access(handle,
			alloc_inode,
			group_bh,
			journal_type);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	le16_add_cpu(&bg->bg_free_bits_count, -num_bits);

	while(num_bits--)
		ocfs2_set_bit(bit_off++, bitmap);

	status = ocfs2_journal_dirty(handle,
			group_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

bail:
	mlog_exit(status);
	return status;
}

/* find the one with the most empty bits */
static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl)
{
	u16 curr, best;

	BUG_ON(!cl->cl_next_free_rec);

	best = curr = 0;
	while (curr < le16_to_cpu(cl->cl_next_free_rec)) {
		if (le32_to_cpu(cl->cl_recs[curr].c_free) >
				le32_to_cpu(cl->cl_recs[best].c_free))
			best = curr;
		curr++;
	}

	BUG_ON(best >= le16_to_cpu(cl->cl_next_free_rec));
	return best;
}

static int ocfs2_relink_block_group(handle_t *handle,
		struct inode *alloc_inode,
		struct buffer_head *fe_bh,
		struct buffer_head *bg_bh,
		struct buffer_head *prev_bg_bh,
		u16 chain)
{
	int status;
	/* there is a really tiny chance the journal calls could fail,
	 * but we wouldn't want inconsistent blocks in *any* case. */
	u64 fe_ptr, bg_ptr, prev_bg_ptr;
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
	struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
	struct ocfs2_group_desc *prev_bg = (struct ocfs2_group_desc *) prev_bg_bh->b_data;

	if (!OCFS2_IS_VALID_DINODE(fe)) {
		OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe);
		status = -EIO;
		goto out;
	}
	if (!OCFS2_IS_VALID_GROUP_DESC(bg)) {
		OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg);
		status = -EIO;
		goto out;
	}
	if (!OCFS2_IS_VALID_GROUP_DESC(prev_bg)) {
		OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, prev_bg);
		status = -EIO;
		goto out;
	}

	mlog(0, "Suballoc %llu, chain %u, move group %llu to top, prev = %llu\n",
	     (unsigned long long)le64_to_cpu(fe->i_blkno), chain,
	     (unsigned long long)le64_to_cpu(bg->bg_blkno),
	     (unsigned long long)le64_to_cpu(prev_bg->bg_blkno));

	fe_ptr = le64_to_cpu(fe->id2.i_chain.cl_recs[chain].c_blkno);
	bg_ptr = le64_to_cpu(bg->bg_next_group);
	prev_bg_ptr = le64_to_cpu(prev_bg->bg_next_group);

	status = ocfs2_journal_access(handle, alloc_inode, prev_bg_bh,
			OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto out_rollback;
	}

	prev_bg->bg_next_group = bg->bg_next_group;

	status = ocfs2_journal_dirty(handle, prev_bg_bh);
	if (status < 0) {
		mlog_errno(status);
		goto out_rollback;
	}

	status = ocfs2_journal_access(handle, alloc_inode, bg_bh,
			OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto out_rollback;
	}

	bg->bg_next_group = fe->id2.i_chain.cl_recs[chain].c_blkno;

	status = ocfs2_journal_dirty(handle, bg_bh);
	if (status < 0) {
		mlog_errno(status);
		goto out_rollback;
	}

	status = ocfs2_journal_access(handle, alloc_inode, fe_bh,
			OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto out_rollback;
	}

	fe->id2.i_chain.cl_recs[chain].c_blkno = bg->bg_blkno;

	status = ocfs2_journal_dirty(handle, fe_bh);
	if (status < 0) {
		mlog_errno(status);
		goto out_rollback;
	}

	status = 0;
out_rollback:
	if (status < 0) {
		fe->id2.i_chain.cl_recs[chain].c_blkno = cpu_to_le64(fe_ptr);
		bg->bg_next_group = cpu_to_le64(bg_ptr);
		prev_bg->bg_next_group = cpu_to_le64(prev_bg_ptr);
	}
out:
	mlog_exit(status);
	return status;
}

static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg,
		u32 wanted)
{
	return le16_to_cpu(bg->bg_free_bits_count) > wanted;
}

/* return 0 on success, -ENOSPC to keep searching and any other < 0
 * value on error. */
static int ocfs2_cluster_group_search(struct inode *inode,
		struct buffer_head *group_bh,
		u32 bits_wanted, u32 min_bits,
		u16 *bit_off, u16 *bits_found)
{
	int search = -ENOSPC;
	int ret;
	struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *) group_bh->b_data;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	u16 tmp_off, tmp_found;
	unsigned int max_bits, gd_cluster_off;

	BUG_ON(!ocfs2_is_cluster_bitmap(inode));

	if (gd->bg_free_bits_count) {
		max_bits = le16_to_cpu(gd->bg_bits);

		/* Tail groups in cluster bitmaps which aren't cpg
		 * aligned are prone to partial extension by a failed
		 * fs resize. If the file system resize never got to
		 * update the dinode cluster count, then we don't want
		 * to trust any clusters past it, regardless of what
		 * the group descriptor says. */
		gd_cluster_off = ocfs2_blocks_to_clusters(inode->i_sb,
				le64_to_cpu(gd->bg_blkno));
		if ((gd_cluster_off + max_bits) >
		    OCFS2_I(inode)->ip_clusters) {
			max_bits = OCFS2_I(inode)->ip_clusters - gd_cluster_off;
			mlog(0, "Desc %llu, bg_bits %u, clusters %u, use %u\n",
			     (unsigned long long)le64_to_cpu(gd->bg_blkno),
			     le16_to_cpu(gd->bg_bits),
			     OCFS2_I(inode)->ip_clusters, max_bits);
		}

		ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
				group_bh, bits_wanted,
				max_bits,
				&tmp_off, &tmp_found);
		if (ret)
			return ret;

		/* ocfs2_block_group_find_clear_bits() might
		 * return success, but we still want to return
		 * -ENOSPC unless it found the minimum number
		 * of bits. */
		if (min_bits <= tmp_found) {
			*bit_off = tmp_off;
			*bits_found = tmp_found;
			search = 0; /* success */
		} else if (tmp_found) {
			/*
			 * Don't show bits which we'll be returning
			 * for allocation to the local alloc bitmap.
			 */
			ocfs2_local_alloc_seen_free_bits(osb, tmp_found);
		}
	}

	return search;
}

static int ocfs2_block_group_search(struct inode *inode,
		struct buffer_head *group_bh,
		u32 bits_wanted, u32 min_bits,
		u16 *bit_off, u16 *bits_found)
{
	int ret = -ENOSPC;
	struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) group_bh->b_data;

	BUG_ON(min_bits != 1);
	BUG_ON(ocfs2_is_cluster_bitmap(inode));

	if (bg->bg_free_bits_count)
		ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
				group_bh, bits_wanted,
				le16_to_cpu(bg->bg_bits),
				bit_off, bits_found);

	return ret;
}

static int ocfs2_alloc_dinode_update_counts(struct inode *inode,
		handle_t *handle,
		struct buffer_head *di_bh,
		u32 num_bits,
		u16 chain)
{
	int ret;
	u32 tmp_used;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
	struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &di->id2.i_chain;

	ret = ocfs2_journal_access(handle, inode, di_bh,
			OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	tmp_used = le32_to_cpu(di->id1.bitmap1.i_used);
	di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used);
	le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits);

	ret = ocfs2_journal_dirty(handle, di_bh);
	if (ret < 0)
		mlog_errno(ret);

out:
	return ret;
}

static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
		handle_t *handle,
		u32 bits_wanted,
		u32 min_bits,
		u16 *bit_off,
		unsigned int *num_bits,
		u64 gd_blkno,
		u16 *bits_left)
{
	int ret;
	u16 found;
	struct buffer_head *group_bh = NULL;
	struct ocfs2_group_desc *gd;
	struct inode *alloc_inode = ac->ac_inode;

	ret = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb), gd_blkno,
			&group_bh, OCFS2_BH_CACHED, alloc_inode);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	gd = (struct ocfs2_group_desc *) group_bh->b_data;
	if (!OCFS2_IS_VALID_GROUP_DESC(gd)) {
		OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, gd);
		ret = -EIO;
		goto out;
	}

	ret = ac->ac_group_search(alloc_inode, group_bh, bits_wanted, min_bits,
			bit_off, &found);
	if (ret < 0) {
		if (ret != -ENOSPC)
			mlog_errno(ret);
		goto out;
	}

	*num_bits = found;

	ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh,
			*num_bits,
			le16_to_cpu(gd->bg_chain));
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_block_group_set_bits(handle, alloc_inode, gd, group_bh,
			*bit_off, *num_bits);
	if (ret < 0)
		mlog_errno(ret);

	*bits_left = le16_to_cpu(gd->bg_free_bits_count);

out:
	brelse(group_bh);

	return ret;
}

static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
		handle_t *handle,
		u32 bits_wanted,
		u32 min_bits,
		u16 *bit_off,
		unsigned int *num_bits,
		u64 *bg_blkno,
		u16 *bits_left)
{
	int status;
	u16 chain, tmp_bits;
	u32 tmp_used;
	u64 next_group;
	struct inode *alloc_inode = ac->ac_inode;
	struct buffer_head *group_bh = NULL;
	struct buffer_head *prev_group_bh = NULL;
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) ac->ac_bh->b_data;
	struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
	struct ocfs2_group_desc *bg;

	chain = ac->ac_chain;
	mlog(0, "trying to alloc %u bits from chain %u, inode %llu\n",
	     bits_wanted, chain,
	     (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno);

	status = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb),
			le64_to_cpu(cl->cl_recs[chain].c_blkno),
			&group_bh, OCFS2_BH_CACHED, alloc_inode);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	bg = (struct ocfs2_group_desc *) group_bh->b_data;
	status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg);
	if (status) {
		mlog_errno(status);
		goto bail;
	}

	status = -ENOSPC;
	/* for now, the chain search is a bit simplistic. We just use
	 * the 1st group with any empty bits. */
	while ((status = ac->ac_group_search(alloc_inode, group_bh, bits_wanted,
			min_bits, bit_off, &tmp_bits)) == -ENOSPC) {
		if (!bg->bg_next_group)
			break;

		if (prev_group_bh) {
			brelse(prev_group_bh);
			prev_group_bh = NULL;
		}
		next_group = le64_to_cpu(bg->bg_next_group);
		prev_group_bh = group_bh;
		group_bh = NULL;
		status = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb),
				next_group, &group_bh,
				OCFS2_BH_CACHED, alloc_inode);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
		bg = (struct ocfs2_group_desc *) group_bh->b_data;
		status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg);
		if (status) {
			mlog_errno(status);
			goto bail;
		}
	}
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	mlog(0, "alloc succeeds: we give %u bits from block group %llu\n",
	     tmp_bits, (unsigned long long)le64_to_cpu(bg->bg_blkno));

	*num_bits = tmp_bits;

	BUG_ON(*num_bits == 0);

	/*
	 * Keep track of previous block descriptor read. When
	 * we find a target, if we have read more than X
	 * number of descriptors, and the target is reasonably
	 * empty, relink him to top of his chain.
	 *
	 * We've read 0 extra blocks and only send one more to
	 * the transaction, yet the next guy to search has a
	 * much easier time.
	 *
	 * Do this *after* figuring out how many bits we're taking out
	 * of our target group.
	 */
	if (ac->ac_allow_chain_relink &&
	    (prev_group_bh) &&
	    (ocfs2_block_group_reasonably_empty(bg, *num_bits))) {
		status = ocfs2_relink_block_group(handle, alloc_inode,
				ac->ac_bh, group_bh,
				prev_group_bh, chain);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	/* Ok, claim our bits now: set the info on dinode, chainlist
	 * and then the group */
	status = ocfs2_journal_access(handle,
			alloc_inode,
			ac->ac_bh,
			OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
	fe->id1.bitmap1.i_used = cpu_to_le32(*num_bits + tmp_used);
	le32_add_cpu(&cl->cl_recs[chain].c_free, -(*num_bits));

	status = ocfs2_journal_dirty(handle,
			ac->ac_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_block_group_set_bits(handle,
			alloc_inode,
			bg,
			group_bh,
			*bit_off,
			*num_bits);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	mlog(0, "Allocated %u bits from suballocator %llu\n", *num_bits,
	     (unsigned long long)le64_to_cpu(fe->i_blkno));

	*bg_blkno = le64_to_cpu(bg->bg_blkno);
	*bits_left = le16_to_cpu(bg->bg_free_bits_count);
bail:
	if (group_bh)
		brelse(group_bh);
	if (prev_group_bh)
		brelse(prev_group_bh);

	mlog_exit(status);
	return status;
}

/* will give out up to bits_wanted contiguous bits. */
static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
		struct ocfs2_alloc_context *ac,
		handle_t *handle,
		u32 bits_wanted,
		u32 min_bits,
		u16 *bit_off,
		unsigned int *num_bits,
		u64 *bg_blkno)
{
	int status;
	u16 victim, i;
	u16 bits_left = 0;
	u64 hint_blkno = ac->ac_last_group;
	struct ocfs2_chain_list *cl;
	struct ocfs2_dinode *fe;

	mlog_entry_void();

	BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
	BUG_ON(bits_wanted > (ac->ac_bits_wanted - ac->ac_bits_given));
	BUG_ON(!ac->ac_bh);

	fe = (struct ocfs2_dinode *) ac->ac_bh->b_data;
	if (!OCFS2_IS_VALID_DINODE(fe)) {
		OCFS2_RO_ON_INVALID_DINODE(osb->sb, fe);
		status = -EIO;
		goto bail;
	}
	if (le32_to_cpu(fe->id1.bitmap1.i_used) >=
	    le32_to_cpu(fe->id1.bitmap1.i_total)) {
		ocfs2_error(osb->sb, "Chain allocator dinode %llu has %u used "
				"bits but only %u total.",
				(unsigned long long)le64_to_cpu(fe->i_blkno),
				le32_to_cpu(fe->id1.bitmap1.i_used),
				le32_to_cpu(fe->id1.bitmap1.i_total));
		status = -EIO;
		goto bail;
	}

	if (hint_blkno) {
		/* Attempt to short-circuit the usual search mechanism
		 * by jumping straight to the most recently used
		 * allocation group. This helps us maintain some
		 * contiguousness across allocations. */
		status = ocfs2_search_one_group(ac, handle, bits_wanted,
				min_bits, bit_off, num_bits,
				hint_blkno, &bits_left);
		if (!status) {
			/* Be careful to update *bg_blkno here as the
			 * caller is expecting it to be filled in, and
			 * ocfs2_search_one_group() won't do that for
			 * us. */
			*bg_blkno = hint_blkno;
			goto set_hint;
		}
		if (status < 0 && status != -ENOSPC) {
			mlog_errno(status);
			goto bail;
		}
	}

	cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;

	victim = ocfs2_find_victim_chain(cl);
	ac->ac_chain = victim;
	ac->ac_allow_chain_relink = 1;

	status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, bit_off,
			num_bits, bg_blkno, &bits_left);
	if (!status)
		goto set_hint;
	if (status < 0 && status != -ENOSPC) {
		mlog_errno(status);
		goto bail;
	}

	mlog(0, "Search of victim chain %u came up with nothing, "
	     "trying all chains now.\n", victim);

	/* If we didn't pick a good victim, then just default to
	 * searching each chain in order. Don't allow chain relinking
	 * because we only calculate enough journal credits for one
	 * relink per alloc. */
	ac->ac_allow_chain_relink = 0;
	for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) {
		if (i == victim)
			continue;
		if (!cl->cl_recs[i].c_free)
			continue;

		ac->ac_chain = i;
		status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
				bit_off, num_bits, bg_blkno,
				&bits_left);
		if (!status)
			break;
		if (status < 0 && status != -ENOSPC) {
			mlog_errno(status);
			goto bail;
		}
	}

set_hint:
	if (status != -ENOSPC) {
		/* If the next search of this group is not likely to
		 * yield a suitable extent, then we reset the last
		 * group hint so as to not waste a disk read */
		if (bits_left < min_bits)
			ac->ac_last_group = 0;
		else
			ac->ac_last_group = *bg_blkno;
	}

bail:
	mlog_exit(status);
	return status;
}

int ocfs2_claim_metadata(struct ocfs2_super *osb,
		handle_t *handle,
		struct ocfs2_alloc_context *ac,
		u32 bits_wanted,
		u16 *suballoc_bit_start,
		unsigned int *num_bits,
		u64 *blkno_start)
{
	int status;
	u64 bg_blkno;

	BUG_ON(!ac);
	BUG_ON(ac->ac_bits_wanted < (ac->ac_bits_given + bits_wanted));
	BUG_ON(ac->ac_which != OCFS2_AC_USE_META);

	status = ocfs2_claim_suballoc_bits(osb,
			ac,
			handle,
			bits_wanted,
			1,
			suballoc_bit_start,
			num_bits,
			&bg_blkno);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	atomic_inc(&osb->alloc_stats.bg_allocs);

	*blkno_start = bg_blkno + (u64) *suballoc_bit_start;
	ac->ac_bits_given += (*num_bits);
	status = 0;
bail:
	mlog_exit(status);
	return status;
}

int ocfs2_claim_new_inode(struct ocfs2_super *osb,
		handle_t *handle,
		struct ocfs2_alloc_context *ac,
		u16 *suballoc_bit,
		u64 *fe_blkno)
{
	int status;
	unsigned int num_bits;
	u64 bg_blkno;

	mlog_entry_void();

	BUG_ON(!ac);
	BUG_ON(ac->ac_bits_given != 0);
	BUG_ON(ac->ac_bits_wanted != 1);
	BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);

	status = ocfs2_claim_suballoc_bits(osb,
			ac,
			handle,
			1,
			1,
			suballoc_bit,
			&num_bits,
			&bg_blkno);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	atomic_inc(&osb->alloc_stats.bg_allocs);

	BUG_ON(num_bits != 1);

	*fe_blkno = bg_blkno + (u64) (*suballoc_bit);
	ac->ac_bits_given++;
	status = 0;
bail:
	mlog_exit(status);
	return status;
}

/* translate a group desc. blkno and its bitmap offset into
 * disk cluster offset. */
static inline u32 ocfs2_desc_bitmap_to_cluster_off(struct inode *inode,
		u64 bg_blkno,
		u16 bg_bit_off)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	u32 cluster = 0;

	BUG_ON(!ocfs2_is_cluster_bitmap(inode));

	if (bg_blkno != osb->first_cluster_group_blkno)
		cluster = ocfs2_blocks_to_clusters(inode->i_sb, bg_blkno);
	cluster += (u32) bg_bit_off;
	return cluster;
}

/* given a cluster offset, calculate which block group it belongs to
 * and return that block offset. */
u64 ocfs2_which_cluster_group(struct inode *inode, u32 cluster)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	u32 group_no;

	BUG_ON(!ocfs2_is_cluster_bitmap(inode));

	group_no = cluster / osb->bitmap_cpg;
	if (!group_no)
		return osb->first_cluster_group_blkno;
	return ocfs2_clusters_to_blocks(inode->i_sb,
			group_no * osb->bitmap_cpg);
}
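
/*
 * Worked example with hypothetical numbers: if osb->bitmap_cpg were
 * 32256 clusters per group, then cluster 70000 would land in group
 * 70000 / 32256 = 2, whose descriptor sits at the block address of
 * cluster 2 * 32256 = 64512.  Group 0 is special-cased because its
 * descriptor lives at osb->first_cluster_group_blkno rather than at
 * block 0.
 */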

/* given the block number of a cluster start, calculate which cluster
 * group and descriptor bitmap offset that corresponds to. */
static inline void ocfs2_block_to_cluster_group(struct inode *inode,
		u64 data_blkno,
		u64 *bg_blkno,
		u16 *bg_bit_off)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	u32 data_cluster = ocfs2_blocks_to_clusters(osb->sb, data_blkno);

	BUG_ON(!ocfs2_is_cluster_bitmap(inode));

	*bg_blkno = ocfs2_which_cluster_group(inode,
			data_cluster);

	if (*bg_blkno == osb->first_cluster_group_blkno)
		*bg_bit_off = (u16) data_cluster;
	else
		*bg_bit_off = (u16) ocfs2_blocks_to_clusters(osb->sb,
				data_blkno - *bg_blkno);
}

/*
 * min_bits - minimum contiguous chunk from this total allocation we
 * can handle. set to what we asked for originally for a full
 * contig. allocation, set to '1' to indicate we can deal with extents
 * of any size.
 */
int __ocfs2_claim_clusters(struct ocfs2_super *osb,
		handle_t *handle,
		struct ocfs2_alloc_context *ac,
		u32 min_clusters,
		u32 max_clusters,
		u32 *cluster_start,
		u32 *num_clusters)
{
	int status;
	unsigned int bits_wanted = max_clusters;
	u64 bg_blkno = 0;
	u16 bg_bit_off;

	mlog_entry_void();

	BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);

	BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL
	       && ac->ac_which != OCFS2_AC_USE_MAIN);

	if (ac->ac_which == OCFS2_AC_USE_LOCAL) {
		status = ocfs2_claim_local_alloc_bits(osb,
				handle,
				ac,
				bits_wanted,
				cluster_start,
				num_clusters);
		if (!status)
			atomic_inc(&osb->alloc_stats.local_data);
	} else {
		if (min_clusters > (osb->bitmap_cpg - 1)) {
			/* The only paths asking for contiguousness
			 * should know about this already. */
			mlog(ML_ERROR, "minimum allocation requested %u exceeds "
			     "group bitmap size %u!\n", min_clusters,
			     osb->bitmap_cpg);
			status = -ENOSPC;
			goto bail;
		}
		/* clamp the current request down to a realistic size. */
		if (bits_wanted > (osb->bitmap_cpg - 1))
			bits_wanted = osb->bitmap_cpg - 1;

		status = ocfs2_claim_suballoc_bits(osb,
				ac,
				handle,
				bits_wanted,
				min_clusters,
				&bg_bit_off,
				num_clusters,
				&bg_blkno);
		if (!status) {
			*cluster_start =
				ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
						bg_blkno,
						bg_bit_off);
			atomic_inc(&osb->alloc_stats.bitmap_data);
		}
	}
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	ac->ac_bits_given += *num_clusters;

bail:
	mlog_exit(status);
	return status;
}

int ocfs2_claim_clusters(struct ocfs2_super *osb,
		handle_t *handle,
		struct ocfs2_alloc_context *ac,
		u32 min_clusters,
		u32 *cluster_start,
		u32 *num_clusters)
{
	unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given;

	return __ocfs2_claim_clusters(osb, handle, ac, min_clusters,
			bits_wanted, cluster_start, num_clusters);
}
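
/*
 * The freeing path below mirrors the ext3-style rule described above
 * ocfs2_test_bg_bit_allocatable(): for cluster bitmaps the group is
 * journaled with undo access, and every bit cleared in the live
 * bitmap is set again in the journal's committed-data copy, so freed
 * clusters cannot be handed out before the transaction that freed
 * them commits.
 */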
static inline int ocfs2_block_group_clear_bits(handle_t *handle,
                                               struct inode *alloc_inode,
                                               struct ocfs2_group_desc *bg,
                                               struct buffer_head *group_bh,
                                               unsigned int bit_off,
                                               unsigned int num_bits)
{
        int status;
        unsigned int tmp;
        int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
        struct ocfs2_group_desc *undo_bg = NULL;

        mlog_entry_void();

        if (!OCFS2_IS_VALID_GROUP_DESC(bg)) {
                OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg);
                status = -EIO;
                goto bail;
        }

        mlog(0, "off = %u, num = %u\n", bit_off, num_bits);

        if (ocfs2_is_cluster_bitmap(alloc_inode))
                journal_type = OCFS2_JOURNAL_ACCESS_UNDO;

        status = ocfs2_journal_access(handle, alloc_inode, group_bh,
                                      journal_type);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        if (ocfs2_is_cluster_bitmap(alloc_inode))
                undo_bg = (struct ocfs2_group_desc *) bh2jh(group_bh)->b_committed_data;

        tmp = num_bits;
        while (tmp--) {
                ocfs2_clear_bit((bit_off + tmp),
                                (unsigned long *) bg->bg_bitmap);
                if (ocfs2_is_cluster_bitmap(alloc_inode))
                        ocfs2_set_bit(bit_off + tmp,
                                      (unsigned long *) undo_bg->bg_bitmap);
        }
        le16_add_cpu(&bg->bg_free_bits_count, num_bits);

        status = ocfs2_journal_dirty(handle, group_bh);
        if (status < 0)
                mlog_errno(status);
bail:
        return status;
}

/*
 * expects the suballoc inode to already be locked.
 */
int ocfs2_free_suballoc_bits(handle_t *handle,
                             struct inode *alloc_inode,
                             struct buffer_head *alloc_bh,
                             unsigned int start_bit,
                             u64 bg_blkno,
                             unsigned int count)
{
        int status = 0;
        u32 tmp_used;
        struct ocfs2_super *osb = OCFS2_SB(alloc_inode->i_sb);
        struct ocfs2_dinode *fe = (struct ocfs2_dinode *) alloc_bh->b_data;
        struct ocfs2_chain_list *cl = &fe->id2.i_chain;
        struct buffer_head *group_bh = NULL;
        struct ocfs2_group_desc *group;

        mlog_entry_void();

        if (!OCFS2_IS_VALID_DINODE(fe)) {
                OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe);
                status = -EIO;
                goto bail;
        }
        BUG_ON((count + start_bit) > ocfs2_bits_per_group(cl));

        mlog(0, "%llu: freeing %u bits from group %llu, starting at %u\n",
             (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno, count,
             (unsigned long long)bg_blkno, start_bit);

        status = ocfs2_read_block(osb, bg_blkno, &group_bh, OCFS2_BH_CACHED,
                                  alloc_inode);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        group = (struct ocfs2_group_desc *) group_bh->b_data;
        status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, group);
        if (status) {
                mlog_errno(status);
                goto bail;
        }
        BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits));

        status = ocfs2_block_group_clear_bits(handle, alloc_inode,
                                              group, group_bh,
                                              start_bit, count);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        status = ocfs2_journal_access(handle, alloc_inode, alloc_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        le32_add_cpu(&cl->cl_recs[le16_to_cpu(group->bg_chain)].c_free,
                     count);
        tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
        fe->id1.bitmap1.i_used = cpu_to_le32(tmp_used - count);

        status = ocfs2_journal_dirty(handle, alloc_bh);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

bail:
        if (group_bh)
                brelse(group_bh);

        mlog_exit(status);
        return status;
}
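
/*
 * Free the single suballocator bit backing @di: the bit and its block
 * group are recovered from the inode's i_blkno/i_suballoc_bit fields
 * and handed to ocfs2_free_suballoc_bits() with a count of one.
 */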
int ocfs2_free_dinode(handle_t *handle,
                      struct inode *inode_alloc_inode,
                      struct buffer_head *inode_alloc_bh,
                      struct ocfs2_dinode *di)
{
        u64 blk = le64_to_cpu(di->i_blkno);
        u16 bit = le16_to_cpu(di->i_suballoc_bit);
        u64 bg_blkno = ocfs2_which_suballoc_group(blk, bit);

        return ocfs2_free_suballoc_bits(handle, inode_alloc_inode,
                                        inode_alloc_bh, bit, bg_blkno, 1);
}

int ocfs2_free_clusters(handle_t *handle,
                        struct inode *bitmap_inode,
                        struct buffer_head *bitmap_bh,
                        u64 start_blk,
                        unsigned int num_clusters)
{
        int status;
        u16 bg_start_bit;
        u64 bg_blkno;
        struct ocfs2_dinode *fe;

        /* You can't ever have a contiguous set of clusters bigger than
         * a block group bitmap (cluster claims are clamped to
         * bitmap_cpg - 1 bits in __ocfs2_claim_clusters()), so we never
         * have to worry about looping over multiple groups here. */

        mlog_entry_void();

        /* This is expensive. We can safely remove once this stuff has
         * gotten tested really well. */
        BUG_ON(start_blk != ocfs2_clusters_to_blocks(bitmap_inode->i_sb,
                                ocfs2_blocks_to_clusters(bitmap_inode->i_sb,
                                                         start_blk)));

        fe = (struct ocfs2_dinode *) bitmap_bh->b_data;

        ocfs2_block_to_cluster_group(bitmap_inode, start_blk, &bg_blkno,
                                     &bg_start_bit);

        mlog(0, "want to free %u clusters starting at block %llu\n",
             num_clusters, (unsigned long long)start_blk);
        mlog(0, "bg_blkno = %llu, bg_start_bit = %u\n",
             (unsigned long long)bg_blkno, bg_start_bit);

        status = ocfs2_free_suballoc_bits(handle, bitmap_inode, bitmap_bh,
                                          bg_start_bit, bg_blkno,
                                          num_clusters);
        if (status < 0) {
                mlog_errno(status);
                goto out;
        }

        ocfs2_local_alloc_seen_free_bits(OCFS2_SB(bitmap_inode->i_sb),
                                         num_clusters);

out:
        mlog_exit(status);
        return status;
}
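
/*
 * printk-based debugging helpers below; note that only a few fields
 * (the generation and the bitmap usage counters) are byte-swapped
 * before printing, the rest are dumped as raw little-endian on-disk
 * values.
 */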
static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg)
{
        printk("Block Group:\n");
        printk("bg_signature: %s\n", bg->bg_signature);
        printk("bg_size: %u\n", bg->bg_size);
        printk("bg_bits: %u\n", bg->bg_bits);
        printk("bg_free_bits_count: %u\n", bg->bg_free_bits_count);
        printk("bg_chain: %u\n", bg->bg_chain);
        printk("bg_generation: %u\n", le32_to_cpu(bg->bg_generation));
        printk("bg_next_group: %llu\n",
               (unsigned long long)bg->bg_next_group);
        printk("bg_parent_dinode: %llu\n",
               (unsigned long long)bg->bg_parent_dinode);
        printk("bg_blkno: %llu\n",
               (unsigned long long)bg->bg_blkno);
}

static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe)
{
        int i;

        printk("Suballoc Inode %llu:\n", (unsigned long long)fe->i_blkno);
        printk("i_signature: %s\n", fe->i_signature);
        printk("i_size: %llu\n",
               (unsigned long long)fe->i_size);
        printk("i_clusters: %u\n", fe->i_clusters);
        printk("i_generation: %u\n",
               le32_to_cpu(fe->i_generation));
        printk("id1.bitmap1.i_used: %u\n",
               le32_to_cpu(fe->id1.bitmap1.i_used));
        printk("id1.bitmap1.i_total: %u\n",
               le32_to_cpu(fe->id1.bitmap1.i_total));
        printk("id2.i_chain.cl_cpg: %u\n", fe->id2.i_chain.cl_cpg);
        printk("id2.i_chain.cl_bpc: %u\n", fe->id2.i_chain.cl_bpc);
        printk("id2.i_chain.cl_count: %u\n", fe->id2.i_chain.cl_count);
        printk("id2.i_chain.cl_next_free_rec: %u\n",
               fe->id2.i_chain.cl_next_free_rec);

        for (i = 0; i < fe->id2.i_chain.cl_next_free_rec; i++) {
                printk("fe->id2.i_chain.cl_recs[%d].c_free: %u\n", i,
                       fe->id2.i_chain.cl_recs[i].c_free);
                printk("fe->id2.i_chain.cl_recs[%d].c_total: %u\n", i,
                       fe->id2.i_chain.cl_recs[i].c_total);
                printk("fe->id2.i_chain.cl_recs[%d].c_blkno: %llu\n", i,
                       (unsigned long long)fe->id2.i_chain.cl_recs[i].c_blkno);
        }
}

/*
 * For a given allocation, determine which allocators will need to be
 * accessed, and lock them, reserving the appropriate number of bits.
 *
 * Sparse file systems call this from ocfs2_write_begin_nolock()
 * and ocfs2_allocate_unwritten_extents().
 *
 * File systems which don't support holes call this from
 * ocfs2_extend_allocation().
 */
int ocfs2_lock_allocators(struct inode *inode,
                          struct ocfs2_extent_tree *et,
                          u32 clusters_to_add, u32 extents_to_split,
                          struct ocfs2_alloc_context **data_ac,
                          struct ocfs2_alloc_context **meta_ac)
{
        int ret = 0, num_free_extents;
        unsigned int max_recs_needed = clusters_to_add + 2 * extents_to_split;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        *meta_ac = NULL;
        if (data_ac)
                *data_ac = NULL;

        BUG_ON(clusters_to_add != 0 && data_ac == NULL);

        num_free_extents = ocfs2_num_free_extents(osb, inode, et);
        if (num_free_extents < 0) {
                ret = num_free_extents;
                mlog_errno(ret);
                goto out;
        }

        /*
         * Sparse allocation file systems need to be more conservative
         * with reserving room for expansion - the actual allocation
         * happens while we've got a journal handle open so re-taking
         * a cluster lock (because we ran out of room for another
         * extent) will violate ordering rules.
         *
         * Most of the time we'll only be seeing this 1 cluster at a time
         * anyway.
         *
         * Always lock for any unwritten extents - we might want to
         * add blocks during a split.
         */
        if (!num_free_extents ||
            (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed)) {
                ret = ocfs2_reserve_new_metadata(osb, et->et_root_el, meta_ac);
                if (ret < 0) {
                        if (ret != -ENOSPC)
                                mlog_errno(ret);
                        goto out;
                }
        }

        if (clusters_to_add == 0)
                goto out;

        ret = ocfs2_reserve_clusters(osb, clusters_to_add, data_ac);
        if (ret < 0) {
                if (ret != -ENOSPC)
                        mlog_errno(ret);
                goto out;
        }

out:
        if (ret) {
                if (*meta_ac) {
                        ocfs2_free_alloc_context(*meta_ac);
                        *meta_ac = NULL;
                }

                /*
                 * We cannot have an error and a non null *data_ac.
                 */
        }

        return ret;
}
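
/*
 * Rough caller sketch for the above (simplified, error handling and
 * credit calculation elided): a typical extend path reserves the
 * allocators first, then starts a transaction and claims clusters
 * inside it:
 *
 *	ret = ocfs2_lock_allocators(inode, et, clusters_to_add, 0,
 *				    &data_ac, &meta_ac);
 *	handle = ocfs2_start_trans(osb, credits);
 *	ret = ocfs2_claim_clusters(osb, handle, data_ac, 1,
 *				   &bit_off, &num_bits);
 *	...
 *	ocfs2_commit_trans(osb, handle);
 *	if (data_ac)
 *		ocfs2_free_alloc_context(data_ac);
 *	if (meta_ac)
 *		ocfs2_free_alloc_context(meta_ac);
 *
 * The insert of the newly claimed extent into the tree is omitted here;
 * see the callers named in the comment above for the full sequence.
 */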