suballoc.c
  1. /* -*- mode: c; c-basic-offset: 8; -*-
  2. * vim: noexpandtab sw=8 ts=8 sts=0:
  3. *
  4. * suballoc.c
  5. *
  6. * metadata alloc and free
  7. * Inspired by ext3 block groups.
  8. *
  9. * Copyright (C) 2002, 2004 Oracle. All rights reserved.
  10. *
  11. * This program is free software; you can redistribute it and/or
  12. * modify it under the terms of the GNU General Public
  13. * License as published by the Free Software Foundation; either
  14. * version 2 of the License, or (at your option) any later version.
  15. *
  16. * This program is distributed in the hope that it will be useful,
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  19. * General Public License for more details.
  20. *
  21. * You should have received a copy of the GNU General Public
  22. * License along with this program; if not, write to the
  23. * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  24. * Boston, MA 02111-1307, USA.
  25. */
  26. #include <linux/fs.h>
  27. #include <linux/types.h>
  28. #include <linux/slab.h>
  29. #include <linux/highmem.h>
  30. #define MLOG_MASK_PREFIX ML_DISK_ALLOC
  31. #include <cluster/masklog.h>
  32. #include "ocfs2.h"
  33. #include "alloc.h"
  34. #include "blockcheck.h"
  35. #include "dlmglue.h"
  36. #include "inode.h"
  37. #include "journal.h"
  38. #include "localalloc.h"
  39. #include "suballoc.h"
  40. #include "super.h"
  41. #include "sysfile.h"
  42. #include "uptodate.h"
  43. #include "buffer_head_io.h"
  44. #define NOT_ALLOC_NEW_GROUP 0
  45. #define ALLOC_NEW_GROUP 1
  46. #define OCFS2_MAX_INODES_TO_STEAL 1024
  47. static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
  48. static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
  49. static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
  50. static int ocfs2_block_group_fill(handle_t *handle,
  51. struct inode *alloc_inode,
  52. struct buffer_head *bg_bh,
  53. u64 group_blkno,
  54. u16 my_chain,
  55. struct ocfs2_chain_list *cl);
  56. static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
  57. struct inode *alloc_inode,
  58. struct buffer_head *bh,
  59. u64 max_block);
  60. static int ocfs2_cluster_group_search(struct inode *inode,
  61. struct buffer_head *group_bh,
  62. u32 bits_wanted, u32 min_bits,
  63. u64 max_block,
  64. u16 *bit_off, u16 *bits_found);
  65. static int ocfs2_block_group_search(struct inode *inode,
  66. struct buffer_head *group_bh,
  67. u32 bits_wanted, u32 min_bits,
  68. u64 max_block,
  69. u16 *bit_off, u16 *bits_found);
  70. static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
  71. struct ocfs2_alloc_context *ac,
  72. handle_t *handle,
  73. u32 bits_wanted,
  74. u32 min_bits,
  75. u16 *bit_off,
  76. unsigned int *num_bits,
  77. u64 *bg_blkno);
  78. static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
  79. int nr);
  80. static inline int ocfs2_block_group_set_bits(handle_t *handle,
  81. struct inode *alloc_inode,
  82. struct ocfs2_group_desc *bg,
  83. struct buffer_head *group_bh,
  84. unsigned int bit_off,
  85. unsigned int num_bits);
  86. static inline int ocfs2_block_group_clear_bits(handle_t *handle,
  87. struct inode *alloc_inode,
  88. struct ocfs2_group_desc *bg,
  89. struct buffer_head *group_bh,
  90. unsigned int bit_off,
  91. unsigned int num_bits);
  92. static int ocfs2_relink_block_group(handle_t *handle,
  93. struct inode *alloc_inode,
  94. struct buffer_head *fe_bh,
  95. struct buffer_head *bg_bh,
  96. struct buffer_head *prev_bg_bh,
  97. u16 chain);
  98. static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg,
  99. u32 wanted);
  100. static inline u32 ocfs2_desc_bitmap_to_cluster_off(struct inode *inode,
  101. u64 bg_blkno,
  102. u16 bg_bit_off);
  103. static inline void ocfs2_block_to_cluster_group(struct inode *inode,
  104. u64 data_blkno,
  105. u64 *bg_blkno,
  106. u16 *bg_bit_off);
  107. static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb,
  108. u32 bits_wanted, u64 max_block,
  109. struct ocfs2_alloc_context **ac);
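/*
 * Release everything an allocation context pins: drop the cluster lock on
 * the allocator inode (not taken in the local alloc case), release its
 * i_mutex, put the inode and let go of the cached buffer head.
 */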
  110. void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac)
  111. {
  112. struct inode *inode = ac->ac_inode;
  113. if (inode) {
  114. if (ac->ac_which != OCFS2_AC_USE_LOCAL)
  115. ocfs2_inode_unlock(inode, 1);
  116. mutex_unlock(&inode->i_mutex);
  117. iput(inode);
  118. ac->ac_inode = NULL;
  119. }
  120. brelse(ac->ac_bh);
  121. ac->ac_bh = NULL;
  122. }
  123. void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
  124. {
  125. ocfs2_free_ac_resource(ac);
  126. kfree(ac);
  127. }
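/* Total bits in one block group: clusters per group times bits per cluster. */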
  128. static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)
  129. {
  130. return (u32)le16_to_cpu(cl->cl_cpg) * (u32)le16_to_cpu(cl->cl_bpc);
  131. }
  132. #define do_error(fmt, ...) \
  133. do{ \
  134. if (clean_error) \
  135. mlog(ML_ERROR, fmt "\n", ##__VA_ARGS__); \
  136. else \
  137. ocfs2_error(sb, fmt, ##__VA_ARGS__); \
  138. } while (0)
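/*
 * Checks that only need the group descriptor block itself: signature,
 * self-referencing bg_blkno, fs generation, and that the free/total bit
 * counts fit within the bitmap.
 */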
  139. static int ocfs2_validate_gd_self(struct super_block *sb,
  140. struct buffer_head *bh,
  141. int clean_error)
  142. {
  143. struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
  144. if (!OCFS2_IS_VALID_GROUP_DESC(gd)) {
  145. do_error("Group descriptor #%llu has bad signature %.*s",
  146. (unsigned long long)bh->b_blocknr, 7,
  147. gd->bg_signature);
  148. return -EINVAL;
  149. }
  150. if (le64_to_cpu(gd->bg_blkno) != bh->b_blocknr) {
  151. do_error("Group descriptor #%llu has an invalid bg_blkno "
  152. "of %llu",
  153. (unsigned long long)bh->b_blocknr,
  154. (unsigned long long)le64_to_cpu(gd->bg_blkno));
  155. return -EINVAL;
  156. }
  157. if (le32_to_cpu(gd->bg_generation) != OCFS2_SB(sb)->fs_generation) {
  158. do_error("Group descriptor #%llu has an invalid "
  159. "fs_generation of #%u",
  160. (unsigned long long)bh->b_blocknr,
  161. le32_to_cpu(gd->bg_generation));
  162. return -EINVAL;
  163. }
  164. if (le16_to_cpu(gd->bg_free_bits_count) > le16_to_cpu(gd->bg_bits)) {
  165. do_error("Group descriptor #%llu has bit count %u but "
  166. "claims that %u are free",
  167. (unsigned long long)bh->b_blocknr,
  168. le16_to_cpu(gd->bg_bits),
  169. le16_to_cpu(gd->bg_free_bits_count));
  170. return -EINVAL;
  171. }
  172. if (le16_to_cpu(gd->bg_bits) > (8 * le16_to_cpu(gd->bg_size))) {
  173. do_error("Group descriptor #%llu has bit count %u but "
  174. "max bitmap bits of %u",
  175. (unsigned long long)bh->b_blocknr,
  176. le16_to_cpu(gd->bg_bits),
  177. 8 * le16_to_cpu(gd->bg_size));
  178. return -EINVAL;
  179. }
  180. return 0;
  181. }
  182. static int ocfs2_validate_gd_parent(struct super_block *sb,
  183. struct ocfs2_dinode *di,
  184. struct buffer_head *bh,
  185. int clean_error)
  186. {
  187. unsigned int max_bits;
  188. struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
  189. if (di->i_blkno != gd->bg_parent_dinode) {
  190. do_error("Group descriptor #%llu has bad parent "
  191. "pointer (%llu, expected %llu)",
  192. (unsigned long long)bh->b_blocknr,
  193. (unsigned long long)le64_to_cpu(gd->bg_parent_dinode),
  194. (unsigned long long)le64_to_cpu(di->i_blkno));
  195. return -EINVAL;
  196. }
  197. max_bits = le16_to_cpu(di->id2.i_chain.cl_cpg) * le16_to_cpu(di->id2.i_chain.cl_bpc);
  198. if (le16_to_cpu(gd->bg_bits) > max_bits) {
  199. do_error("Group descriptor #%llu has bit count of %u",
  200. (unsigned long long)bh->b_blocknr,
  201. le16_to_cpu(gd->bg_bits));
  202. return -EINVAL;
  203. }
  204. if (le16_to_cpu(gd->bg_chain) >=
  205. le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) {
  206. do_error("Group descriptor #%llu has bad chain %u",
  207. (unsigned long long)bh->b_blocknr,
  208. le16_to_cpu(gd->bg_chain));
  209. return -EINVAL;
  210. }
  211. return 0;
  212. }
  213. #undef do_error
  214. /*
  215. * This version only prints errors. It does not fail the filesystem, and
  216. * exists only for resize.
  217. */
  218. int ocfs2_check_group_descriptor(struct super_block *sb,
  219. struct ocfs2_dinode *di,
  220. struct buffer_head *bh)
  221. {
  222. int rc;
  223. struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
  224. BUG_ON(!buffer_uptodate(bh));
  225. /*
  226. * If the ecc fails, we return the error but otherwise
  227. * leave the filesystem running. We know any error is
  228. * local to this block.
  229. */
  230. rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &gd->bg_check);
  231. if (rc) {
  232. mlog(ML_ERROR,
  233. "Checksum failed for group descriptor %llu\n",
  234. (unsigned long long)bh->b_blocknr);
  235. } else
  236. rc = ocfs2_validate_gd_self(sb, bh, 1);
  237. if (!rc)
  238. rc = ocfs2_validate_gd_parent(sb, di, bh, 1);
  239. return rc;
  240. }
  241. static int ocfs2_validate_group_descriptor(struct super_block *sb,
  242. struct buffer_head *bh)
  243. {
  244. int rc;
  245. struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
  246. mlog(0, "Validating group descriptor %llu\n",
  247. (unsigned long long)bh->b_blocknr);
  248. BUG_ON(!buffer_uptodate(bh));
  249. /*
  250. * If the ecc fails, we return the error but otherwise
  251. * leave the filesystem running. We know any error is
  252. * local to this block.
  253. */
  254. rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &gd->bg_check);
  255. if (rc)
  256. return rc;
  257. /*
  258. * Errors after here are fatal.
  259. */
  260. return ocfs2_validate_gd_self(sb, bh, 0);
  261. }
  262. int ocfs2_read_group_descriptor(struct inode *inode, struct ocfs2_dinode *di,
  263. u64 gd_blkno, struct buffer_head **bh)
  264. {
  265. int rc;
  266. struct buffer_head *tmp = *bh;
  267. rc = ocfs2_read_block(inode, gd_blkno, &tmp,
  268. ocfs2_validate_group_descriptor);
  269. if (rc)
  270. goto out;
  271. rc = ocfs2_validate_gd_parent(inode->i_sb, di, tmp, 0);
  272. if (rc) {
  273. brelse(tmp);
  274. goto out;
  275. }
  276. /* If ocfs2_read_block() got us a new bh, pass it up. */
  277. if (!*bh)
  278. *bh = tmp;
  279. out:
  280. return rc;
  281. }
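/*
 * Initialize a freshly allocated group descriptor block within the current
 * transaction: signature, generation, size and chain linkage, with bit 0
 * reserved for the descriptor block itself.
 */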
  282. static int ocfs2_block_group_fill(handle_t *handle,
  283. struct inode *alloc_inode,
  284. struct buffer_head *bg_bh,
  285. u64 group_blkno,
  286. u16 my_chain,
  287. struct ocfs2_chain_list *cl)
  288. {
  289. int status = 0;
  290. struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
  291. struct super_block * sb = alloc_inode->i_sb;
  292. mlog_entry_void();
  293. if (((unsigned long long) bg_bh->b_blocknr) != group_blkno) {
  294. ocfs2_error(alloc_inode->i_sb, "group block (%llu) != "
  295. "b_blocknr (%llu)",
  296. (unsigned long long)group_blkno,
  297. (unsigned long long) bg_bh->b_blocknr);
  298. status = -EIO;
  299. goto bail;
  300. }
  301. status = ocfs2_journal_access_gd(handle,
  302. alloc_inode,
  303. bg_bh,
  304. OCFS2_JOURNAL_ACCESS_CREATE);
  305. if (status < 0) {
  306. mlog_errno(status);
  307. goto bail;
  308. }
  309. memset(bg, 0, sb->s_blocksize);
  310. strcpy(bg->bg_signature, OCFS2_GROUP_DESC_SIGNATURE);
  311. bg->bg_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
  312. bg->bg_size = cpu_to_le16(ocfs2_group_bitmap_size(sb));
  313. bg->bg_bits = cpu_to_le16(ocfs2_bits_per_group(cl));
  314. bg->bg_chain = cpu_to_le16(my_chain);
  315. bg->bg_next_group = cl->cl_recs[my_chain].c_blkno;
  316. bg->bg_parent_dinode = cpu_to_le64(OCFS2_I(alloc_inode)->ip_blkno);
  317. bg->bg_blkno = cpu_to_le64(group_blkno);
  318. /* set the 1st bit in the bitmap to account for the descriptor block */
  319. ocfs2_set_bit(0, (unsigned long *)bg->bg_bitmap);
  320. bg->bg_free_bits_count = cpu_to_le16(le16_to_cpu(bg->bg_bits) - 1);
  321. status = ocfs2_journal_dirty(handle, bg_bh);
  322. if (status < 0)
  323. mlog_errno(status);
  324. /* There is no need to zero out or otherwise initialize the
  325. * other blocks in a group - All valid FS metadata in a block
  326. * group stores the superblock fs_generation value at
  327. * allocation time. */
  328. bail:
  329. mlog_exit(status);
  330. return status;
  331. }
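/* Pick the chain with the fewest total bits so new groups spread evenly. */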
  332. static inline u16 ocfs2_find_smallest_chain(struct ocfs2_chain_list *cl)
  333. {
  334. u16 curr, best;
  335. best = curr = 0;
  336. while (curr < le16_to_cpu(cl->cl_count)) {
  337. if (le32_to_cpu(cl->cl_recs[best].c_total) >
  338. le32_to_cpu(cl->cl_recs[curr].c_total))
  339. best = curr;
  340. curr++;
  341. }
  342. return best;
  343. }
  344. /*
  345. * We expect the block group allocator to already be locked.
  346. */
  347. static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
  348. struct inode *alloc_inode,
  349. struct buffer_head *bh,
  350. u64 max_block)
  351. {
  352. int status, credits;
  353. struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data;
  354. struct ocfs2_chain_list *cl;
  355. struct ocfs2_alloc_context *ac = NULL;
  356. handle_t *handle = NULL;
  357. u32 bit_off, num_bits;
  358. u16 alloc_rec;
  359. u64 bg_blkno;
  360. struct buffer_head *bg_bh = NULL;
  361. struct ocfs2_group_desc *bg;
  362. BUG_ON(ocfs2_is_cluster_bitmap(alloc_inode));
  363. mlog_entry_void();
  364. cl = &fe->id2.i_chain;
  365. status = ocfs2_reserve_clusters_with_limit(osb,
  366. le16_to_cpu(cl->cl_cpg),
  367. max_block, &ac);
  368. if (status < 0) {
  369. if (status != -ENOSPC)
  370. mlog_errno(status);
  371. goto bail;
  372. }
  373. credits = ocfs2_calc_group_alloc_credits(osb->sb,
  374. le16_to_cpu(cl->cl_cpg));
  375. handle = ocfs2_start_trans(osb, credits);
  376. if (IS_ERR(handle)) {
  377. status = PTR_ERR(handle);
  378. handle = NULL;
  379. mlog_errno(status);
  380. goto bail;
  381. }
  382. status = ocfs2_claim_clusters(osb,
  383. handle,
  384. ac,
  385. le16_to_cpu(cl->cl_cpg),
  386. &bit_off,
  387. &num_bits);
  388. if (status < 0) {
  389. if (status != -ENOSPC)
  390. mlog_errno(status);
  391. goto bail;
  392. }
  393. alloc_rec = ocfs2_find_smallest_chain(cl);
  394. /* setup the group */
  395. bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
  396. mlog(0, "new descriptor, record %u, at block %llu\n",
  397. alloc_rec, (unsigned long long)bg_blkno);
  398. bg_bh = sb_getblk(osb->sb, bg_blkno);
  399. if (!bg_bh) {
  400. status = -EIO;
  401. mlog_errno(status);
  402. goto bail;
  403. }
  404. ocfs2_set_new_buffer_uptodate(alloc_inode, bg_bh);
  405. status = ocfs2_block_group_fill(handle,
  406. alloc_inode,
  407. bg_bh,
  408. bg_blkno,
  409. alloc_rec,
  410. cl);
  411. if (status < 0) {
  412. mlog_errno(status);
  413. goto bail;
  414. }
  415. bg = (struct ocfs2_group_desc *) bg_bh->b_data;
  416. status = ocfs2_journal_access_di(handle, alloc_inode,
  417. bh, OCFS2_JOURNAL_ACCESS_WRITE);
  418. if (status < 0) {
  419. mlog_errno(status);
  420. goto bail;
  421. }
  422. le32_add_cpu(&cl->cl_recs[alloc_rec].c_free,
  423. le16_to_cpu(bg->bg_free_bits_count));
  424. le32_add_cpu(&cl->cl_recs[alloc_rec].c_total, le16_to_cpu(bg->bg_bits));
  425. cl->cl_recs[alloc_rec].c_blkno = cpu_to_le64(bg_blkno);
  426. if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count))
  427. le16_add_cpu(&cl->cl_next_free_rec, 1);
  428. le32_add_cpu(&fe->id1.bitmap1.i_used, le16_to_cpu(bg->bg_bits) -
  429. le16_to_cpu(bg->bg_free_bits_count));
  430. le32_add_cpu(&fe->id1.bitmap1.i_total, le16_to_cpu(bg->bg_bits));
  431. le32_add_cpu(&fe->i_clusters, le16_to_cpu(cl->cl_cpg));
  432. status = ocfs2_journal_dirty(handle, bh);
  433. if (status < 0) {
  434. mlog_errno(status);
  435. goto bail;
  436. }
  437. spin_lock(&OCFS2_I(alloc_inode)->ip_lock);
  438. OCFS2_I(alloc_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
  439. fe->i_size = cpu_to_le64(ocfs2_clusters_to_bytes(alloc_inode->i_sb,
  440. le32_to_cpu(fe->i_clusters)));
  441. spin_unlock(&OCFS2_I(alloc_inode)->ip_lock);
  442. i_size_write(alloc_inode, le64_to_cpu(fe->i_size));
  443. alloc_inode->i_blocks = ocfs2_inode_sector_count(alloc_inode);
  444. status = 0;
  445. bail:
  446. if (handle)
  447. ocfs2_commit_trans(osb, handle);
  448. if (ac)
  449. ocfs2_free_alloc_context(ac);
  450. brelse(bg_bh);
  451. mlog_exit(status);
  452. return status;
  453. }
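/*
 * Take i_mutex and the cluster lock on the allocator inode for this slot,
 * then make sure it has enough free bits for the request, growing the
 * allocator with a new block group if the caller allows it. On success the
 * inode and its dinode buffer are left in the context, still locked.
 */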
  454. static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
  455. struct ocfs2_alloc_context *ac,
  456. int type,
  457. u32 slot,
  458. int alloc_new_group)
  459. {
  460. int status;
  461. u32 bits_wanted = ac->ac_bits_wanted;
  462. struct inode *alloc_inode;
  463. struct buffer_head *bh = NULL;
  464. struct ocfs2_dinode *fe;
  465. u32 free_bits;
  466. mlog_entry_void();
  467. alloc_inode = ocfs2_get_system_file_inode(osb, type, slot);
  468. if (!alloc_inode) {
  469. mlog_errno(-EINVAL);
  470. return -EINVAL;
  471. }
  472. mutex_lock(&alloc_inode->i_mutex);
  473. status = ocfs2_inode_lock(alloc_inode, &bh, 1);
  474. if (status < 0) {
  475. mutex_unlock(&alloc_inode->i_mutex);
  476. iput(alloc_inode);
  477. mlog_errno(status);
  478. return status;
  479. }
  480. ac->ac_inode = alloc_inode;
  481. ac->ac_alloc_slot = slot;
  482. fe = (struct ocfs2_dinode *) bh->b_data;
  483. /* The bh was validated by the inode read inside
  484. * ocfs2_inode_lock(). Any corruption is a code bug. */
  485. BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
  486. if (!(fe->i_flags & cpu_to_le32(OCFS2_CHAIN_FL))) {
  487. ocfs2_error(alloc_inode->i_sb, "Invalid chain allocator %llu",
  488. (unsigned long long)le64_to_cpu(fe->i_blkno));
  489. status = -EIO;
  490. goto bail;
  491. }
  492. free_bits = le32_to_cpu(fe->id1.bitmap1.i_total) -
  493. le32_to_cpu(fe->id1.bitmap1.i_used);
  494. if (bits_wanted > free_bits) {
  495. /* cluster bitmap never grows */
  496. if (ocfs2_is_cluster_bitmap(alloc_inode)) {
  497. mlog(0, "Disk Full: wanted=%u, free_bits=%u\n",
  498. bits_wanted, free_bits);
  499. status = -ENOSPC;
  500. goto bail;
  501. }
  502. if (alloc_new_group != ALLOC_NEW_GROUP) {
  503. mlog(0, "Alloc File %u Full: wanted=%u, free_bits=%u, "
  504. "and we don't alloc a new group for it.\n",
  505. slot, bits_wanted, free_bits);
  506. status = -ENOSPC;
  507. goto bail;
  508. }
  509. status = ocfs2_block_group_alloc(osb, alloc_inode, bh,
  510. ac->ac_max_block);
  511. if (status < 0) {
  512. if (status != -ENOSPC)
  513. mlog_errno(status);
  514. goto bail;
  515. }
  516. atomic_inc(&osb->alloc_stats.bg_extends);
  517. /* You should never ask for this much metadata */
  518. BUG_ON(bits_wanted >
  519. (le32_to_cpu(fe->id1.bitmap1.i_total)
  520. - le32_to_cpu(fe->id1.bitmap1.i_used)));
  521. }
  522. get_bh(bh);
  523. ac->ac_bh = bh;
  524. bail:
  525. brelse(bh);
  526. mlog_exit(status);
  527. return status;
  528. }
  529. int ocfs2_reserve_new_metadata_blocks(struct ocfs2_super *osb,
  530. int blocks,
  531. struct ocfs2_alloc_context **ac)
  532. {
  533. int status;
  534. u32 slot;
  535. *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
  536. if (!(*ac)) {
  537. status = -ENOMEM;
  538. mlog_errno(status);
  539. goto bail;
  540. }
  541. (*ac)->ac_bits_wanted = blocks;
  542. (*ac)->ac_which = OCFS2_AC_USE_META;
  543. slot = osb->slot_num;
  544. (*ac)->ac_group_search = ocfs2_block_group_search;
  545. status = ocfs2_reserve_suballoc_bits(osb, (*ac),
  546. EXTENT_ALLOC_SYSTEM_INODE,
  547. slot, ALLOC_NEW_GROUP);
  548. if (status < 0) {
  549. if (status != -ENOSPC)
  550. mlog_errno(status);
  551. goto bail;
  552. }
  553. status = 0;
  554. bail:
  555. if ((status < 0) && *ac) {
  556. ocfs2_free_alloc_context(*ac);
  557. *ac = NULL;
  558. }
  559. mlog_exit(status);
  560. return status;
  561. }
  562. int ocfs2_reserve_new_metadata(struct ocfs2_super *osb,
  563. struct ocfs2_extent_list *root_el,
  564. struct ocfs2_alloc_context **ac)
  565. {
  566. return ocfs2_reserve_new_metadata_blocks(osb,
  567. ocfs2_extend_meta_needed(root_el),
  568. ac);
  569. }
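/*
 * Try the other slots' inode allocators, starting just after our own, and
 * reserve from the first one with free bits. We never grow another node's
 * allocator (NOT_ALLOC_NEW_GROUP).
 */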
  570. static int ocfs2_steal_inode_from_other_nodes(struct ocfs2_super *osb,
  571. struct ocfs2_alloc_context *ac)
  572. {
  573. int i, status = -ENOSPC;
  574. s16 slot = ocfs2_get_inode_steal_slot(osb);
  575. /* Start to steal inodes from the first slot after ours. */
  576. if (slot == OCFS2_INVALID_SLOT)
  577. slot = osb->slot_num + 1;
  578. for (i = 0; i < osb->max_slots; i++, slot++) {
  579. if (slot == osb->max_slots)
  580. slot = 0;
  581. if (slot == osb->slot_num)
  582. continue;
  583. status = ocfs2_reserve_suballoc_bits(osb, ac,
  584. INODE_ALLOC_SYSTEM_INODE,
  585. slot, NOT_ALLOC_NEW_GROUP);
  586. if (status >= 0) {
  587. ocfs2_set_inode_steal_slot(osb, slot);
  588. break;
  589. }
  590. ocfs2_free_ac_resource(ac);
  591. }
  592. return status;
  593. }
  594. int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
  595. struct ocfs2_alloc_context **ac)
  596. {
  597. int status;
  598. s16 slot = ocfs2_get_inode_steal_slot(osb);
  599. *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
  600. if (!(*ac)) {
  601. status = -ENOMEM;
  602. mlog_errno(status);
  603. goto bail;
  604. }
  605. (*ac)->ac_bits_wanted = 1;
  606. (*ac)->ac_which = OCFS2_AC_USE_INODE;
  607. (*ac)->ac_group_search = ocfs2_block_group_search;
  608. /*
  609. * stat(2) can't handle i_ino > 32bits, so we tell the
  610. * lower levels not to allocate us a block group past that
  611. * limit. The 'inode64' mount option avoids this behavior.
  612. */
  613. if (!(osb->s_mount_opt & OCFS2_MOUNT_INODE64))
  614. (*ac)->ac_max_block = (u32)~0U;
  615. /*
  616. * slot is set when we successfully steal inode from other nodes.
  617. * It is reset in 3 places:
  618. * 1. when we flush the truncate log
  619. * 2. when we complete local alloc recovery.
  620. * 3. when we successfully allocate from our own slot.
  621. * After it is set, we keep on stealing inodes until the steal counter hits
  622. * OCFS2_MAX_INODES_TO_STEAL, and then recheck our own slot for free space.
  623. */
  624. if (slot != OCFS2_INVALID_SLOT &&
  625. atomic_read(&osb->s_num_inodes_stolen) < OCFS2_MAX_INODES_TO_STEAL)
  626. goto inode_steal;
  627. atomic_set(&osb->s_num_inodes_stolen, 0);
  628. status = ocfs2_reserve_suballoc_bits(osb, *ac,
  629. INODE_ALLOC_SYSTEM_INODE,
  630. osb->slot_num, ALLOC_NEW_GROUP);
  631. if (status >= 0) {
  632. status = 0;
  633. /*
  634. * Some inodes must be freed by us, so try to allocate
  635. * from our own slot next time.
  636. */
  637. if (slot != OCFS2_INVALID_SLOT)
  638. ocfs2_init_inode_steal_slot(osb);
  639. goto bail;
  640. } else if (status < 0 && status != -ENOSPC) {
  641. mlog_errno(status);
  642. goto bail;
  643. }
  644. ocfs2_free_ac_resource(*ac);
  645. inode_steal:
  646. status = ocfs2_steal_inode_from_other_nodes(osb, *ac);
  647. atomic_inc(&osb->s_num_inodes_stolen);
  648. if (status < 0) {
  649. if (status != -ENOSPC)
  650. mlog_errno(status);
  651. goto bail;
  652. }
  653. status = 0;
  654. bail:
  655. if ((status < 0) && *ac) {
  656. ocfs2_free_alloc_context(*ac);
  657. *ac = NULL;
  658. }
  659. mlog_exit(status);
  660. return status;
  661. }
  662. /* The local alloc code has to do the same thing, so rather than
  663. * duplicate the logic, it calls this helper too. */
  664. int ocfs2_reserve_cluster_bitmap_bits(struct ocfs2_super *osb,
  665. struct ocfs2_alloc_context *ac)
  666. {
  667. int status;
  668. ac->ac_which = OCFS2_AC_USE_MAIN;
  669. ac->ac_group_search = ocfs2_cluster_group_search;
  670. status = ocfs2_reserve_suballoc_bits(osb, ac,
  671. GLOBAL_BITMAP_SYSTEM_INODE,
  672. OCFS2_INVALID_SLOT,
  673. ALLOC_NEW_GROUP);
  674. if (status < 0 && status != -ENOSPC) {
  675. mlog_errno(status);
  676. goto bail;
  677. }
  678. bail:
  679. return status;
  680. }
  681. /* Callers don't need to care which bitmap (local alloc or main) to
  682. * use so we figure it out for them, but unfortunately this clutters
  683. * things a bit. */
  684. static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb,
  685. u32 bits_wanted, u64 max_block,
  686. struct ocfs2_alloc_context **ac)
  687. {
  688. int status;
  689. mlog_entry_void();
  690. *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
  691. if (!(*ac)) {
  692. status = -ENOMEM;
  693. mlog_errno(status);
  694. goto bail;
  695. }
  696. (*ac)->ac_bits_wanted = bits_wanted;
  697. (*ac)->ac_max_block = max_block;
  698. status = -ENOSPC;
  699. if (ocfs2_alloc_should_use_local(osb, bits_wanted)) {
  700. status = ocfs2_reserve_local_alloc_bits(osb,
  701. bits_wanted,
  702. *ac);
  703. if (status == -EFBIG) {
  704. /* The local alloc window is outside ac_max_block.
  705. * use the main bitmap. */
  706. status = -ENOSPC;
  707. } else if ((status < 0) && (status != -ENOSPC)) {
  708. mlog_errno(status);
  709. goto bail;
  710. }
  711. }
  712. if (status == -ENOSPC) {
  713. status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
  714. if (status < 0) {
  715. if (status != -ENOSPC)
  716. mlog_errno(status);
  717. goto bail;
  718. }
  719. }
  720. status = 0;
  721. bail:
  722. if ((status < 0) && *ac) {
  723. ocfs2_free_alloc_context(*ac);
  724. *ac = NULL;
  725. }
  726. mlog_exit(status);
  727. return status;
  728. }
  729. int ocfs2_reserve_clusters(struct ocfs2_super *osb,
  730. u32 bits_wanted,
  731. struct ocfs2_alloc_context **ac)
  732. {
  733. return ocfs2_reserve_clusters_with_limit(osb, bits_wanted, 0, ac);
  734. }
  735. /*
  736. * More or less lifted from ext3. I'll leave their description below:
  737. *
  738. * "For ext3 allocations, we must not reuse any blocks which are
  739. * allocated in the bitmap buffer's "last committed data" copy. This
  740. * prevents deletes from freeing up the page for reuse until we have
  741. * committed the delete transaction.
  742. *
  743. * If we didn't do this, then deleting something and reallocating it as
  744. * data would allow the old block to be overwritten before the
  745. * transaction committed (because we force data to disk before commit).
  746. * This would lead to corruption if we crashed between overwriting the
  747. * data and committing the delete.
  748. *
  749. * @@@ We may want to make this allocation behaviour conditional on
  750. * data-writes at some point, and disable it for metadata allocations or
  751. * sync-data inodes."
  752. *
  753. * Note: OCFS2 already does this differently for metadata vs data
  754. * allocations, as those bitmaps are separate and undo access is never
  755. * called on a metadata group descriptor.
  756. */
  757. static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
  758. int nr)
  759. {
  760. struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
  761. if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap))
  762. return 0;
  763. if (!buffer_jbd(bg_bh) || !bh2jh(bg_bh)->b_committed_data)
  764. return 1;
  765. bg = (struct ocfs2_group_desc *) bh2jh(bg_bh)->b_committed_data;
  766. return !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
  767. }
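/*
 * Scan the group bitmap for the best run of allocatable bits, honouring the
 * journal's committed-data copy. Returns the largest run found (capped at
 * bits_wanted) or -ENOSPC if nothing in the group is usable.
 */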
  768. static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
  769. struct buffer_head *bg_bh,
  770. unsigned int bits_wanted,
  771. unsigned int total_bits,
  772. u16 *bit_off,
  773. u16 *bits_found)
  774. {
  775. void *bitmap;
  776. u16 best_offset, best_size;
  777. int offset, start, found, status = 0;
  778. struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
  779. /* Callers got this descriptor from
  780. * ocfs2_read_group_descriptor(). Any corruption is a code bug. */
  781. BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
  782. found = start = best_offset = best_size = 0;
  783. bitmap = bg->bg_bitmap;
  784. while((offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start)) != -1) {
  785. if (offset == total_bits)
  786. break;
  787. if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) {
  788. /* We found a zero, but we can't use it as it
  789. * hasn't been put to disk yet! */
  790. found = 0;
  791. start = offset + 1;
  792. } else if (offset == start) {
  793. /* we found a zero */
  794. found++;
  795. /* move start to the next bit to test */
  796. start++;
  797. } else {
  798. /* got a zero after some ones */
  799. found = 1;
  800. start = offset + 1;
  801. }
  802. if (found > best_size) {
  803. best_size = found;
  804. best_offset = start - found;
  805. }
  806. /* we got everything we needed */
  807. if (found == bits_wanted) {
  808. /* mlog(0, "Found it all!\n"); */
  809. break;
  810. }
  811. }
  812. /* XXX: I think the first clause is equivalent to the second
  813. * - jlbec */
  814. if (found == bits_wanted) {
  815. *bit_off = start - found;
  816. *bits_found = found;
  817. } else if (best_size) {
  818. *bit_off = best_offset;
  819. *bits_found = best_size;
  820. } else {
  821. status = -ENOSPC;
  822. /* No error log here -- see the comment above
  823. * ocfs2_test_bg_bit_allocatable */
  824. }
  825. return status;
  826. }
  827. static inline int ocfs2_block_group_set_bits(handle_t *handle,
  828. struct inode *alloc_inode,
  829. struct ocfs2_group_desc *bg,
  830. struct buffer_head *group_bh,
  831. unsigned int bit_off,
  832. unsigned int num_bits)
  833. {
  834. int status;
  835. void *bitmap = bg->bg_bitmap;
  836. int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
  837. mlog_entry_void();
  838. /* All callers get the descriptor via
  839. * ocfs2_read_group_descriptor(). Any corruption is a code bug. */
  840. BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
  841. BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits);
  842. mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off,
  843. num_bits);
  844. if (ocfs2_is_cluster_bitmap(alloc_inode))
  845. journal_type = OCFS2_JOURNAL_ACCESS_UNDO;
  846. status = ocfs2_journal_access_gd(handle,
  847. alloc_inode,
  848. group_bh,
  849. journal_type);
  850. if (status < 0) {
  851. mlog_errno(status);
  852. goto bail;
  853. }
  854. le16_add_cpu(&bg->bg_free_bits_count, -num_bits);
  855. while(num_bits--)
  856. ocfs2_set_bit(bit_off++, bitmap);
  857. status = ocfs2_journal_dirty(handle,
  858. group_bh);
  859. if (status < 0) {
  860. mlog_errno(status);
  861. goto bail;
  862. }
  863. bail:
  864. mlog_exit(status);
  865. return status;
  866. }
  867. /* find the chain with the most free bits */
  868. static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl)
  869. {
  870. u16 curr, best;
  871. BUG_ON(!cl->cl_next_free_rec);
  872. best = curr = 0;
  873. while (curr < le16_to_cpu(cl->cl_next_free_rec)) {
  874. if (le32_to_cpu(cl->cl_recs[curr].c_free) >
  875. le32_to_cpu(cl->cl_recs[best].c_free))
  876. best = curr;
  877. curr++;
  878. }
  879. BUG_ON(best >= le16_to_cpu(cl->cl_next_free_rec));
  880. return best;
  881. }
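/*
 * Move a group to the head of its chain. The journal access calls can fail
 * part way through, so the old pointer values are kept on stack and put
 * back on error to avoid leaving the in-memory structures inconsistent.
 */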
  882. static int ocfs2_relink_block_group(handle_t *handle,
  883. struct inode *alloc_inode,
  884. struct buffer_head *fe_bh,
  885. struct buffer_head *bg_bh,
  886. struct buffer_head *prev_bg_bh,
  887. u16 chain)
  888. {
  889. int status;
  890. /* there is a really tiny chance the journal calls could fail,
  891. * but we wouldn't want inconsistent blocks in *any* case. */
  892. u64 fe_ptr, bg_ptr, prev_bg_ptr;
  893. struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
  894. struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
  895. struct ocfs2_group_desc *prev_bg = (struct ocfs2_group_desc *) prev_bg_bh->b_data;
  896. /* The caller got these descriptors from
  897. * ocfs2_read_group_descriptor(). Any corruption is a code bug. */
  898. BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
  899. BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(prev_bg));
  900. mlog(0, "Suballoc %llu, chain %u, move group %llu to top, prev = %llu\n",
  901. (unsigned long long)le64_to_cpu(fe->i_blkno), chain,
  902. (unsigned long long)le64_to_cpu(bg->bg_blkno),
  903. (unsigned long long)le64_to_cpu(prev_bg->bg_blkno));
  904. fe_ptr = le64_to_cpu(fe->id2.i_chain.cl_recs[chain].c_blkno);
  905. bg_ptr = le64_to_cpu(bg->bg_next_group);
  906. prev_bg_ptr = le64_to_cpu(prev_bg->bg_next_group);
  907. status = ocfs2_journal_access_gd(handle, alloc_inode, prev_bg_bh,
  908. OCFS2_JOURNAL_ACCESS_WRITE);
  909. if (status < 0) {
  910. mlog_errno(status);
  911. goto out_rollback;
  912. }
  913. prev_bg->bg_next_group = bg->bg_next_group;
  914. status = ocfs2_journal_dirty(handle, prev_bg_bh);
  915. if (status < 0) {
  916. mlog_errno(status);
  917. goto out_rollback;
  918. }
  919. status = ocfs2_journal_access_gd(handle, alloc_inode, bg_bh,
  920. OCFS2_JOURNAL_ACCESS_WRITE);
  921. if (status < 0) {
  922. mlog_errno(status);
  923. goto out_rollback;
  924. }
  925. bg->bg_next_group = fe->id2.i_chain.cl_recs[chain].c_blkno;
  926. status = ocfs2_journal_dirty(handle, bg_bh);
  927. if (status < 0) {
  928. mlog_errno(status);
  929. goto out_rollback;
  930. }
  931. status = ocfs2_journal_access_di(handle, alloc_inode, fe_bh,
  932. OCFS2_JOURNAL_ACCESS_WRITE);
  933. if (status < 0) {
  934. mlog_errno(status);
  935. goto out_rollback;
  936. }
  937. fe->id2.i_chain.cl_recs[chain].c_blkno = bg->bg_blkno;
  938. status = ocfs2_journal_dirty(handle, fe_bh);
  939. if (status < 0) {
  940. mlog_errno(status);
  941. goto out_rollback;
  942. }
  943. status = 0;
  944. out_rollback:
  945. if (status < 0) {
  946. fe->id2.i_chain.cl_recs[chain].c_blkno = cpu_to_le64(fe_ptr);
  947. bg->bg_next_group = cpu_to_le64(bg_ptr);
  948. prev_bg->bg_next_group = cpu_to_le64(prev_bg_ptr);
  949. }
  950. mlog_exit(status);
  951. return status;
  952. }
  953. static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg,
  954. u32 wanted)
  955. {
  956. return le16_to_cpu(bg->bg_free_bits_count) > wanted;
  957. }
  958. /* return 0 on success, -ENOSPC to keep searching and any other < 0
  959. * value on error. */
  960. static int ocfs2_cluster_group_search(struct inode *inode,
  961. struct buffer_head *group_bh,
  962. u32 bits_wanted, u32 min_bits,
  963. u64 max_block,
  964. u16 *bit_off, u16 *bits_found)
  965. {
  966. int search = -ENOSPC;
  967. int ret;
  968. u64 blkoff;
  969. struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *) group_bh->b_data;
  970. struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
  971. u16 tmp_off, tmp_found;
  972. unsigned int max_bits, gd_cluster_off;
  973. BUG_ON(!ocfs2_is_cluster_bitmap(inode));
  974. if (gd->bg_free_bits_count) {
  975. max_bits = le16_to_cpu(gd->bg_bits);
  976. /* Tail groups in cluster bitmaps which aren't cpg
  977. * aligned are prone to partial extension by a failed
  978. * fs resize. If the file system resize never got to
  979. * update the dinode cluster count, then we don't want
  980. * to trust any clusters past it, regardless of what
  981. * the group descriptor says. */
  982. gd_cluster_off = ocfs2_blocks_to_clusters(inode->i_sb,
  983. le64_to_cpu(gd->bg_blkno));
  984. if ((gd_cluster_off + max_bits) >
  985. OCFS2_I(inode)->ip_clusters) {
  986. max_bits = OCFS2_I(inode)->ip_clusters - gd_cluster_off;
  987. mlog(0, "Desc %llu, bg_bits %u, clusters %u, use %u\n",
  988. (unsigned long long)le64_to_cpu(gd->bg_blkno),
  989. le16_to_cpu(gd->bg_bits),
  990. OCFS2_I(inode)->ip_clusters, max_bits);
  991. }
  992. ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
  993. group_bh, bits_wanted,
  994. max_bits,
  995. &tmp_off, &tmp_found);
  996. if (ret)
  997. return ret;
  998. if (max_block) {
  999. blkoff = ocfs2_clusters_to_blocks(inode->i_sb,
  1000. gd_cluster_off +
  1001. tmp_off + tmp_found);
  1002. mlog(0, "Checking %llu against %llu\n",
  1003. (unsigned long long)blkoff,
  1004. (unsigned long long)max_block);
  1005. if (blkoff > max_block)
  1006. return -ENOSPC;
  1007. }
  1008. /* ocfs2_block_group_find_clear_bits() might
  1009. * return success, but we still want to return
  1010. * -ENOSPC unless it found the minimum number
  1011. * of bits. */
  1012. if (min_bits <= tmp_found) {
  1013. *bit_off = tmp_off;
  1014. *bits_found = tmp_found;
  1015. search = 0; /* success */
  1016. } else if (tmp_found) {
  1017. /*
  1018. * Don't show bits which we'll be returning
  1019. * for allocation to the local alloc bitmap.
  1020. */
  1021. ocfs2_local_alloc_seen_free_bits(osb, tmp_found);
  1022. }
  1023. }
  1024. return search;
  1025. }
  1026. static int ocfs2_block_group_search(struct inode *inode,
  1027. struct buffer_head *group_bh,
  1028. u32 bits_wanted, u32 min_bits,
  1029. u64 max_block,
  1030. u16 *bit_off, u16 *bits_found)
  1031. {
  1032. int ret = -ENOSPC;
  1033. u64 blkoff;
  1034. struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) group_bh->b_data;
  1035. BUG_ON(min_bits != 1);
  1036. BUG_ON(ocfs2_is_cluster_bitmap(inode));
  1037. if (bg->bg_free_bits_count) {
  1038. ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
  1039. group_bh, bits_wanted,
  1040. le16_to_cpu(bg->bg_bits),
  1041. bit_off, bits_found);
  1042. if (!ret && max_block) {
  1043. blkoff = le64_to_cpu(bg->bg_blkno) + *bit_off +
  1044. *bits_found;
  1045. mlog(0, "Checking %llu against %llu\n",
  1046. (unsigned long long)blkoff,
  1047. (unsigned long long)max_block);
  1048. if (blkoff > max_block)
  1049. ret = -ENOSPC;
  1050. }
  1051. }
  1052. return ret;
  1053. }
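/*
 * Record num_bits newly allocated bits in the allocator dinode: bump
 * bitmap1.i_used and shrink the free count of the chain they came from.
 */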
  1054. static int ocfs2_alloc_dinode_update_counts(struct inode *inode,
  1055. handle_t *handle,
  1056. struct buffer_head *di_bh,
  1057. u32 num_bits,
  1058. u16 chain)
  1059. {
  1060. int ret;
  1061. u32 tmp_used;
  1062. struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
  1063. struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &di->id2.i_chain;
  1064. ret = ocfs2_journal_access_di(handle, inode, di_bh,
  1065. OCFS2_JOURNAL_ACCESS_WRITE);
  1066. if (ret < 0) {
  1067. mlog_errno(ret);
  1068. goto out;
  1069. }
  1070. tmp_used = le32_to_cpu(di->id1.bitmap1.i_used);
  1071. di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used);
  1072. le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits);
  1073. ret = ocfs2_journal_dirty(handle, di_bh);
  1074. if (ret < 0)
  1075. mlog_errno(ret);
  1076. out:
  1077. return ret;
  1078. }
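/*
 * Try to claim bits from one specific group (typically the hint group).
 * On success the dinode counts are updated, the bits are set in the group
 * bitmap, and *bits_left reports how many free bits the group has left.
 */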
  1079. static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
  1080. handle_t *handle,
  1081. u32 bits_wanted,
  1082. u32 min_bits,
  1083. u16 *bit_off,
  1084. unsigned int *num_bits,
  1085. u64 gd_blkno,
  1086. u16 *bits_left)
  1087. {
  1088. int ret;
  1089. u16 found;
  1090. struct buffer_head *group_bh = NULL;
  1091. struct ocfs2_group_desc *gd;
  1092. struct ocfs2_dinode *di = (struct ocfs2_dinode *)ac->ac_bh->b_data;
  1093. struct inode *alloc_inode = ac->ac_inode;
  1094. ret = ocfs2_read_group_descriptor(alloc_inode, di, gd_blkno,
  1095. &group_bh);
  1096. if (ret < 0) {
  1097. mlog_errno(ret);
  1098. return ret;
  1099. }
  1100. gd = (struct ocfs2_group_desc *) group_bh->b_data;
  1101. ret = ac->ac_group_search(alloc_inode, group_bh, bits_wanted, min_bits,
  1102. ac->ac_max_block, bit_off, &found);
  1103. if (ret < 0) {
  1104. if (ret != -ENOSPC)
  1105. mlog_errno(ret);
  1106. goto out;
  1107. }
  1108. *num_bits = found;
  1109. ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh,
  1110. *num_bits,
  1111. le16_to_cpu(gd->bg_chain));
  1112. if (ret < 0) {
  1113. mlog_errno(ret);
  1114. goto out;
  1115. }
  1116. ret = ocfs2_block_group_set_bits(handle, alloc_inode, gd, group_bh,
  1117. *bit_off, *num_bits);
  1118. if (ret < 0)
  1119. mlog_errno(ret);
  1120. *bits_left = le16_to_cpu(gd->bg_free_bits_count);
  1121. out:
  1122. brelse(group_bh);
  1123. return ret;
  1124. }
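/*
 * Walk the groups on ac->ac_chain until one has enough free bits, optionally
 * relinking the winning group to the head of the chain, then claim the bits
 * on the dinode, the chain record and the group bitmap.
 */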
  1125. static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
  1126. handle_t *handle,
  1127. u32 bits_wanted,
  1128. u32 min_bits,
  1129. u16 *bit_off,
  1130. unsigned int *num_bits,
  1131. u64 *bg_blkno,
  1132. u16 *bits_left)
  1133. {
  1134. int status;
  1135. u16 chain, tmp_bits;
  1136. u32 tmp_used;
  1137. u64 next_group;
  1138. struct inode *alloc_inode = ac->ac_inode;
  1139. struct buffer_head *group_bh = NULL;
  1140. struct buffer_head *prev_group_bh = NULL;
  1141. struct ocfs2_dinode *fe = (struct ocfs2_dinode *) ac->ac_bh->b_data;
  1142. struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
  1143. struct ocfs2_group_desc *bg;
  1144. chain = ac->ac_chain;
  1145. mlog(0, "trying to alloc %u bits from chain %u, inode %llu\n",
  1146. bits_wanted, chain,
  1147. (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno);
  1148. status = ocfs2_read_group_descriptor(alloc_inode, fe,
  1149. le64_to_cpu(cl->cl_recs[chain].c_blkno),
  1150. &group_bh);
  1151. if (status < 0) {
  1152. mlog_errno(status);
  1153. goto bail;
  1154. }
  1155. bg = (struct ocfs2_group_desc *) group_bh->b_data;
  1156. status = -ENOSPC;
  1157. /* for now, the chain search is a bit simplistic. We just use
  1158. * the 1st group with any empty bits. */
  1159. while ((status = ac->ac_group_search(alloc_inode, group_bh,
  1160. bits_wanted, min_bits,
  1161. ac->ac_max_block, bit_off,
  1162. &tmp_bits)) == -ENOSPC) {
  1163. if (!bg->bg_next_group)
  1164. break;
  1165. brelse(prev_group_bh);
  1166. prev_group_bh = NULL;
  1167. next_group = le64_to_cpu(bg->bg_next_group);
  1168. prev_group_bh = group_bh;
  1169. group_bh = NULL;
  1170. status = ocfs2_read_group_descriptor(alloc_inode, fe,
  1171. next_group, &group_bh);
  1172. if (status < 0) {
  1173. mlog_errno(status);
  1174. goto bail;
  1175. }
  1176. bg = (struct ocfs2_group_desc *) group_bh->b_data;
  1177. }
  1178. if (status < 0) {
  1179. if (status != -ENOSPC)
  1180. mlog_errno(status);
  1181. goto bail;
  1182. }
  1183. mlog(0, "alloc succeeds: we give %u bits from block group %llu\n",
  1184. tmp_bits, (unsigned long long)le64_to_cpu(bg->bg_blkno));
  1185. *num_bits = tmp_bits;
  1186. BUG_ON(*num_bits == 0);
  1187. /*
  1188. * Keep track of previous block descriptor read. When
  1189. * we find a target, if we have read more than X
  1190. * number of descriptors, and the target is reasonably
  1191. * empty, relink him to top of his chain.
  1192. *
  1193. * We've read 0 extra blocks and only send one more to
  1194. * the transaction, yet the next guy to search has a
  1195. * much easier time.
  1196. *
  1197. * Do this *after* figuring out how many bits we're taking out
  1198. * of our target group.
  1199. */
  1200. if (ac->ac_allow_chain_relink &&
  1201. (prev_group_bh) &&
  1202. (ocfs2_block_group_reasonably_empty(bg, *num_bits))) {
  1203. status = ocfs2_relink_block_group(handle, alloc_inode,
  1204. ac->ac_bh, group_bh,
  1205. prev_group_bh, chain);
  1206. if (status < 0) {
  1207. mlog_errno(status);
  1208. goto bail;
  1209. }
  1210. }
  1211. /* Ok, claim our bits now: set the info on dinode, chainlist
  1212. * and then the group */
  1213. status = ocfs2_journal_access_di(handle,
  1214. alloc_inode,
  1215. ac->ac_bh,
  1216. OCFS2_JOURNAL_ACCESS_WRITE);
  1217. if (status < 0) {
  1218. mlog_errno(status);
  1219. goto bail;
  1220. }
  1221. tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
  1222. fe->id1.bitmap1.i_used = cpu_to_le32(*num_bits + tmp_used);
  1223. le32_add_cpu(&cl->cl_recs[chain].c_free, -(*num_bits));
  1224. status = ocfs2_journal_dirty(handle,
  1225. ac->ac_bh);
  1226. if (status < 0) {
  1227. mlog_errno(status);
  1228. goto bail;
  1229. }
  1230. status = ocfs2_block_group_set_bits(handle,
  1231. alloc_inode,
  1232. bg,
  1233. group_bh,
  1234. *bit_off,
  1235. *num_bits);
  1236. if (status < 0) {
  1237. mlog_errno(status);
  1238. goto bail;
  1239. }
  1240. mlog(0, "Allocated %u bits from suballocator %llu\n", *num_bits,
  1241. (unsigned long long)le64_to_cpu(fe->i_blkno));
  1242. *bg_blkno = le64_to_cpu(bg->bg_blkno);
  1243. *bits_left = le16_to_cpu(bg->bg_free_bits_count);
  1244. bail:
  1245. brelse(group_bh);
  1246. brelse(prev_group_bh);
  1247. mlog_exit(status);
  1248. return status;
  1249. }
  1250. /* will give out up to bits_wanted contiguous bits. */
  1251. static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
  1252. struct ocfs2_alloc_context *ac,
  1253. handle_t *handle,
  1254. u32 bits_wanted,
  1255. u32 min_bits,
  1256. u16 *bit_off,
  1257. unsigned int *num_bits,
  1258. u64 *bg_blkno)
  1259. {
  1260. int status;
  1261. u16 victim, i;
  1262. u16 bits_left = 0;
  1263. u64 hint_blkno = ac->ac_last_group;
  1264. struct ocfs2_chain_list *cl;
  1265. struct ocfs2_dinode *fe;
  1266. mlog_entry_void();
  1267. BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
  1268. BUG_ON(bits_wanted > (ac->ac_bits_wanted - ac->ac_bits_given));
  1269. BUG_ON(!ac->ac_bh);
  1270. fe = (struct ocfs2_dinode *) ac->ac_bh->b_data;
  1271. /* The bh was validated by the inode read during
  1272. * ocfs2_reserve_suballoc_bits(). Any corruption is a code bug. */
  1273. BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
  1274. if (le32_to_cpu(fe->id1.bitmap1.i_used) >=
  1275. le32_to_cpu(fe->id1.bitmap1.i_total)) {
  1276. ocfs2_error(osb->sb, "Chain allocator dinode %llu has %u used "
  1277. "bits but only %u total.",
  1278. (unsigned long long)le64_to_cpu(fe->i_blkno),
  1279. le32_to_cpu(fe->id1.bitmap1.i_used),
  1280. le32_to_cpu(fe->id1.bitmap1.i_total));
  1281. status = -EIO;
  1282. goto bail;
  1283. }
  1284. if (hint_blkno) {
  1285. /* Attempt to short-circuit the usual search mechanism
  1286. * by jumping straight to the most recently used
  1287. * allocation group. This helps us maintain some
  1288. * contiguousness across allocations. */
  1289. status = ocfs2_search_one_group(ac, handle, bits_wanted,
  1290. min_bits, bit_off, num_bits,
  1291. hint_blkno, &bits_left);
  1292. if (!status) {
  1293. /* Be careful to update *bg_blkno here as the
  1294. * caller is expecting it to be filled in, and
  1295. * ocfs2_search_one_group() won't do that for
  1296. * us. */
  1297. *bg_blkno = hint_blkno;
  1298. goto set_hint;
  1299. }
  1300. if (status < 0 && status != -ENOSPC) {
  1301. mlog_errno(status);
  1302. goto bail;
  1303. }
  1304. }
  1305. cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
  1306. victim = ocfs2_find_victim_chain(cl);
  1307. ac->ac_chain = victim;
  1308. ac->ac_allow_chain_relink = 1;
  1309. status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, bit_off,
  1310. num_bits, bg_blkno, &bits_left);
  1311. if (!status)
  1312. goto set_hint;
  1313. if (status < 0 && status != -ENOSPC) {
  1314. mlog_errno(status);
  1315. goto bail;
  1316. }
  1317. mlog(0, "Search of victim chain %u came up with nothing, "
  1318. "trying all chains now.\n", victim);
  1319. /* If we didn't pick a good victim, then just default to
  1320. * searching each chain in order. Don't allow chain relinking
  1321. * because we only calculate enough journal credits for one
  1322. * relink per alloc. */
  1323. ac->ac_allow_chain_relink = 0;
  1324. for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i ++) {
  1325. if (i == victim)
  1326. continue;
  1327. if (!cl->cl_recs[i].c_free)
  1328. continue;
  1329. ac->ac_chain = i;
  1330. status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
  1331. bit_off, num_bits, bg_blkno,
  1332. &bits_left);
  1333. if (!status)
  1334. break;
  1335. if (status < 0 && status != -ENOSPC) {
  1336. mlog_errno(status);
  1337. goto bail;
  1338. }
  1339. }
  1340. set_hint:
  1341. if (status != -ENOSPC) {
  1342. /* If the next search of this group is not likely to
  1343. * yield a suitable extent, then we reset the last
  1344. * group hint so as to not waste a disk read */
  1345. if (bits_left < min_bits)
  1346. ac->ac_last_group = 0;
  1347. else
  1348. ac->ac_last_group = *bg_blkno;
  1349. }
  1350. bail:
  1351. mlog_exit(status);
  1352. return status;
  1353. }
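/*
 * Claim up to bits_wanted contiguous metadata blocks from the extent
 * allocator reserved in ac; *blkno_start gets the first block number.
 */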
  1354. int ocfs2_claim_metadata(struct ocfs2_super *osb,
  1355. handle_t *handle,
  1356. struct ocfs2_alloc_context *ac,
  1357. u32 bits_wanted,
  1358. u16 *suballoc_bit_start,
  1359. unsigned int *num_bits,
  1360. u64 *blkno_start)
  1361. {
  1362. int status;
  1363. u64 bg_blkno;
  1364. BUG_ON(!ac);
  1365. BUG_ON(ac->ac_bits_wanted < (ac->ac_bits_given + bits_wanted));
  1366. BUG_ON(ac->ac_which != OCFS2_AC_USE_META);
  1367. status = ocfs2_claim_suballoc_bits(osb,
  1368. ac,
  1369. handle,
  1370. bits_wanted,
  1371. 1,
  1372. suballoc_bit_start,
  1373. num_bits,
  1374. &bg_blkno);
  1375. if (status < 0) {
  1376. mlog_errno(status);
  1377. goto bail;
  1378. }
  1379. atomic_inc(&osb->alloc_stats.bg_allocs);
  1380. *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
  1381. ac->ac_bits_given += (*num_bits);
  1382. status = 0;
  1383. bail:
  1384. mlog_exit(status);
  1385. return status;
  1386. }
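/* Claim exactly one bit from the inode allocator and return the new inode's
 * block number. */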
int ocfs2_claim_new_inode(struct ocfs2_super *osb,
                          handle_t *handle,
                          struct ocfs2_alloc_context *ac,
                          u16 *suballoc_bit,
                          u64 *fe_blkno)
{
        int status;
        unsigned int num_bits;
        u64 bg_blkno;

        mlog_entry_void();

        BUG_ON(!ac);
        BUG_ON(ac->ac_bits_given != 0);
        BUG_ON(ac->ac_bits_wanted != 1);
        BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);

        status = ocfs2_claim_suballoc_bits(osb,
                                           ac,
                                           handle,
                                           1,
                                           1,
                                           suballoc_bit,
                                           &num_bits,
                                           &bg_blkno);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }
        atomic_inc(&osb->alloc_stats.bg_allocs);

        BUG_ON(num_bits != 1);

        *fe_blkno = bg_blkno + (u64) (*suballoc_bit);
        ac->ac_bits_given++;
        status = 0;
bail:
        mlog_exit(status);
        return status;
}
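
/*
 * Illustrative note (added commentary, not part of the original file):
 * in both claim paths above the final block number is simply the group
 * descriptor block plus the claimed bit offset, i.e. each bit in a
 * suballocator group maps to one block starting at the descriptor.
 * With hypothetical values bg_blkno = 5120 and *suballoc_bit = 37, the
 * new inode would land at block 5120 + 37 = 5157.  The free side
 * inverts this arithmetic in ocfs2_free_dinode() via
 * ocfs2_which_suballoc_group().
 */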
/* translate a group desc. blkno and its bitmap offset into
 * disk cluster offset. */
static inline u32 ocfs2_desc_bitmap_to_cluster_off(struct inode *inode,
                                                   u64 bg_blkno,
                                                   u16 bg_bit_off)
{
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        u32 cluster = 0;

        BUG_ON(!ocfs2_is_cluster_bitmap(inode));

        if (bg_blkno != osb->first_cluster_group_blkno)
                cluster = ocfs2_blocks_to_clusters(inode->i_sb, bg_blkno);
        cluster += (u32) bg_bit_off;
        return cluster;
}
/* given a cluster offset, calculate which block group it belongs to
 * and return that block offset. */
u64 ocfs2_which_cluster_group(struct inode *inode, u32 cluster)
{
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        u32 group_no;

        BUG_ON(!ocfs2_is_cluster_bitmap(inode));

        group_no = cluster / osb->bitmap_cpg;
        if (!group_no)
                return osb->first_cluster_group_blkno;
        return ocfs2_clusters_to_blocks(inode->i_sb,
                                        group_no * osb->bitmap_cpg);
}
/* given the block number of a cluster start, calculate which cluster
 * group and descriptor bitmap offset that corresponds to. */
static inline void ocfs2_block_to_cluster_group(struct inode *inode,
                                                u64 data_blkno,
                                                u64 *bg_blkno,
                                                u16 *bg_bit_off)
{
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        u32 data_cluster = ocfs2_blocks_to_clusters(osb->sb, data_blkno);

        BUG_ON(!ocfs2_is_cluster_bitmap(inode));

        *bg_blkno = ocfs2_which_cluster_group(inode,
                                              data_cluster);

        if (*bg_blkno == osb->first_cluster_group_blkno)
                *bg_bit_off = (u16) data_cluster;
        else
                *bg_bit_off = (u16) ocfs2_blocks_to_clusters(osb->sb,
                                                             data_blkno - *bg_blkno);
}
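
/*
 * Worked example (added commentary, not part of the original file),
 * using a hypothetical volume with osb->bitmap_cpg = 32256 clusters
 * per group and 4 blocks per cluster:
 *
 *      cluster 40000:  group_no = 40000 / 32256 = 1, so
 *      ocfs2_which_cluster_group() returns
 *      ocfs2_clusters_to_blocks(sb, 1 * 32256) = 129024.
 *
 *      Going back, ocfs2_desc_bitmap_to_cluster_off() with
 *      bg_blkno = 129024 and bg_bit_off = 40000 - 32256 = 7744
 *      yields 32256 + 7744 = 40000 again.
 *
 * Group 0 is special-cased in both directions because its descriptor
 * lives at osb->first_cluster_group_blkno rather than at the block of
 * cluster 0.
 */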
/*
 * min_bits - minimum contiguous chunk from this total allocation we
 * can handle. set to what we asked for originally for a full
 * contig. allocation, set to '1' to indicate we can deal with extents
 * of any size.
 */
int __ocfs2_claim_clusters(struct ocfs2_super *osb,
                           handle_t *handle,
                           struct ocfs2_alloc_context *ac,
                           u32 min_clusters,
                           u32 max_clusters,
                           u32 *cluster_start,
                           u32 *num_clusters)
{
        int status;
        unsigned int bits_wanted = max_clusters;
        u64 bg_blkno = 0;
        u16 bg_bit_off;

        mlog_entry_void();

        BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);

        BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL
               && ac->ac_which != OCFS2_AC_USE_MAIN);

        if (ac->ac_which == OCFS2_AC_USE_LOCAL) {
                status = ocfs2_claim_local_alloc_bits(osb,
                                                      handle,
                                                      ac,
                                                      bits_wanted,
                                                      cluster_start,
                                                      num_clusters);
                if (!status)
                        atomic_inc(&osb->alloc_stats.local_data);
        } else {
                if (min_clusters > (osb->bitmap_cpg - 1)) {
                        /* The only paths asking for contiguousness
                         * should know about this already. */
                        mlog(ML_ERROR, "minimum allocation requested %u exceeds "
                             "group bitmap size %u!\n", min_clusters,
                             osb->bitmap_cpg);
                        status = -ENOSPC;
                        goto bail;
                }
                /* clamp the current request down to a realistic size. */
                if (bits_wanted > (osb->bitmap_cpg - 1))
                        bits_wanted = osb->bitmap_cpg - 1;

                status = ocfs2_claim_suballoc_bits(osb,
                                                   ac,
                                                   handle,
                                                   bits_wanted,
                                                   min_clusters,
                                                   &bg_bit_off,
                                                   num_clusters,
                                                   &bg_blkno);
                if (!status) {
                        *cluster_start =
                                ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
                                                                 bg_blkno,
                                                                 bg_bit_off);
                        atomic_inc(&osb->alloc_stats.bitmap_data);
                }
        }
        if (status < 0) {
                if (status != -ENOSPC)
                        mlog_errno(status);
                goto bail;
        }

        ac->ac_bits_given += *num_clusters;

bail:
        mlog_exit(status);
        return status;
}
int ocfs2_claim_clusters(struct ocfs2_super *osb,
                         handle_t *handle,
                         struct ocfs2_alloc_context *ac,
                         u32 min_clusters,
                         u32 *cluster_start,
                         u32 *num_clusters)
{
        unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given;

        return __ocfs2_claim_clusters(osb, handle, ac, min_clusters,
                                      bits_wanted, cluster_start, num_clusters);
}
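
/*
 * Usage sketch (added commentary, not part of the original file): a
 * caller that can tolerate fragmented extents passes min_clusters = 1,
 * e.g.
 *
 *      u32 cpos, len;
 *      status = ocfs2_claim_clusters(osb, handle, data_ac, 1,
 *                                    &cpos, &len);
 *
 * On success, len may be anything from 1 up to the bits still owed on
 * the context, and cpos is a cluster offset in the global bitmap.
 * Passing min_clusters equal to the original request instead demands
 * one fully contiguous chunk, per the comment above
 * __ocfs2_claim_clusters().
 */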
static inline int ocfs2_block_group_clear_bits(handle_t *handle,
                                               struct inode *alloc_inode,
                                               struct ocfs2_group_desc *bg,
                                               struct buffer_head *group_bh,
                                               unsigned int bit_off,
                                               unsigned int num_bits)
{
        int status;
        unsigned int tmp;
        int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
        struct ocfs2_group_desc *undo_bg = NULL;

        mlog_entry_void();

        /* The caller got this descriptor from
         * ocfs2_read_group_descriptor(). Any corruption is a code bug. */
        BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));

        mlog(0, "off = %u, num = %u\n", bit_off, num_bits);

        if (ocfs2_is_cluster_bitmap(alloc_inode))
                journal_type = OCFS2_JOURNAL_ACCESS_UNDO;

        status = ocfs2_journal_access_gd(handle, alloc_inode, group_bh,
                                         journal_type);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        if (ocfs2_is_cluster_bitmap(alloc_inode))
                undo_bg = (struct ocfs2_group_desc *) bh2jh(group_bh)->b_committed_data;

        tmp = num_bits;
        while (tmp--) {
                ocfs2_clear_bit((bit_off + tmp),
                                (unsigned long *) bg->bg_bitmap);
                if (ocfs2_is_cluster_bitmap(alloc_inode))
                        ocfs2_set_bit(bit_off + tmp,
                                      (unsigned long *) undo_bg->bg_bitmap);
        }
        le16_add_cpu(&bg->bg_free_bits_count, num_bits);

        status = ocfs2_journal_dirty(handle, group_bh);
        if (status < 0)
                mlog_errno(status);
bail:
        return status;
}
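
/*
 * Note (added commentary, not from the original file): for the global
 * cluster bitmap the clear above runs under OCFS2_JOURNAL_ACCESS_UNDO
 * and the same bits are re-set in the journal's committed copy of the
 * group (b_committed_data).  The usual point of this pattern appears
 * to be that clusters freed inside a running transaction are not
 * handed out again until that transaction commits.
 */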
/*
 * expects the suballoc inode to already be locked.
 */
int ocfs2_free_suballoc_bits(handle_t *handle,
                             struct inode *alloc_inode,
                             struct buffer_head *alloc_bh,
                             unsigned int start_bit,
                             u64 bg_blkno,
                             unsigned int count)
{
        int status = 0;
        u32 tmp_used;
        struct ocfs2_dinode *fe = (struct ocfs2_dinode *) alloc_bh->b_data;
        struct ocfs2_chain_list *cl = &fe->id2.i_chain;
        struct buffer_head *group_bh = NULL;
        struct ocfs2_group_desc *group;

        mlog_entry_void();

        /* The alloc_bh comes from ocfs2_free_dinode() or
         * ocfs2_free_clusters(). The callers have all locked the
         * allocator and gotten alloc_bh from the lock call. This
         * validates the dinode buffer. Any corruption that has happened
         * is a code bug. */
        BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
        BUG_ON((count + start_bit) > ocfs2_bits_per_group(cl));

        mlog(0, "%llu: freeing %u bits from group %llu, starting at %u\n",
             (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno, count,
             (unsigned long long)bg_blkno, start_bit);

        status = ocfs2_read_group_descriptor(alloc_inode, fe, bg_blkno,
                                             &group_bh);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }
        group = (struct ocfs2_group_desc *) group_bh->b_data;

        BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits));

        status = ocfs2_block_group_clear_bits(handle, alloc_inode,
                                              group, group_bh,
                                              start_bit, count);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        status = ocfs2_journal_access_di(handle, alloc_inode, alloc_bh,
                                         OCFS2_JOURNAL_ACCESS_WRITE);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        le32_add_cpu(&cl->cl_recs[le16_to_cpu(group->bg_chain)].c_free,
                     count);
        tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
        fe->id1.bitmap1.i_used = cpu_to_le32(tmp_used - count);

        status = ocfs2_journal_dirty(handle, alloc_bh);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

bail:
        brelse(group_bh);

        mlog_exit(status);
        return status;
}
int ocfs2_free_dinode(handle_t *handle,
                      struct inode *inode_alloc_inode,
                      struct buffer_head *inode_alloc_bh,
                      struct ocfs2_dinode *di)
{
        u64 blk = le64_to_cpu(di->i_blkno);
        u16 bit = le16_to_cpu(di->i_suballoc_bit);
        u64 bg_blkno = ocfs2_which_suballoc_group(blk, bit);

        return ocfs2_free_suballoc_bits(handle, inode_alloc_inode,
                                        inode_alloc_bh, bit, bg_blkno, 1);
}
int ocfs2_free_clusters(handle_t *handle,
                        struct inode *bitmap_inode,
                        struct buffer_head *bitmap_bh,
                        u64 start_blk,
                        unsigned int num_clusters)
{
        int status;
        u16 bg_start_bit;
        u64 bg_blkno;
        struct ocfs2_dinode *fe;

        /* You can't ever have a contiguous set of clusters
         * bigger than a block group bitmap so we never have to worry
         * about looping on them. */

        mlog_entry_void();

        /* This is expensive. We can safely remove once this stuff has
         * gotten tested really well. */
        BUG_ON(start_blk !=
               ocfs2_clusters_to_blocks(bitmap_inode->i_sb,
                                        ocfs2_blocks_to_clusters(bitmap_inode->i_sb,
                                                                 start_blk)));

        fe = (struct ocfs2_dinode *) bitmap_bh->b_data;

        ocfs2_block_to_cluster_group(bitmap_inode, start_blk, &bg_blkno,
                                     &bg_start_bit);

        mlog(0, "want to free %u clusters starting at block %llu\n",
             num_clusters, (unsigned long long)start_blk);
        mlog(0, "bg_blkno = %llu, bg_start_bit = %u\n",
             (unsigned long long)bg_blkno, bg_start_bit);

        status = ocfs2_free_suballoc_bits(handle, bitmap_inode, bitmap_bh,
                                          bg_start_bit, bg_blkno,
                                          num_clusters);
        if (status < 0) {
                mlog_errno(status);
                goto out;
        }

        ocfs2_local_alloc_seen_free_bits(OCFS2_SB(bitmap_inode->i_sb),
                                         num_clusters);

out:
        mlog_exit(status);
        return status;
}
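
/*
 * Note (added commentary, not from the original file): the "never have
 * to worry about looping" claim above holds because the claim side
 * clamps every request to at most osb->bitmap_cpg - 1 bits in
 * __ocfs2_claim_clusters(), so a single allocated extent can never
 * straddle two cluster groups and one ocfs2_free_suballoc_bits() call
 * is always enough.
 */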
static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg)
{
        printk("Block Group:\n");
        printk("bg_signature: %s\n", bg->bg_signature);
        printk("bg_size: %u\n", bg->bg_size);
        printk("bg_bits: %u\n", bg->bg_bits);
        printk("bg_free_bits_count: %u\n", bg->bg_free_bits_count);
        printk("bg_chain: %u\n", bg->bg_chain);
        printk("bg_generation: %u\n", le32_to_cpu(bg->bg_generation));
        printk("bg_next_group: %llu\n",
               (unsigned long long)bg->bg_next_group);
        printk("bg_parent_dinode: %llu\n",
               (unsigned long long)bg->bg_parent_dinode);
        printk("bg_blkno: %llu\n",
               (unsigned long long)bg->bg_blkno);
}
static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe)
{
        int i;

        printk("Suballoc Inode %llu:\n", (unsigned long long)fe->i_blkno);
        printk("i_signature: %s\n", fe->i_signature);
        printk("i_size: %llu\n",
               (unsigned long long)fe->i_size);
        printk("i_clusters: %u\n", fe->i_clusters);
        printk("i_generation: %u\n",
               le32_to_cpu(fe->i_generation));
        printk("id1.bitmap1.i_used: %u\n",
               le32_to_cpu(fe->id1.bitmap1.i_used));
        printk("id1.bitmap1.i_total: %u\n",
               le32_to_cpu(fe->id1.bitmap1.i_total));
        printk("id2.i_chain.cl_cpg: %u\n", fe->id2.i_chain.cl_cpg);
        printk("id2.i_chain.cl_bpc: %u\n", fe->id2.i_chain.cl_bpc);
        printk("id2.i_chain.cl_count: %u\n", fe->id2.i_chain.cl_count);
        printk("id2.i_chain.cl_next_free_rec: %u\n",
               fe->id2.i_chain.cl_next_free_rec);

        for (i = 0; i < fe->id2.i_chain.cl_next_free_rec; i++) {
                printk("fe->id2.i_chain.cl_recs[%d].c_free: %u\n", i,
                       fe->id2.i_chain.cl_recs[i].c_free);
                printk("fe->id2.i_chain.cl_recs[%d].c_total: %u\n", i,
                       fe->id2.i_chain.cl_recs[i].c_total);
                printk("fe->id2.i_chain.cl_recs[%d].c_blkno: %llu\n", i,
                       (unsigned long long)fe->id2.i_chain.cl_recs[i].c_blkno);
        }
}
/*
 * For a given allocation, determine which allocators will need to be
 * accessed, and lock them, reserving the appropriate number of bits.
 *
 * Sparse file systems call this from ocfs2_write_begin_nolock()
 * and ocfs2_allocate_unwritten_extents().
 *
 * File systems which don't support holes call this from
 * ocfs2_extend_allocation().
 */
int ocfs2_lock_allocators(struct inode *inode,
                          struct ocfs2_extent_tree *et,
                          u32 clusters_to_add, u32 extents_to_split,
                          struct ocfs2_alloc_context **data_ac,
                          struct ocfs2_alloc_context **meta_ac)
{
        int ret = 0, num_free_extents;
        unsigned int max_recs_needed = clusters_to_add + 2 * extents_to_split;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        *meta_ac = NULL;
        if (data_ac)
                *data_ac = NULL;

        BUG_ON(clusters_to_add != 0 && data_ac == NULL);

        num_free_extents = ocfs2_num_free_extents(osb, inode, et);
        if (num_free_extents < 0) {
                ret = num_free_extents;
                mlog_errno(ret);
                goto out;
        }

        /*
         * Sparse allocation file systems need to be more conservative
         * with reserving room for expansion - the actual allocation
         * happens while we've got a journal handle open so re-taking
         * a cluster lock (because we ran out of room for another
         * extent) will violate ordering rules.
         *
         * Most of the time we'll only be seeing this 1 cluster at a time
         * anyway.
         *
         * Always lock for any unwritten extents - we might want to
         * add blocks during a split.
         */
        if (!num_free_extents ||
            (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed)) {
                ret = ocfs2_reserve_new_metadata(osb, et->et_root_el, meta_ac);
                if (ret < 0) {
                        if (ret != -ENOSPC)
                                mlog_errno(ret);
                        goto out;
                }
        }

        if (clusters_to_add == 0)
                goto out;

        ret = ocfs2_reserve_clusters(osb, clusters_to_add, data_ac);
        if (ret < 0) {
                if (ret != -ENOSPC)
                        mlog_errno(ret);
                goto out;
        }

out:
        if (ret) {
                if (*meta_ac) {
                        ocfs2_free_alloc_context(*meta_ac);
                        *meta_ac = NULL;
                }

                /*
                 * We cannot have an error and a non-null *data_ac.
                 */
        }

        return ret;
}
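
/*
 * Usage sketch (added commentary, not part of the original file): a
 * typical extend path reserves both allocators up front and frees
 * whatever contexts it was given once the transaction is done, e.g.
 *
 *      struct ocfs2_alloc_context *data_ac = NULL, *meta_ac = NULL;
 *
 *      ret = ocfs2_lock_allocators(inode, et, clusters_to_add, 0,
 *                                  &data_ac, &meta_ac);
 *      if (ret)
 *              goto out;
 *
 *      ... start a handle, then call ocfs2_claim_clusters() and
 *      ocfs2_claim_metadata() as the extent insertion asks for space ...
 *
 *      if (data_ac)
 *              ocfs2_free_alloc_context(data_ac);
 *      if (meta_ac)
 *              ocfs2_free_alloc_context(meta_ac);
 *
 * The extents_to_split argument is doubled into max_recs_needed above,
 * presumably because splitting one extent record in the middle can add
 * up to two new records to the tree.
 */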