/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * BSD ufs-inspired inode and directory allocation by
 * Stephen Tweedie (sct@redhat.com), 1993
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 */
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "group.h"
/*
 * ialloc.c contains the inodes allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps. A file system contains several
 * block groups. Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block. Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.
 */
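
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how an inode number maps to its (block group, bitmap bit) pair. The
 * same arithmetic appears verbatim in ext4_free_inode() and
 * ext4_orphan_get() below. Inode numbers start at 1, so bit 0 of group 0
 * is inode 1.
 */
static inline void example_ino_to_group_and_bit(unsigned long ino,
						unsigned long inodes_per_group,
						unsigned long *group,
						unsigned long *bit)
{
	*group = (ino - 1) / inodes_per_group;	/* which group's bitmap */
	*bit = (ino - 1) % inodes_per_group;	/* which bit inside it */
	/* e.g. ino 8193 with 8192 inodes per group -> group 1, bit 0 */
}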
/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
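
/*
 * Worked example (editor's illustration): with start_bit = 13 and
 * end_bit = 32, ((start_bit + 7) & ~7UL) rounds 13 up to 16, so bits
 * 13..15 are set one at a time with ext4_set_bit(), then
 * memset(bitmap + 2, 0xff, 2) fills bits 16..31 as two whole bytes.
 * (end_bit - i) >> 3 only covers whole bytes, which is safe here because
 * callers pass end_bit as a multiple of 8 (s_blocksize * 8).
 */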
/* Initializes an uninitialized inode bitmap */
unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
				ext4_group_t block_group,
				struct ext4_group_desc *gdp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks and inodes in use to prevent
	 * allocation, essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
		ext4_error(sb, __func__, "Checksum bad for group %u",
			   block_group);
		ext4_free_blks_set(sb, gdp, 0);
		ext4_free_inodes_set(sb, gdp, 0);
		ext4_itable_unused_set(sb, gdp, 0);
		memset(bh->b_data, 0xff, sb->s_blocksize);
		return 0;
	}

	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
	mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
			bh->b_data);

	return EXT4_INODES_PER_GROUP(sb);
}
/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_inode_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, __func__,
			   "Cannot read inode bitmap - "
			   "block_group = %u, inode_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}
	if (bitmap_uptodate(bh))
		return bh;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	}
	spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
		ext4_init_inode_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
		unlock_buffer(bh);
		return bh;
	}
	spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
	if (buffer_uptodate(bh)) {
		/*
		 * If the group is not marked uninit and bh is uptodate,
		 * the bitmap is also uptodate.
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		return bh;
	}
	/*
	 * Submit the buffer_head for read. We can safely mark the
	 * bitmap as uptodate now. We do it here so the bitmap uptodate
	 * bit gets set with the buffer lock held.
	 */
	set_bitmap_uptodate(bh);
	if (bh_submit_read(bh) < 0) {
		put_bh(bh);
		ext4_error(sb, __func__,
			   "Cannot read inode bitmap - "
			   "block_group = %u, inode_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}
	return bh;
}

/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the hard disk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err, count, cleared;

	if (atomic_read(&inode->i_count) > 1) {
		printk(KERN_ERR "ext4_free_inode: inode has count=%d\n",
		       atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		printk(KERN_ERR "ext4_free_inode: inode has nlink=%d\n",
		       inode->i_nlink);
		return;
	}
	if (!sb) {
		printk(KERN_ERR "ext4_free_inode: inode on "
		       "nonexistent device\n");
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug("freeing inode %lu\n", ino);
	trace_mark(ext4_free_inode,
		   "dev %s ino %lu mode %d uid %lu gid %lu blocks %llu",
		   sb->s_id, inode->i_ino, inode->i_mode,
		   (unsigned long) inode->i_uid, (unsigned long) inode->i_gid,
		   (unsigned long long) inode->i_blocks);
	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	vfs_dq_init(inode);
	ext4_xattr_delete_inode(handle, inode);
	vfs_dq_free_inode(inode);
	vfs_dq_drop(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	clear_inode(inode);

	es = EXT4_SB(sb)->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error(sb, "ext4_free_inode",
			   "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	/* Ok, now we can actually update the inode bitmaps.. */
	spin_lock(sb_bgl_lock(sbi, block_group));
	cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
	spin_unlock(sb_bgl_lock(sbi, block_group));
	if (!cleared)
		ext4_error(sb, "ext4_free_inode",
			   "bit already cleared for inode %lu", ino);
	else {
		gdp = ext4_get_group_desc(sb, block_group, &bh2);

		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
		if (fatal) goto error_return;

		if (gdp) {
			spin_lock(sb_bgl_lock(sbi, block_group));
			count = ext4_free_inodes_count(sb, gdp) + 1;
			ext4_free_inodes_set(sb, gdp, count);
			if (is_directory) {
				count = ext4_used_dirs_count(sb, gdp) - 1;
				ext4_used_dirs_set(sb, gdp, count);
			}
			gdp->bg_checksum = ext4_group_desc_csum(sbi,
							block_group, gdp);
			spin_unlock(sb_bgl_lock(sbi, block_group));
			percpu_counter_inc(&sbi->s_freeinodes_counter);
			if (is_directory)
				percpu_counter_dec(&sbi->s_dirs_counter);

			if (sbi->s_log_groups_per_flex) {
				ext4_group_t f;

				f = ext4_flex_group(sbi, block_group);
				atomic_inc(&sbi->s_flex_groups[f].free_inodes);
			}
		}
		BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, NULL, bh2);
		if (!fatal) fatal = err;
	}
	BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
	if (!fatal)
		fatal = err;
	sb->s_dirt = 1;
error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}

/*
 * There are two policies for allocating an inode. If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
static int find_group_dir(struct super_block *sb, struct inode *parent,
			  ext4_group_t *best_group)
{
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	unsigned int freei, avefreei;
	struct ext4_group_desc *desc, *best_desc = NULL;
	ext4_group_t group;
	int ret = -1;

	freei = percpu_counter_read_positive(&EXT4_SB(sb)->s_freeinodes_counter);
	avefreei = freei / ngroups;

	for (group = 0; group < ngroups; group++) {
		desc = ext4_get_group_desc(sb, group, NULL);
		if (!desc || !ext4_free_inodes_count(sb, desc))
			continue;
		if (ext4_free_inodes_count(sb, desc) < avefreei)
			continue;
		if (!best_desc ||
		    (ext4_free_blks_count(sb, desc) >
		     ext4_free_blks_count(sb, best_desc))) {
			*best_group = group;
			best_desc = desc;
			ret = 0;
		}
	}
	return ret;
}

#define free_block_ratio 10

static int find_group_flex(struct super_block *sb, struct inode *parent,
			   ext4_group_t *best_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *desc;
	struct buffer_head *bh;
	struct flex_groups *flex_group = sbi->s_flex_groups;
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t parent_fbg_group = ext4_flex_group(sbi, parent_group);
	ext4_group_t ngroups = sbi->s_groups_count;
	int flex_size = ext4_flex_bg_size(sbi);
	ext4_group_t best_flex = parent_fbg_group;
	int blocks_per_flex = sbi->s_blocks_per_group * flex_size;
	int flexbg_free_blocks;
	int flex_freeb_ratio;
	ext4_group_t n_fbg_groups;
	ext4_group_t i;

	n_fbg_groups = (sbi->s_groups_count + flex_size - 1) >>
		sbi->s_log_groups_per_flex;

find_close_to_parent:
	flexbg_free_blocks = atomic_read(&flex_group[best_flex].free_blocks);
	flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;
	if (atomic_read(&flex_group[best_flex].free_inodes) &&
	    flex_freeb_ratio > free_block_ratio)
		goto found_flexbg;

	if (best_flex && best_flex == parent_fbg_group) {
		best_flex--;
		goto find_close_to_parent;
	}

	for (i = 0; i < n_fbg_groups; i++) {
		if (i == parent_fbg_group || i == parent_fbg_group - 1)
			continue;

		flexbg_free_blocks = atomic_read(&flex_group[i].free_blocks);
		flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;

		if (flex_freeb_ratio > free_block_ratio &&
		    (atomic_read(&flex_group[i].free_inodes))) {
			best_flex = i;
			goto found_flexbg;
		}

		if ((atomic_read(&flex_group[best_flex].free_inodes) == 0) ||
		    ((atomic_read(&flex_group[i].free_blocks) >
		      atomic_read(&flex_group[best_flex].free_blocks)) &&
		     atomic_read(&flex_group[i].free_inodes)))
			best_flex = i;
	}

	if (!atomic_read(&flex_group[best_flex].free_inodes) ||
	    !atomic_read(&flex_group[best_flex].free_blocks))
		return -1;

found_flexbg:
	for (i = best_flex * flex_size; i < ngroups &&
		     i < (best_flex + 1) * flex_size; i++) {
		desc = ext4_get_group_desc(sb, i, &bh);
		if (ext4_free_inodes_count(sb, desc)) {
			*best_group = i;
			goto out;
		}
	}

	return -1;
out:
	return 0;
}
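
/*
 * Worked example (editor's illustration): with free_block_ratio = 10,
 * flex_size = 16 and 32768 blocks per group, blocks_per_flex = 524288.
 * The test flexbg_free_blocks * 100 / blocks_per_flex > 10 then needs at
 * least 57672 free blocks; integer division truncates, so the effective
 * threshold is roughly 11% of the flex group's blocks, not exactly 10%.
 */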

struct orlov_stats {
	__u32 free_inodes;
	__u32 free_blocks;
	__u32 used_dirs;
};

/*
 * Helper function for Orlov's allocator; returns critical information
 * for a particular block group or flex_bg. If flex_size is 1, then g
 * is a block group number; otherwise it is flex_bg number.
 */
void get_orlov_stats(struct super_block *sb, ext4_group_t g,
		     int flex_size, struct orlov_stats *stats)
{
	struct ext4_group_desc *desc;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	int i;

	stats->free_inodes = 0;
	stats->free_blocks = 0;
	stats->used_dirs = 0;

	g *= flex_size;

	for (i = 0; i < flex_size; i++) {
		if (g >= ngroups)
			break;
		desc = ext4_get_group_desc(sb, g++, NULL);
		if (!desc)
			continue;

		stats->free_inodes += ext4_free_inodes_count(sb, desc);
		stats->free_blocks += ext4_free_blks_count(sb, desc);
		stats->used_dirs += ext4_used_dirs_count(sb, desc);
	}
}
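
/*
 * Worked example (editor's illustration): with flex_size = 4, a call for
 * g = 2 sums the stats of block groups 8..11 (g *= flex_size, then
 * flex_size iterations); with flex_size = 1 it reads exactly group g.
 * find_group_orlov() below therefore works in units of whole flex groups
 * whenever s_log_groups_per_flex is set.
 */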

/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are blockgroups with both free inodes and free blocks counts
 * not worse than average we return one with the smallest directory count.
 * Otherwise we simply return a random group.
 *
 * For other directories, the rules are as follows:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_blocks).
 * Parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups looks good we just look for a group with more
 * free inodes than average (starting at parent's group).
 */
static int find_group_orlov(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, int mode)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t ngroups = sbi->s_groups_count;
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei;
	ext4_fsblk_t freeb, avefreeb;
	unsigned int ndirs;
	int max_dirs, min_inodes;
	ext4_grpblk_t min_blocks;
	ext4_group_t i, grp, g;
	struct ext4_group_desc *desc;
	struct orlov_stats stats;
	int flex_size = ext4_flex_bg_size(sbi);

	if (flex_size > 1) {
		ngroups = (ngroups + flex_size - 1) >>
			sbi->s_log_groups_per_flex;
		parent_group >>= sbi->s_log_groups_per_flex;
	}

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	avefreeb = freeb;
	do_div(avefreeb, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if (S_ISDIR(mode) &&
	    ((parent == sb->s_root->d_inode) ||
	     (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL))) {
		int best_ndir = inodes_per_group;
		int ret = -1;

		get_random_bytes(&grp, sizeof(grp));
		parent_group = (unsigned)grp % ngroups;
		for (i = 0; i < ngroups; i++) {
			g = (parent_group + i) % ngroups;
			get_orlov_stats(sb, g, flex_size, &stats);
			if (!stats.free_inodes)
				continue;
			if (stats.used_dirs >= best_ndir)
				continue;
			if (stats.free_inodes < avefreei)
				continue;
			if (stats.free_blocks < avefreeb)
				continue;
			grp = g;
			ret = 0;
			best_ndir = stats.used_dirs;
		}
		if (ret)
			goto fallback;
	found_flex_bg:
		if (flex_size == 1) {
			*group = grp;
			return 0;
		}

		/*
		 * We pack inodes at the beginning of the flexgroup's
		 * inode tables. Block allocation decisions will do
		 * something similar, although regular files will
		 * start at 2nd block group of the flexgroup. See
		 * ext4_ext_find_goal() and ext4_find_near().
		 */
		grp *= flex_size;
		for (i = 0; i < flex_size; i++) {
			if (grp+i >= sbi->s_groups_count)
				break;
			desc = ext4_get_group_desc(sb, grp+i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = grp+i;
				return 0;
			}
		}
		goto fallback;
	}

	max_dirs = ndirs / ngroups + inodes_per_group / 16;
	min_inodes = avefreei - inodes_per_group*flex_size / 4;
	if (min_inodes < 1)
		min_inodes = 1;
	min_blocks = avefreeb - EXT4_BLOCKS_PER_GROUP(sb)*flex_size / 4;

	/*
	 * Start looking in the flex group where we last allocated an
	 * inode for this parent directory
	 */
	if (EXT4_I(parent)->i_last_alloc_group != ~0) {
		parent_group = EXT4_I(parent)->i_last_alloc_group;
		if (flex_size > 1)
			parent_group >>= sbi->s_log_groups_per_flex;
	}

	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		get_orlov_stats(sb, grp, flex_size, &stats);
		if (stats.used_dirs >= max_dirs)
			continue;
		if (stats.free_inodes < min_inodes)
			continue;
		if (stats.free_blocks < min_blocks)
			continue;
		goto found_flex_bg;
	}

fallback:
	ngroups = sbi->s_groups_count;
	avefreei = freei / ngroups;
	parent_group = EXT4_I(parent)->i_block_group;
	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, grp, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
		    ext4_free_inodes_count(sb, desc) >= avefreei) {
			*group = grp;
			return 0;
		}
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any blockgroups
		 */
		avefreei = 0;
		goto fallback;
	}

	return -1;
}
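
/*
 * Worked example (editor's illustration, made-up numbers): a non-topdir
 * directory on a filesystem with 128 groups, 8192 inodes per group,
 * flex_size = 1, 1000 directories and 500000 free inodes gives
 *
 *	avefreei   = 500000 / 128           = 3906
 *	max_dirs   = 1000 / 128 + 8192 / 16 = 519
 *	min_inodes = 3906 - 8192 / 4        = 1858
 *
 * so a group is acceptable while used_dirs < 519, free_inodes >= 1858
 * and free_blocks >= min_blocks; otherwise the fallback linear scan
 * settles for any group with at least the average number of free inodes.
 */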

static int find_group_other(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, int mode)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *desc;
	ext4_group_t i, last;
	int flex_size = ext4_flex_bg_size(EXT4_SB(sb));

	/*
	 * Try to place the inode in the same flex group as its
	 * parent. If we can't find space, use the Orlov algorithm to
	 * find another flex group, and store that information in the
	 * parent directory's inode information so that it can use that
	 * flex group for future allocations.
	 */
	if (flex_size > 1) {
		int retry = 0;

	try_again:
		parent_group &= ~(flex_size-1);
		last = parent_group + flex_size;
		if (last > ngroups)
			last = ngroups;
		for (i = parent_group; i < last; i++) {
			desc = ext4_get_group_desc(sb, i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = i;
				return 0;
			}
		}
		if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
			retry = 1;
			parent_group = EXT4_I(parent)->i_last_alloc_group;
			goto try_again;
		}
		/*
		 * If this didn't work, use the Orlov search algorithm
		 * to find a new flex group; we pass in the mode to
		 * avoid the topdir algorithms.
		 */
		*group = parent_group + flex_size;
		if (*group > ngroups)
			*group = 0;
		return find_group_orlov(sb, parent, group, mode);
	}

	/*
	 * Try to place the inode in its parent directory
	 */
	*group = parent_group;
	desc = ext4_get_group_desc(sb, *group, NULL);
	if (desc && ext4_free_inodes_count(sb, desc) &&
	    ext4_free_blks_count(sb, desc))
		return 0;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent. We want to cause files in a common directory to all land in
	 * the same blockgroup. But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	*group = (*group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
	for (i = 1; i < ngroups; i <<= 1) {
		*group += i;
		if (*group >= ngroups)
			*group -= ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
		    ext4_free_blks_count(sb, desc))
			return 0;
	}

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	*group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++*group >= ngroups)
			*group = 0;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc))
			return 0;
	}

	return -1;
}
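
/*
 * Illustrative sketch (editor's addition, not kernel code): the
 * "quadratic hash" above advances by steps of 1, 2, 4, 8, ..., so the
 * cumulative offset from the salted start group after the probe with
 * step i is 2*i - 1. Starting from group 5 on a 16-group filesystem the
 * probe order is 6, 8, 12, 4 (i.e. 20 mod 16).
 */
static inline unsigned long example_quadratic_probe(unsigned long start,
						    unsigned long i,
						    unsigned long ngroups)
{
	/* group visited by the probe whose step size is i (1, 2, 4, ...) */
	return (start + 2 * i - 1) % ngroups;
}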

/*
 * Claim the inode from the inode bitmap. If the group
 * is uninit we need to take the group's sb_bgl_lock
 * and clear the uninit flag. The inode bitmap update
 * and group desc uninit flag clear should be done
 * after holding sb_bgl_lock so that ext4_read_inode_bitmap
 * doesn't race with ext4_claim_inode.
 */
static int ext4_claim_inode(struct super_block *sb,
			    struct buffer_head *inode_bitmap_bh,
			    unsigned long ino, ext4_group_t group, int mode)
{
	int free = 0, retval = 0, count;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);

	spin_lock(sb_bgl_lock(sbi, group));
	if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
		/* not a free inode */
		retval = 1;
		goto err_ret;
	}
	ino++;
	if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
	    ino > EXT4_INODES_PER_GROUP(sb)) {
		spin_unlock(sb_bgl_lock(sbi, group));
		ext4_error(sb, __func__,
			   "reserved inode or inode > inodes count - "
			   "block_group = %u, inode=%lu", group,
			   ino + group * EXT4_INODES_PER_GROUP(sb));
		return 1;
	}
	/* If we didn't allocate from within the initialized part of the inode
	 * table then we need to initialize up to this inode. */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
			/* When marking the block group with
			 * ~EXT4_BG_INODE_UNINIT we don't want to depend
			 * on the value of bg_itable_unused even though
			 * mke2fs could have initialized the same for us.
			 * Instead we calculate the value below.
			 */
			free = 0;
		} else {
			free = EXT4_INODES_PER_GROUP(sb) -
				ext4_itable_unused_count(sb, gdp);
		}
		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group. If it is greater
		 * we need to update the bg_itable_unused count.
		 */
		if (ino > free)
			ext4_itable_unused_set(sb, gdp,
					(EXT4_INODES_PER_GROUP(sb) - ino));
	}
	count = ext4_free_inodes_count(sb, gdp) - 1;
	ext4_free_inodes_set(sb, gdp, count);
	if (S_ISDIR(mode)) {
		count = ext4_used_dirs_count(sb, gdp) + 1;
		ext4_used_dirs_set(sb, gdp, count);
	}
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
err_ret:
	spin_unlock(sb_bgl_lock(sbi, group));
	return retval;
}
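
/*
 * Illustrative sketch (editor's addition, not kernel code):
 * bg_itable_unused counts never-initialized inode slots at the tail of
 * the group's inode table. "ino" is the 1-based inode number relative to
 * the group, as in ext4_claim_inode() after the ino++ above.
 */
static inline unsigned long example_itable_unused(unsigned long inodes_per_group,
						  unsigned long ino)
{
	/* slots ino+1 .. inodes_per_group have never been written */
	return inodes_per_group - ino;
	/* e.g. 8192 inodes per group, ino = 51 -> 8141 unused tail slots */
}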

/*
 * There are two policies for allocating an inode. If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
{
	struct super_block *sb;
	struct buffer_head *inode_bitmap_bh = NULL;
	struct buffer_head *group_desc_bh;
	ext4_group_t group = 0;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext4_group_desc *gdp = NULL;
	struct ext4_super_block *es;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int ret2, err = 0;
	struct inode *ret;
	ext4_group_t i;
	int free = 0;
	static int once = 1;
	ext4_group_t flex_group;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	trace_mark(ext4_request_inode, "dev %s dir %lu mode %d", sb->s_id,
		   dir->i_ino, mode);
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);

	sbi = EXT4_SB(sb);
	es = sbi->s_es;
	if (sbi->s_log_groups_per_flex && test_opt(sb, OLDALLOC)) {
		ret2 = find_group_flex(sb, dir, &group);
		if (ret2 == -1) {
			ret2 = find_group_other(sb, dir, &group, mode);
			if (ret2 == 0 && once) {
				once = 0;
				printk(KERN_NOTICE "ext4: find_group_flex "
				       "failed, fallback succeeded dir %lu\n",
				       dir->i_ino);
			}
		}
		goto got_group;
	}

	if (S_ISDIR(mode)) {
		if (test_opt(sb, OLDALLOC))
			ret2 = find_group_dir(sb, dir, &group);
		else
			ret2 = find_group_orlov(sb, dir, &group, mode);
	} else
		ret2 = find_group_other(sb, dir, &group, mode);

got_group:
	EXT4_I(dir)->i_last_alloc_group = group;
	err = -ENOSPC;
	if (ret2 == -1)
		goto out;

	for (i = 0; i < sbi->s_groups_count; i++) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
		if (!gdp)
			goto fail;

		brelse(inode_bitmap_bh);
		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
		if (!inode_bitmap_bh)
			goto fail;

		ino = 0;

repeat_in_this_group:
		ino = ext4_find_next_zero_bit((unsigned long *)
					      inode_bitmap_bh->b_data,
					      EXT4_INODES_PER_GROUP(sb), ino);

		if (ino < EXT4_INODES_PER_GROUP(sb)) {

			BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle,
							    inode_bitmap_bh);
			if (err)
				goto fail;

			BUFFER_TRACE(group_desc_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle,
							    group_desc_bh);
			if (err)
				goto fail;
			if (!ext4_claim_inode(sb, inode_bitmap_bh,
					      ino, group, mode)) {
				/* we won it */
				BUFFER_TRACE(inode_bitmap_bh,
					"call ext4_handle_dirty_metadata");
				err = ext4_handle_dirty_metadata(handle,
								 inode,
							inode_bitmap_bh);
				if (err)
					goto fail;
				/* zero bit is inode number 1 */
				ino++;
				goto got;
			}
			/* we lost it */
			ext4_handle_release_buffer(handle, inode_bitmap_bh);
			ext4_handle_release_buffer(handle, group_desc_bh);

			if (++ino < EXT4_INODES_PER_GROUP(sb))
				goto repeat_in_this_group;
		}
		/*
		 * This case is possible in a concurrent environment. It is
		 * very rare. We cannot repeat the find_group_xxx() call
		 * because that will simply return the same blockgroup, as
		 * the group descriptor metadata has not yet been updated.
		 * So we just go onto the next blockgroup.
		 */
		if (++group == sbi->s_groups_count)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
	/* We may have to initialize the block bitmap if it isn't already */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bitmap_bh;

		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
		if (err) {
			brelse(block_bitmap_bh);
			goto fail;
		}

		free = 0;
		spin_lock(sb_bgl_lock(sbi, group));
		/* recheck and clear flag under lock if we still need to */
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			free = ext4_free_blocks_after_init(sb, group, gdp);
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			ext4_free_blks_set(sb, gdp, free);
			gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
								gdp);
		}
		spin_unlock(sb_bgl_lock(sbi, group));

		/* Don't need to dirty bitmap block if we didn't change it */
		if (free) {
			BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
			err = ext4_handle_dirty_metadata(handle,
							NULL, block_bitmap_bh);
		}

		brelse(block_bitmap_bh);
		if (err)
			goto fail;
	}
	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
	if (err)
		goto fail;

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);
	sb->s_dirt = 1;

	if (sbi->s_log_groups_per_flex) {
		flex_group = ext4_flex_group(sbi, group);
		atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
	}

	inode->i_uid = current_fsuid();
	if (test_opt(sb, GRPID))
		inode->i_gid = dir->i_gid;
	else if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();
	inode->i_mode = mode;

	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
						       ext4_current_time(inode);

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	/*
	 * Don't inherit extent flag from directory, amongst others. We set
	 * extent flag on newly created directory and file only if -o extent
	 * mount option is specified
	 */
	ei->i_flags =
		ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);

	ei->i_file_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_group = group;
	ei->i_last_alloc_group = ~0;

	ext4_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		ext4_handle_sync(handle);
	if (insert_inode_locked(inode) < 0) {
		err = -EINVAL;
		goto fail_drop;
	}
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	ei->i_state = EXT4_STATE_NEW;

	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;

	ret = inode;
	if (vfs_dq_alloc_inode(inode)) {
		err = -EDQUOT;
		goto fail_drop;
	}

	err = ext4_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_init_security(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
		/* set extent flag only for directory, file and normal symlink */
		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
			EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
			ext4_ext_tree_init(handle, inode);
		}
	}

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	trace_mark(ext4_allocate_inode, "dev %s ino %lu dir %lu mode %d",
		   sb->s_id, inode->i_ino, dir->i_ino, mode);
	goto really_out;
fail:
	ext4_std_error(sb, err);
out:
	iput(inode);
	ret = ERR_PTR(err);
really_out:
	brelse(inode_bitmap_bh);
	return ret;

fail_free_drop:
	vfs_dq_free_inode(inode);

fail_drop:
	vfs_dq_drop(inode);
	inode->i_flags |= S_NOQUOTA;
	inode->i_nlink = 0;
	unlock_new_inode(inode);
	iput(inode);
	brelse(inode_bitmap_bh);
	return ERR_PTR(err);
}

/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	ext4_group_t block_group;
	int bit;
	struct buffer_head *bitmap_bh;
	struct inode *inode = NULL;
	long err = -EIO;

	/* Error cases - e2fsck has already cleaned up for us */
	if (ino > max_ino) {
		ext4_warning(sb, __func__,
			     "bad orphan ino %lu! e2fsck was run?", ino);
		goto error;
	}

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh) {
		ext4_warning(sb, __func__,
			     "inode bitmap error for orphan %lu", ino);
		goto error;
	}

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs). Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data))
		goto bad_orphan;

	inode = ext4_iget(sb, ino);
	if (IS_ERR(inode))
		goto iget_failed;

	/*
	 * If the orphan has i_nlink > 0 then it should be able to be
	 * truncated, otherwise it won't be removed from the orphan list
	 * during processing and an infinite loop will result.
	 */
	if (inode->i_nlink && !ext4_can_truncate(inode))
		goto bad_orphan;

	if (NEXT_ORPHAN(inode) > max_ino)
		goto bad_orphan;
	brelse(bitmap_bh);
	return inode;

iget_failed:
	err = PTR_ERR(inode);
	inode = NULL;
bad_orphan:
	ext4_warning(sb, __func__,
		     "bad orphan inode %lu! e2fsck was run?", ino);
	printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
	       bit, (unsigned long long)bitmap_bh->b_blocknr,
	       ext4_test_bit(bit, bitmap_bh->b_data));
	printk(KERN_NOTICE "inode=%p\n", inode);
	if (inode) {
		printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
		       is_bad_inode(inode));
		printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
		       NEXT_ORPHAN(inode));
		printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
		printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
	}
	brelse(bitmap_bh);
error:
	return ERR_PTR(err);
}

unsigned long ext4_count_free_inodes(struct super_block *sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_inode_bitmap(sb, i);
		if (!bitmap_bh)
			continue;

		x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
		       i, ext4_free_inodes_count(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_inodes: "
	       "stored = %u, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		cond_resched();
	}
	return desc_count;
#endif
}

/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block *sb)
{
	unsigned long count = 0;
	ext4_group_t i;

	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		count += ext4_used_dirs_count(sb, gdp);
	}
	return count;
}