/*
 * linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * BSD ufs-inspired inode and directory allocation by
 * Stephen Tweedie (sct@redhat.com), 1993
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>

#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#include <trace/events/ext4.h>

/*
 * ialloc.c contains the inode allocation and deallocation routines.
 */

/*
 * The free inodes are managed by bitmaps. A file system contains several
 * block groups. Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block. Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.
 */

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
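/*
 * Illustrative example (values not from the original source): with
 * start_bit = 100 and end_bit = 32768 (a 4 KiB bitmap block), bits
 * 100..103 are set one at a time to reach the next byte boundary, and
 * the remaining (32768 - 104) / 8 = 4083 bytes are then filled with a
 * single memset(0xff).
 */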
void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}

/* Initializes an uninitialized inode bitmap */
static unsigned ext4_init_inode_bitmap(struct super_block *sb,
				       struct buffer_head *bh,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks and inodes in use to prevent
	 * allocation, essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
		ext4_error(sb, "Checksum bad for group %u", block_group);
		ext4_free_group_clusters_set(sb, gdp, 0);
		ext4_free_inodes_set(sb, gdp, 0);
		ext4_itable_unused_set(sb, gdp, 0);
		memset(bh->b_data, 0xff, sb->s_blocksize);
		ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
					   EXT4_INODES_PER_GROUP(sb) / 8);
		return 0;
	}

	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
	ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
			     bh->b_data);
	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
				   EXT4_INODES_PER_GROUP(sb) / 8);
	ext4_group_desc_csum_set(sb, block_group, gdp);

	return EXT4_INODES_PER_GROUP(sb);
}

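/*
 * I/O completion handler for the inode bitmap read submitted from
 * ext4_read_inode_bitmap() below: it marks the buffer (and the bitmap)
 * uptodate only if the read succeeded, then drops the reference taken
 * before submit_bh().
 */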
void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
		set_bitmap_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
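/*
 * Three buffer flags are involved below: bitmap_uptodate() says the bitmap
 * contents are valid (possibly initialized in memory without a disk read),
 * buffer_uptodate() says the block itself has been read, and
 * buffer_verified() records that the bitmap checksum has already been
 * checked.
 */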
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;
	struct ext4_group_info *grp;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;

	bitmap_blk = ext4_inode_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot read inode bitmap - "
			   "block_group = %u, inode_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}
	if (bitmap_uptodate(bh))
		goto verify;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		goto verify;
	}

	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
		ext4_init_inode_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		set_buffer_verified(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);

	if (buffer_uptodate(bh)) {
		/*
		 * If the group is not marked uninit and bh is uptodate,
		 * the bitmap is uptodate as well.
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		goto verify;
	}
	/*
	 * submit the buffer_head for reading
	 */
	trace_ext4_load_inode_bitmap(sb, block_group);
	bh->b_end_io = ext4_end_bitmap_read;
	get_bh(bh);
	submit_bh(READ | REQ_META | REQ_PRIO, bh);
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		put_bh(bh);
		ext4_error(sb, "Cannot read inode bitmap - "
			   "block_group = %u, inode_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}

verify:
	ext4_lock_group(sb, block_group);
	if (!buffer_verified(bh) &&
	    !ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
					   EXT4_INODES_PER_GROUP(sb) / 8)) {
		ext4_unlock_group(sb, block_group);
		put_bh(bh);
		ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
			   "inode_bitmap = %llu", block_group, bitmap_blk);
		grp = ext4_get_group_info(sb, block_group);
		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
		return NULL;
	}
	ext4_unlock_group(sb, block_group);
	set_buffer_verified(bh);
	return bh;
}

/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the harddisk.
 */
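/*
 * An inode number maps onto the on-disk bitmaps as
 *	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb)
 *	bit         = (ino - 1) % EXT4_INODES_PER_GROUP(sb)
 * so, assuming 8192 inodes per group purely for illustration, inode 8193
 * is bit 0 of group 1's inode bitmap.
 */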
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err, count, cleared;
	struct ext4_group_info *grp;

	if (!sb) {
		printk(KERN_ERR "EXT4-fs: %s:%d: inode on "
		       "nonexistent device\n", __func__, __LINE__);
		return;
	}
	if (atomic_read(&inode->i_count) > 1) {
		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: count=%d",
			 __func__, __LINE__, inode->i_ino,
			 atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: nlink=%d\n",
			 __func__, __LINE__, inode->i_ino, inode->i_nlink);
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug("freeing inode %lu\n", ino);
	trace_ext4_free_inode(inode);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	dquot_initialize(inode);
	ext4_xattr_delete_inode(handle, inode);
	dquot_free_inode(inode);
	dquot_drop(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	ext4_clear_inode(inode);

	es = EXT4_SB(sb)->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error(sb, "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	/* Don't bother if the inode bitmap is corrupt. */
	grp = ext4_get_group_info(sb, block_group);
	if (unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) || !bitmap_bh)
		goto error_return;

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	fatal = -ESRCH;
	gdp = ext4_get_group_desc(sb, block_group, &bh2);
	if (gdp) {
		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
	}
	ext4_lock_group(sb, block_group);
	cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data);
	if (fatal || !cleared) {
		ext4_unlock_group(sb, block_group);
		goto out;
	}

	count = ext4_free_inodes_count(sb, gdp) + 1;
	ext4_free_inodes_set(sb, gdp, count);
	if (is_directory) {
		count = ext4_used_dirs_count(sb, gdp) - 1;
		ext4_used_dirs_set(sb, gdp, count);
		percpu_counter_dec(&sbi->s_dirs_counter);
	}
	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
				   EXT4_INODES_PER_GROUP(sb) / 8);
	ext4_group_desc_csum_set(sb, block_group, gdp);
	ext4_unlock_group(sb, block_group);

	percpu_counter_inc(&sbi->s_freeinodes_counter);
	if (sbi->s_log_groups_per_flex) {
		ext4_group_t f = ext4_flex_group(sbi, block_group);

		atomic_inc(&sbi->s_flex_groups[f].free_inodes);
		if (is_directory)
			atomic_dec(&sbi->s_flex_groups[f].used_dirs);
	}
	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
out:
	if (cleared) {
		BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
		if (!fatal)
			fatal = err;
	} else {
		ext4_error(sb, "bit already cleared for inode %lu", ino);
		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
	}

error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}

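/* Per-group (or per-flex-group) usage summary gathered for the Orlov allocator. */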
struct orlov_stats {
	__u64 free_clusters;
	__u32 free_inodes;
	__u32 used_dirs;
};

/*
 * Helper function for Orlov's allocator; returns critical information
 * for a particular block group or flex_bg. If flex_size is 1, then g
 * is a block group number; otherwise it is flex_bg number.
 */
static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
			    int flex_size, struct orlov_stats *stats)
{
	struct ext4_group_desc *desc;
	struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;

	if (flex_size > 1) {
		stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
		stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
		stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
		return;
	}

	desc = ext4_get_group_desc(sb, g, NULL);
	if (desc) {
		stats->free_inodes = ext4_free_inodes_count(sb, desc);
		stats->free_clusters = ext4_free_group_clusters(sb, desc);
		stats->used_dirs = ext4_used_dirs_count(sb, desc);
	} else {
		stats->free_inodes = 0;
		stats->free_clusters = 0;
		stats->used_dirs = 0;
	}
}

/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are block groups with both free inode and free block counts
 * not worse than average, we return the one with the smallest directory
 * count. Otherwise we simply return a random group.
 *
 * The remaining rules are:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_clusters).
 * The parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at parent's group).
 */
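/*
 * When flex_bg is in use (flex_size > 1), the search below works on flex
 * groups rather than individual block groups: the group count is rounded
 * up and the parent group number is shifted right by
 * s_log_groups_per_flex before scanning.
 */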
static int find_group_orlov(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode,
			    const struct qstr *qstr)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t real_ngroups = ext4_get_groups_count(sb);
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei, grp_free;
	ext4_fsblk_t freeb, avefreec;
	unsigned int ndirs;
	int max_dirs, min_inodes;
	ext4_grpblk_t min_clusters;
	ext4_group_t i, grp, g, ngroups;
	struct ext4_group_desc *desc;
	struct orlov_stats stats;
	int flex_size = ext4_flex_bg_size(sbi);
	struct dx_hash_info hinfo;

	ngroups = real_ngroups;
	if (flex_size > 1) {
		ngroups = (real_ngroups + flex_size - 1) >>
			sbi->s_log_groups_per_flex;
		parent_group >>= sbi->s_log_groups_per_flex;
	}

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freeb = EXT4_C2B(sbi,
		percpu_counter_read_positive(&sbi->s_freeclusters_counter));
	avefreec = freeb;
	do_div(avefreec, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if (S_ISDIR(mode) &&
	    ((parent == sb->s_root->d_inode) ||
	     (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
		int best_ndir = inodes_per_group;
		int ret = -1;

		if (qstr) {
			hinfo.hash_version = DX_HASH_HALF_MD4;
			hinfo.seed = sbi->s_hash_seed;
			ext4fs_dirhash(qstr->name, qstr->len, &hinfo);
			grp = hinfo.hash;
		} else
			get_random_bytes(&grp, sizeof(grp));
		parent_group = (unsigned)grp % ngroups;
		for (i = 0; i < ngroups; i++) {
			g = (parent_group + i) % ngroups;
			get_orlov_stats(sb, g, flex_size, &stats);
			if (!stats.free_inodes)
				continue;
			if (stats.used_dirs >= best_ndir)
				continue;
			if (stats.free_inodes < avefreei)
				continue;
			if (stats.free_clusters < avefreec)
				continue;
			grp = g;
			ret = 0;
			best_ndir = stats.used_dirs;
		}
		if (ret)
			goto fallback;
	found_flex_bg:
		if (flex_size == 1) {
			*group = grp;
			return 0;
		}

		/*
		 * We pack inodes at the beginning of the flexgroup's
		 * inode tables. Block allocation decisions will do
		 * something similar, although regular files will
		 * start at 2nd block group of the flexgroup. See
		 * ext4_ext_find_goal() and ext4_find_near().
		 */
		grp *= flex_size;
		for (i = 0; i < flex_size; i++) {
			if (grp+i >= real_ngroups)
				break;
			desc = ext4_get_group_desc(sb, grp+i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = grp+i;
				return 0;
			}
		}
		goto fallback;
	}

	max_dirs = ndirs / ngroups + inodes_per_group / 16;
	min_inodes = avefreei - inodes_per_group*flex_size / 4;
	if (min_inodes < 1)
		min_inodes = 1;
	min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;

	/*
	 * Start looking in the flex group where we last allocated an
	 * inode for this parent directory
	 */
	if (EXT4_I(parent)->i_last_alloc_group != ~0) {
		parent_group = EXT4_I(parent)->i_last_alloc_group;
		if (flex_size > 1)
			parent_group >>= sbi->s_log_groups_per_flex;
	}

	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		get_orlov_stats(sb, grp, flex_size, &stats);
		if (stats.used_dirs >= max_dirs)
			continue;
		if (stats.free_inodes < min_inodes)
			continue;
		if (stats.free_clusters < min_clusters)
			continue;
		goto found_flex_bg;
	}

fallback:
	ngroups = real_ngroups;
	avefreei = freei / ngroups;
fallback_retry:
	parent_group = EXT4_I(parent)->i_block_group;
	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, grp, NULL);
		if (desc) {
			grp_free = ext4_free_inodes_count(sb, desc);
			if (grp_free && grp_free >= avefreei) {
				*group = grp;
				return 0;
			}
		}
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any blockgroups
		 */
		avefreei = 0;
		goto fallback_retry;
	}

	return -1;
}

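/*
 * Allocation policy for non-directory inodes: try the parent's flex
 * group (or block group) first, then the directory's last successful
 * allocation group, then a quadratic hash over the remaining groups,
 * and finally a linear scan that accepts any group with a free inode
 * even if it has no free blocks.
 */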
static int find_group_other(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	int flex_size = ext4_flex_bg_size(EXT4_SB(sb));

	/*
	 * Try to place the inode in the same flex group as its
	 * parent. If we can't find space, use the Orlov algorithm to
	 * find another flex group, and store that information in the
	 * parent directory's inode information so that future
	 * allocations use that flex group.
	 */
	if (flex_size > 1) {
		int retry = 0;

	try_again:
		parent_group &= ~(flex_size-1);
		last = parent_group + flex_size;
		if (last > ngroups)
			last = ngroups;
		for (i = parent_group; i < last; i++) {
			desc = ext4_get_group_desc(sb, i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = i;
				return 0;
			}
		}
		if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
			retry = 1;
			parent_group = EXT4_I(parent)->i_last_alloc_group;
			goto try_again;
		}
		/*
		 * If this didn't work, use the Orlov search algorithm
		 * to find a new flex group; we pass in the mode to
		 * avoid the topdir algorithms.
		 */
		*group = parent_group + flex_size;
		if (*group > ngroups)
			*group = 0;
		return find_group_orlov(sb, parent, group, mode, NULL);
	}

	/*
	 * Try to place the inode in its parent directory
	 */
	*group = parent_group;
	desc = ext4_get_group_desc(sb, *group, NULL);
	if (desc && ext4_free_inodes_count(sb, desc) &&
	    ext4_free_group_clusters(sb, desc))
		return 0;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent. We want to cause files in a common directory to all land in
	 * the same blockgroup. But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	*group = (*group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
	for (i = 1; i < ngroups; i <<= 1) {
		*group += i;
		if (*group >= ngroups)
			*group -= ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
		    ext4_free_group_clusters(sb, desc))
			return 0;
	}

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	*group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++*group >= ngroups)
			*group = 0;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc))
			return 0;
	}

	return -1;
}

/*
 * In no journal mode, if an inode has recently been deleted, we want
 * to avoid reusing it until we're reasonably sure the inode table
 * block has been written back to disk. (Yes, these values are
 * somewhat arbitrary...)
 */
#define RECENTCY_MIN	5
#define RECENTCY_DIRTY	30

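/*
 * Returns 1 if the inode looks recently deleted: its dtime is within the
 * last RECENTCY_MIN seconds (RECENTCY_MIN + RECENTCY_DIRTY seconds if its
 * inode table block is still dirty in the buffer cache), 0 otherwise.
 */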
static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino)
{
	struct ext4_group_desc *gdp;
	struct ext4_inode *raw_inode;
	struct buffer_head *bh;
	unsigned long dtime, now;
	int inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
	int offset, ret = 0, recentcy = RECENTCY_MIN;

	gdp = ext4_get_group_desc(sb, group, NULL);
	if (unlikely(!gdp))
		return 0;

	bh = sb_getblk(sb, ext4_inode_table(sb, gdp) +
		       (ino / inodes_per_block));
	if (unlikely(!bh) || !buffer_uptodate(bh))
		/*
		 * If the block is not in the buffer cache, then it
		 * must have been written out.
		 */
		goto out;

	offset = (ino % inodes_per_block) * EXT4_INODE_SIZE(sb);
	raw_inode = (struct ext4_inode *) (bh->b_data + offset);
	dtime = le32_to_cpu(raw_inode->i_dtime);
	now = get_seconds();
	if (buffer_dirty(bh))
		recentcy += RECENTCY_DIRTY;

	if (dtime && (dtime < now) && (now < dtime + recentcy))
		ret = 1;
out:
	brelse(bh);
	return ret;
}

/*
 * There are two policies for allocating an inode. If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
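/*
 * If handle is NULL, the journal handle is started lazily, only once a
 * candidate inode number has been found, using handle_type and nblocks
 * (see the __ext4_journal_start_sb() call below).
 */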
struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
			       umode_t mode, const struct qstr *qstr,
			       __u32 goal, uid_t *owner, int handle_type,
			       unsigned int line_no, int nblocks)
{
	struct super_block *sb;
	struct buffer_head *inode_bitmap_bh = NULL;
	struct buffer_head *group_desc_bh;
	ext4_group_t ngroups, group = 0;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext4_group_desc *gdp = NULL;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int ret2, err = 0;
	struct inode *ret;
	ext4_group_t i;
	ext4_group_t flex_group;
	struct ext4_group_info *grp;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	ngroups = ext4_get_groups_count(sb);
	trace_ext4_request_inode(dir, mode);
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);
	sbi = EXT4_SB(sb);

	/*
	 * Initialize owners and quota early so that we don't have to account
	 * for quota initialization worst case in standard inode creating
	 * transaction
	 */
	if (owner) {
		inode->i_mode = mode;
		i_uid_write(inode, owner[0]);
		i_gid_write(inode, owner[1]);
	} else if (test_opt(sb, GRPID)) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = dir->i_gid;
	} else
		inode_init_owner(inode, dir, mode);
	dquot_initialize(inode);

	if (!goal)
		goal = sbi->s_inode_goal;

	if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
		group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
		ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
		ret2 = 0;
		goto got_group;
	}

	if (S_ISDIR(mode))
		ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
	else
		ret2 = find_group_other(sb, dir, &group, mode);

got_group:
	EXT4_I(dir)->i_last_alloc_group = group;
	err = -ENOSPC;
	if (ret2 == -1)
		goto out;

	/*
	 * Normally we will only go through one pass of this loop,
	 * unless we get unlucky and it turns out the group we selected
	 * had its last inode grabbed by someone else.
	 */
	for (i = 0; i < ngroups; i++, ino = 0) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
		if (!gdp)
			goto out;

		/*
		 * Check free inodes count before loading bitmap.
		 */
		if (ext4_free_inodes_count(sb, gdp) == 0) {
			if (++group == ngroups)
				group = 0;
			continue;
		}

		grp = ext4_get_group_info(sb, group);
		/* Skip groups with already-known suspicious inode tables */
		if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
			if (++group == ngroups)
				group = 0;
			continue;
		}

		brelse(inode_bitmap_bh);
		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
		/* Skip groups with suspicious inode tables */
		if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp) || !inode_bitmap_bh) {
			if (++group == ngroups)
				group = 0;
			continue;
		}

repeat_in_this_group:
		ino = ext4_find_next_zero_bit((unsigned long *)
					      inode_bitmap_bh->b_data,
					      EXT4_INODES_PER_GROUP(sb), ino);
		if (ino >= EXT4_INODES_PER_GROUP(sb))
			goto next_group;
		if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) {
			ext4_error(sb, "reserved inode found cleared - "
				   "inode=%lu", ino + 1);
			continue;
		}
		if ((EXT4_SB(sb)->s_journal == NULL) &&
		    recently_deleted(sb, group, ino)) {
			ino++;
			goto next_inode;
		}
		if (!handle) {
			BUG_ON(nblocks <= 0);
			handle = __ext4_journal_start_sb(dir->i_sb, line_no,
							 handle_type, nblocks,
							 0);
			if (IS_ERR(handle)) {
				err = PTR_ERR(handle);
				ext4_std_error(sb, err);
				goto out;
			}
		}
		BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, inode_bitmap_bh);
		if (err) {
			ext4_std_error(sb, err);
			goto out;
		}
		ext4_lock_group(sb, group);
		ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data);
		ext4_unlock_group(sb, group);
		ino++;		/* the inode bitmap is zero-based */
		if (!ret2)
			goto got; /* we grabbed the inode! */
next_inode:
		if (ino < EXT4_INODES_PER_GROUP(sb))
			goto repeat_in_this_group;
next_group:
		if (++group == ngroups)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
	BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}

	/* We may have to initialize the block bitmap if it isn't already */
	if (ext4_has_group_desc_csum(sb) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bitmap_bh;

		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
		if (err) {
			brelse(block_bitmap_bh);
			ext4_std_error(sb, err);
			goto out;
		}

		BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
		err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);

		/* recheck and clear flag under lock if we still need to */
		ext4_lock_group(sb, group);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			ext4_free_group_clusters_set(sb, gdp,
				ext4_free_clusters_after_init(sb, group, gdp));
			ext4_block_bitmap_csum_set(sb, group, gdp,
						   block_bitmap_bh);
			ext4_group_desc_csum_set(sb, group, gdp);
		}
		ext4_unlock_group(sb, group);
		brelse(block_bitmap_bh);

		if (err) {
			ext4_std_error(sb, err);
			goto out;
		}
	}

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, group_desc_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}

	/* Update the relevant bg descriptor fields */
	if (ext4_has_group_desc_csum(sb)) {
		int free;
		struct ext4_group_info *grp = ext4_get_group_info(sb, group);

		down_read(&grp->alloc_sem); /* protect vs itable lazyinit */
		ext4_lock_group(sb, group); /* while we modify the bg desc */
		free = EXT4_INODES_PER_GROUP(sb) -
			ext4_itable_unused_count(sb, gdp);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
			free = 0;
		}
		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group. If it is greater
		 * we need to update the bg_itable_unused count.
		 */
		if (ino > free)
			ext4_itable_unused_set(sb, gdp,
					(EXT4_INODES_PER_GROUP(sb) - ino));
		up_read(&grp->alloc_sem);
	} else {
		ext4_lock_group(sb, group);
	}

	ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
	if (S_ISDIR(mode)) {
		ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1);
		if (sbi->s_log_groups_per_flex) {
			ext4_group_t f = ext4_flex_group(sbi, group);

			atomic_inc(&sbi->s_flex_groups[f].used_dirs);
		}
	}
	if (ext4_has_group_desc_csum(sb)) {
		ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
					   EXT4_INODES_PER_GROUP(sb) / 8);
		ext4_group_desc_csum_set(sb, group, gdp);
	}
	ext4_unlock_group(sb, group);

	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);

	if (sbi->s_log_groups_per_flex) {
		flex_group = ext4_flex_group(sbi, group);
		atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
	}

	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
						ext4_current_time(inode);

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	/* Don't inherit extent flag from directory, amongst others. */
	ei->i_flags =
		ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
	ei->i_file_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_group = group;
	ei->i_last_alloc_group = ~0;

	ext4_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		ext4_handle_sync(handle);
	if (insert_inode_locked(inode) < 0) {
		/*
		 * Likely a bitmap corruption causing inode to be allocated
		 * twice.
		 */
		err = -EIO;
		ext4_error(sb, "failed to insert inode %lu: doubly allocated?",
			   inode->i_ino);
		goto out;
	}
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	/* Precompute checksum seed for inode metadata */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
		__u32 csum;
		__le32 inum = cpu_to_le32(inode->i_ino);
		__le32 gen = cpu_to_le32(inode->i_generation);

		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
				   sizeof(inum));
		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
					      sizeof(gen));
	}

	ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
	ext4_set_inode_state(inode, EXT4_STATE_NEW);

	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
	ei->i_inline_off = 0;
	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_INLINE_DATA))
		ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);

	ret = inode;
	err = dquot_alloc_inode(inode);
	if (err)
		goto fail_drop;

	err = ext4_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_init_security(handle, inode, dir, qstr);
	if (err)
		goto fail_free_drop;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
		/* set extent flag only for directory, file and normal symlink*/
		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
			ext4_ext_tree_init(handle, inode);
		}
	}

	if (ext4_handle_valid(handle)) {
		ei->i_sync_tid = handle->h_transaction->t_tid;
		ei->i_datasync_tid = handle->h_transaction->t_tid;
	}

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	trace_ext4_allocate_inode(inode, dir, mode);
	brelse(inode_bitmap_bh);
	return ret;

fail_free_drop:
	dquot_free_inode(inode);
fail_drop:
	clear_nlink(inode);
	unlock_new_inode(inode);
out:
	dquot_drop(inode);
	inode->i_flags |= S_NOQUOTA;
	iput(inode);
	brelse(inode_bitmap_bh);
	return ERR_PTR(err);
}

/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	ext4_group_t block_group;
	int bit;
	struct buffer_head *bitmap_bh;
	struct inode *inode = NULL;
	long err = -EIO;

	/* Error cases - e2fsck has already cleaned up for us */
	if (ino > max_ino) {
		ext4_warning(sb, "bad orphan ino %lu! e2fsck was run?", ino);
		goto error;
	}

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh) {
		ext4_warning(sb, "inode bitmap error for orphan %lu", ino);
		goto error;
	}

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs). Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data))
		goto bad_orphan;

	inode = ext4_iget(sb, ino);
	if (IS_ERR(inode))
		goto iget_failed;

	/*
	 * If the orphan has i_nlink > 0 then it should be able to be
	 * truncated, otherwise it won't be removed from the orphan list
	 * during processing and an infinite loop will result.
	 */
	if (inode->i_nlink && !ext4_can_truncate(inode))
		goto bad_orphan;

	if (NEXT_ORPHAN(inode) > max_ino)
		goto bad_orphan;
	brelse(bitmap_bh);
	return inode;

iget_failed:
	err = PTR_ERR(inode);
	inode = NULL;
bad_orphan:
	ext4_warning(sb, "bad orphan inode %lu! e2fsck was run?", ino);
	printk(KERN_WARNING "ext4_test_bit(bit=%d, block=%llu) = %d\n",
	       bit, (unsigned long long)bitmap_bh->b_blocknr,
	       ext4_test_bit(bit, bitmap_bh->b_data));
	printk(KERN_WARNING "inode=%p\n", inode);
	if (inode) {
		printk(KERN_WARNING "is_bad_inode(inode)=%d\n",
		       is_bad_inode(inode));
		printk(KERN_WARNING "NEXT_ORPHAN(inode)=%u\n",
		       NEXT_ORPHAN(inode));
		printk(KERN_WARNING "max_ino=%lu\n", max_ino);
		printk(KERN_WARNING "i_nlink=%u\n", inode->i_nlink);
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
	}
	brelse(bitmap_bh);
error:
	return ERR_PTR(err);
}

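/*
 * Sum of the free-inode counts from all group descriptors. When built
 * with EXT4FS_DEBUG it also recounts every inode bitmap and prints both
 * values so mismatches can be spotted.
 */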
unsigned long ext4_count_free_inodes(struct super_block *sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_inode_bitmap(sb, i);
		if (!bitmap_bh)
			continue;
		x = ext4_count_free(bitmap_bh->b_data,
				    EXT4_INODES_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
		       (unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_inodes: "
	       "stored = %u, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		cond_resched();
	}
	return desc_count;
#endif
}

/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block * sb)
{
	unsigned long count = 0;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	for (i = 0; i < ngroups; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		count += ext4_used_dirs_count(sb, gdp);
	}
	return count;
}

/*
 * Zeroes a not-yet-zeroed inode table by writing zeroes through the whole
 * inode table. Must be called without any spinlock held. The only place
 * this is called from on an active filesystem is the ext4lazyinit thread,
 * so we do not need any special locks; however, we have to prevent inode
 * allocation from the current group, so we take alloc_sem to block
 * ext4_new_inode() until we are finished.
 */
int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
			  int barrier)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	struct buffer_head *group_desc_bh;
	handle_t *handle;
	ext4_fsblk_t blk;
	int num, ret = 0, used_blks = 0;

	/* This should not happen, but just to be sure check this */
	if (sb->s_flags & MS_RDONLY) {
		ret = 1;
		goto out;
	}

	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
	if (!gdp)
		goto out;

	/*
	 * We do not need to lock this, because we are the only one
	 * handling this flag.
	 */
	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
		goto out;

	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	down_write(&grp->alloc_sem);
	/*
	 * If inode bitmap was already initialized there may be some
	 * used inodes so we need to skip blocks with used inodes in
	 * inode table.
	 */
	if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
		used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
			    ext4_itable_unused_count(sb, gdp)),
			    sbi->s_inodes_per_block);

	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
		ext4_error(sb, "Something is wrong with group %u: "
			   "used itable blocks: %d; "
			   "itable unused count: %u",
			   group, used_blks,
			   ext4_itable_unused_count(sb, gdp));
		ret = 1;
		goto err_out;
	}

	blk = ext4_inode_table(sb, gdp) + used_blks;
	num = sbi->s_itb_per_group - used_blks;

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	ret = ext4_journal_get_write_access(handle,
					    group_desc_bh);
	if (ret)
		goto err_out;

	/*
	 * Skip zeroout if the inode table is full. But we set the ZEROED
	 * flag anyway, because obviously, when it is full it does not need
	 * further zeroing.
	 */
	if (unlikely(num == 0))
		goto skip_zeroout;

	ext4_debug("going to zero out inode table in group %d\n",
		   group);
	ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
	if (ret < 0)
		goto err_out;
	if (barrier)
		blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);

skip_zeroout:
	ext4_lock_group(sb, group);
	gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
	ext4_group_desc_csum_set(sb, group, gdp);
	ext4_unlock_group(sb, group);

	BUFFER_TRACE(group_desc_bh,
		     "call ext4_handle_dirty_metadata");
	ret = ext4_handle_dirty_metadata(handle, NULL,
					 group_desc_bh);

err_out:
	up_write(&grp->alloc_sem);
	ext4_journal_stop(handle);
out:
	return ret;
}