/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  BSD ufs-inspired inode and directory allocation by
 *  Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *  David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>

#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#include <trace/events/ext4.h>
/*
 * ialloc.c contains the inode allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the group.
 */
/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
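
/*
 * Worked example (illustrative numbers, not from the original source):
 * with start_bit = 100 and end_bit = 32768, the loop above sets bits
 * 100..103 one at a time until i reaches the next byte boundary (104),
 * and the memset() then fills bytes 13..4095 (bits 104..32767) with 0xff
 * in a single call.
 */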

/* Initializes an uninitialized inode bitmap */
static unsigned ext4_init_inode_bitmap(struct super_block *sb,
				       struct buffer_head *bh,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks and inodes used to prevent
	 * allocation, essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
		ext4_error(sb, "Checksum bad for group %u", block_group);
		ext4_free_group_clusters_set(sb, gdp, 0);
		ext4_free_inodes_set(sb, gdp, 0);
		ext4_itable_unused_set(sb, gdp, 0);
		memset(bh->b_data, 0xff, sb->s_blocksize);
		return 0;
	}

	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
	ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
			bh->b_data);

	return EXT4_INODES_PER_GROUP(sb);
}

void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
		set_bitmap_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;

	bitmap_blk = ext4_inode_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot read inode bitmap - "
			   "block_group = %u, inode_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}
	if (bitmap_uptodate(bh))
		return bh;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	}

	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
		ext4_init_inode_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);

	if (buffer_uptodate(bh)) {
		/*
		 * If the group is not uninit and bh is uptodate,
		 * the bitmap is also uptodate.
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		return bh;
	}
	/*
	 * submit the buffer_head for reading
	 */
	trace_ext4_load_inode_bitmap(sb, block_group);
	bh->b_end_io = ext4_end_bitmap_read;
	get_bh(bh);
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		put_bh(bh);
		ext4_error(sb, "Cannot read inode bitmap - "
			   "block_group = %u, inode_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}
	return bh;
}
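
/*
 * Note (inferred from the callers in this file): the buffer_head returned
 * above carries an extra reference, so every caller pairs the call with
 * brelse() when done, e.g. ext4_free_inode() and ext4_new_inode() below.
 * The separate bitmap_uptodate flag records that the bitmap *contents* are
 * valid (read from disk or freshly initialized for an INODE_UNINIT group),
 * as opposed to the plain buffer_uptodate state of the block.
 */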

/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the harddisk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err, count, cleared;

	if (atomic_read(&inode->i_count) > 1) {
		printk(KERN_ERR "ext4_free_inode: inode has count=%d\n",
		       atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		printk(KERN_ERR "ext4_free_inode: inode has nlink=%d\n",
		       inode->i_nlink);
		return;
	}
	if (!sb) {
		printk(KERN_ERR "ext4_free_inode: inode on "
		       "nonexistent device\n");
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug("freeing inode %lu\n", ino);
	trace_ext4_free_inode(inode);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	dquot_initialize(inode);
	ext4_xattr_delete_inode(handle, inode);
	dquot_free_inode(inode);
	dquot_drop(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	ext4_clear_inode(inode);

	es = EXT4_SB(sb)->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error(sb, "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	fatal = -ESRCH;
	gdp = ext4_get_group_desc(sb, block_group, &bh2);
	if (gdp) {
		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
	}
	ext4_lock_group(sb, block_group);
	cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data);
	if (fatal || !cleared) {
		ext4_unlock_group(sb, block_group);
		goto out;
	}

	count = ext4_free_inodes_count(sb, gdp) + 1;
	ext4_free_inodes_set(sb, gdp, count);
	if (is_directory) {
		count = ext4_used_dirs_count(sb, gdp) - 1;
		ext4_used_dirs_set(sb, gdp, count);
		percpu_counter_dec(&sbi->s_dirs_counter);
	}
	gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
	ext4_unlock_group(sb, block_group);

	percpu_counter_inc(&sbi->s_freeinodes_counter);
	if (sbi->s_log_groups_per_flex) {
		ext4_group_t f = ext4_flex_group(sbi, block_group);

		atomic_inc(&sbi->s_flex_groups[f].free_inodes);
		if (is_directory)
			atomic_dec(&sbi->s_flex_groups[f].used_dirs);
	}
	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
out:
	if (cleared) {
		BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
		if (!fatal)
			fatal = err;
		ext4_mark_super_dirty(sb);
	} else
		ext4_error(sb, "bit already cleared for inode %lu", ino);

error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}
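
/*
 * Example of the inode-number arithmetic used above and again in
 * ext4_orphan_get() below (illustrative numbers only): inode numbers are
 * 1-based, so with EXT4_INODES_PER_GROUP(sb) == 8192, ino 8192 maps to
 * block_group 0, bit 8191, while ino 8193 maps to block_group 1, bit 0.
 */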

struct orlov_stats {
	__u32 free_inodes;
	__u32 free_clusters;
	__u32 used_dirs;
};

/*
 * Helper function for Orlov's allocator; returns critical information
 * for a particular block group or flex_bg.  If flex_size is 1, then g
 * is a block group number; otherwise it is flex_bg number.
 */
static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
			    int flex_size, struct orlov_stats *stats)
{
	struct ext4_group_desc *desc;
	struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;

	if (flex_size > 1) {
		stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
		stats->free_clusters = atomic_read(&flex_group[g].free_clusters);
		stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
		return;
	}

	desc = ext4_get_group_desc(sb, g, NULL);
	if (desc) {
		stats->free_inodes = ext4_free_inodes_count(sb, desc);
		stats->free_clusters = ext4_free_group_clusters(sb, desc);
		stats->used_dirs = ext4_used_dirs_count(sb, desc);
	} else {
		stats->free_inodes = 0;
		stats->free_clusters = 0;
		stats->used_dirs = 0;
	}
}

/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are blockgroups with both free inodes and free blocks counts
 * not worse than average we return one with smallest directory count.
 * Otherwise we simply return a random group.
 *
 * For the remaining directories, the rules are as follows:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_blocks).
 * The parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at parent's group).
 */
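
/*
 * Concretely, the thresholds computed in find_group_orlov() below are:
 *
 *	max_dirs     = ndirs / ngroups + inodes_per_group / 16
 *	min_inodes   = avefreei - inodes_per_group * flex_size / 4
 *	min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb) * flex_size / 4
 *
 * where avefreei and avefreec are the per-group (or per-flex-group)
 * averages of free inodes and free clusters respectively.
 */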
static int find_group_orlov(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode,
			    const struct qstr *qstr)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t real_ngroups = ext4_get_groups_count(sb);
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei, grp_free;
	ext4_fsblk_t freeb, avefreec;
	unsigned int ndirs;
	int max_dirs, min_inodes;
	ext4_grpblk_t min_clusters;
	ext4_group_t i, grp, g, ngroups;
	struct ext4_group_desc *desc;
	struct orlov_stats stats;
	int flex_size = ext4_flex_bg_size(sbi);
	struct dx_hash_info hinfo;

	ngroups = real_ngroups;
	if (flex_size > 1) {
		ngroups = (real_ngroups + flex_size - 1) >>
			sbi->s_log_groups_per_flex;
		parent_group >>= sbi->s_log_groups_per_flex;
	}

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freeb = EXT4_C2B(sbi,
		percpu_counter_read_positive(&sbi->s_freeclusters_counter));
	avefreec = freeb;
	do_div(avefreec, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if (S_ISDIR(mode) &&
	    ((parent == sb->s_root->d_inode) ||
	     (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
		int best_ndir = inodes_per_group;
		int ret = -1;

		if (qstr) {
			hinfo.hash_version = DX_HASH_HALF_MD4;
			hinfo.seed = sbi->s_hash_seed;
			ext4fs_dirhash(qstr->name, qstr->len, &hinfo);
			grp = hinfo.hash;
		} else
			get_random_bytes(&grp, sizeof(grp));
		parent_group = (unsigned)grp % ngroups;
		for (i = 0; i < ngroups; i++) {
			g = (parent_group + i) % ngroups;
			get_orlov_stats(sb, g, flex_size, &stats);
			if (!stats.free_inodes)
				continue;
			if (stats.used_dirs >= best_ndir)
				continue;
			if (stats.free_inodes < avefreei)
				continue;
			if (stats.free_clusters < avefreec)
				continue;
			grp = g;
			ret = 0;
			best_ndir = stats.used_dirs;
		}
		if (ret)
			goto fallback;
	found_flex_bg:
		if (flex_size == 1) {
			*group = grp;
			return 0;
		}

		/*
		 * We pack inodes at the beginning of the flexgroup's
		 * inode tables.  Block allocation decisions will do
		 * something similar, although regular files will
		 * start at 2nd block group of the flexgroup.  See
		 * ext4_ext_find_goal() and ext4_find_near().
		 */
		grp *= flex_size;
		for (i = 0; i < flex_size; i++) {
			if (grp+i >= real_ngroups)
				break;
			desc = ext4_get_group_desc(sb, grp+i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = grp+i;
				return 0;
			}
		}
		goto fallback;
	}

	max_dirs = ndirs / ngroups + inodes_per_group / 16;
	min_inodes = avefreei - inodes_per_group*flex_size / 4;
	if (min_inodes < 1)
		min_inodes = 1;
	min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;

	/*
	 * Start looking in the flex group where we last allocated an
	 * inode for this parent directory
	 */
	if (EXT4_I(parent)->i_last_alloc_group != ~0) {
		parent_group = EXT4_I(parent)->i_last_alloc_group;
		if (flex_size > 1)
			parent_group >>= sbi->s_log_groups_per_flex;
	}

	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		get_orlov_stats(sb, grp, flex_size, &stats);
		if (stats.used_dirs >= max_dirs)
			continue;
		if (stats.free_inodes < min_inodes)
			continue;
		if (stats.free_clusters < min_clusters)
			continue;
		goto found_flex_bg;
	}

fallback:
	ngroups = real_ngroups;
	avefreei = freei / ngroups;
fallback_retry:
	parent_group = EXT4_I(parent)->i_block_group;
	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, grp, NULL);
		grp_free = ext4_free_inodes_count(sb, desc);
		if (desc && grp_free && grp_free >= avefreei) {
			*group = grp;
			return 0;
		}
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any blockgroups
		 */
		avefreei = 0;
		goto fallback_retry;
	}

	return -1;
}

static int find_group_other(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	int flex_size = ext4_flex_bg_size(EXT4_SB(sb));

	/*
	 * Try to place the inode in the same flex group as its
	 * parent.  If we can't find space, use the Orlov algorithm to
	 * find another flex group, and store that information in the
	 * parent directory's inode information so that we use that
	 * flex group for future allocations.
	 */
	if (flex_size > 1) {
		int retry = 0;

	try_again:
		parent_group &= ~(flex_size-1);
		last = parent_group + flex_size;
		if (last > ngroups)
			last = ngroups;
		for (i = parent_group; i < last; i++) {
			desc = ext4_get_group_desc(sb, i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = i;
				return 0;
			}
		}
		if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
			retry = 1;
			parent_group = EXT4_I(parent)->i_last_alloc_group;
			goto try_again;
		}
		/*
		 * If this didn't work, use the Orlov search algorithm
		 * to find a new flex group; we pass in the mode to
		 * avoid the topdir algorithms.
		 */
		*group = parent_group + flex_size;
		if (*group > ngroups)
			*group = 0;
		return find_group_orlov(sb, parent, group, mode, NULL);
	}

	/*
	 * Try to place the inode in its parent directory
	 */
	*group = parent_group;
	desc = ext4_get_group_desc(sb, *group, NULL);
	if (desc && ext4_free_inodes_count(sb, desc) &&
	    ext4_free_group_clusters(sb, desc))
		return 0;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	*group = (*group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
	for (i = 1; i < ngroups; i <<= 1) {
		*group += i;
		if (*group >= ngroups)
			*group -= ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
		    ext4_free_group_clusters(sb, desc))
			return 0;
	}
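
	/*
	 * Illustration of the probe sequence above: with i doubling each
	 * pass and being added cumulatively, a starting group g is followed
	 * by g+1, g+3, g+7, g+15, ... (mod ngroups), i.e. offsets of
	 * 2^k - 1 from the hashed starting point.
	 */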

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	*group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++*group >= ngroups)
			*group = 0;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc))
			return 0;
	}

	return -1;
}

/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, umode_t mode,
			     const struct qstr *qstr, __u32 goal, uid_t *owner)
{
	struct super_block *sb;
	struct buffer_head *inode_bitmap_bh = NULL;
	struct buffer_head *group_desc_bh;
	ext4_group_t ngroups, group = 0;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext4_group_desc *gdp = NULL;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int ret2, err = 0;
	struct inode *ret;
	ext4_group_t i;
	ext4_group_t flex_group;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	ngroups = ext4_get_groups_count(sb);
	trace_ext4_request_inode(dir, mode);
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);
	sbi = EXT4_SB(sb);

	if (!goal)
		goal = sbi->s_inode_goal;

	if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
		group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
		ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
		ret2 = 0;
		goto got_group;
	}

	if (S_ISDIR(mode))
		ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
	else
		ret2 = find_group_other(sb, dir, &group, mode);

got_group:
	EXT4_I(dir)->i_last_alloc_group = group;
	err = -ENOSPC;
	if (ret2 == -1)
		goto out;

	/*
	 * Normally we will only go through one pass of this loop,
	 * unless we get unlucky and it turns out the group we selected
	 * had its last inode grabbed by someone else.
	 */
	for (i = 0; i < ngroups; i++, ino = 0) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
		if (!gdp)
			goto fail;

		brelse(inode_bitmap_bh);
		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
		if (!inode_bitmap_bh)
			goto fail;

repeat_in_this_group:
		ino = ext4_find_next_zero_bit((unsigned long *)
					      inode_bitmap_bh->b_data,
					      EXT4_INODES_PER_GROUP(sb), ino);
		if (ino >= EXT4_INODES_PER_GROUP(sb)) {
			if (++group == ngroups)
				group = 0;
			continue;
		}
		if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) {
			ext4_error(sb, "reserved inode found cleared - "
				   "inode=%lu", ino + 1);
			continue;
		}
		ext4_lock_group(sb, group);
		ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data);
		ext4_unlock_group(sb, group);
		ino++;		/* the inode bitmap is zero-based */
		if (!ret2)
			goto got; /* we grabbed the inode! */
		if (ino < EXT4_INODES_PER_GROUP(sb))
			goto repeat_in_this_group;
	}
	err = -ENOSPC;
	goto out;
got:
	/* We may have to initialize the block bitmap if it isn't already */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bitmap_bh;

		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
		if (err) {
			brelse(block_bitmap_bh);
			goto fail;
		}

		BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
		err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);
		brelse(block_bitmap_bh);

		/* recheck and clear flag under lock if we still need to */
		ext4_lock_group(sb, group);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			ext4_free_group_clusters_set(sb, gdp,
				ext4_free_clusters_after_init(sb, group, gdp));
			gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
								gdp);
		}
		ext4_unlock_group(sb, group);

		if (err)
			goto fail;
	}

	BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, inode_bitmap_bh);
	if (err)
		goto fail;

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, group_desc_bh);
	if (err)
		goto fail;

	/* Update the relevant bg descriptor fields */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
		int free;
		struct ext4_group_info *grp = ext4_get_group_info(sb, group);

		down_read(&grp->alloc_sem); /* protect vs itable lazyinit */
		ext4_lock_group(sb, group); /* while we modify the bg desc */
		free = EXT4_INODES_PER_GROUP(sb) -
			ext4_itable_unused_count(sb, gdp);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
			free = 0;
		}
		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group. If it is greater
		 * we need to update the bg_itable_unused count.
		 */
		if (ino > free)
			ext4_itable_unused_set(sb, gdp,
					(EXT4_INODES_PER_GROUP(sb) - ino));
		up_read(&grp->alloc_sem);
	}
	ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
	if (S_ISDIR(mode)) {
		ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1);
		if (sbi->s_log_groups_per_flex) {
			ext4_group_t f = ext4_flex_group(sbi, group);

			atomic_inc(&sbi->s_flex_groups[f].used_dirs);
		}
	}
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
		gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
		ext4_unlock_group(sb, group);
	}

	BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
	if (err)
		goto fail;

	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
	if (err)
		goto fail;

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);
	ext4_mark_super_dirty(sb);

	if (sbi->s_log_groups_per_flex) {
		flex_group = ext4_flex_group(sbi, group);
		atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
	}
	if (owner) {
		inode->i_mode = mode;
		inode->i_uid = owner[0];
		inode->i_gid = owner[1];
	} else if (test_opt(sb, GRPID)) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = dir->i_gid;
	} else
		inode_init_owner(inode, dir, mode);

	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
						       ext4_current_time(inode);

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	/* Don't inherit extent flag from directory, amongst others. */
	ei->i_flags =
		ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
	ei->i_file_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_group = group;
	ei->i_last_alloc_group = ~0;

	ext4_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		ext4_handle_sync(handle);
	if (insert_inode_locked(inode) < 0) {
		/*
		 * Likely a bitmap corruption causing inode to be allocated
		 * twice.
		 */
		err = -EIO;
		goto fail;
	}
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
	ext4_set_inode_state(inode, EXT4_STATE_NEW);

	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;

	ret = inode;
	dquot_initialize(inode);
	err = dquot_alloc_inode(inode);
	if (err)
		goto fail_drop;

	err = ext4_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_init_security(handle, inode, dir, qstr);
	if (err)
		goto fail_free_drop;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
		/* set extent flag only for directory, file and normal symlink*/
		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
			ext4_ext_tree_init(handle, inode);
		}
	}

	if (ext4_handle_valid(handle)) {
		ei->i_sync_tid = handle->h_transaction->t_tid;
		ei->i_datasync_tid = handle->h_transaction->t_tid;
	}

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	trace_ext4_allocate_inode(inode, dir, mode);
	goto really_out;
fail:
	ext4_std_error(sb, err);
out:
	iput(inode);
	ret = ERR_PTR(err);
really_out:
	brelse(inode_bitmap_bh);
	return ret;

fail_free_drop:
	dquot_free_inode(inode);

fail_drop:
	dquot_drop(inode);
	inode->i_flags |= S_NOQUOTA;
	clear_nlink(inode);
	unlock_new_inode(inode);
	iput(inode);
	brelse(inode_bitmap_bh);
	return ERR_PTR(err);
}
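
/*
 * Usage sketch (an assumption for illustration; the real callers live in
 * other files, e.g. fs/ext4/namei.c): a caller typically starts a journal
 * handle, requests a new inode, and checks the ERR_PTR on failure, roughly:
 *
 *	inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0, NULL);
 *	if (IS_ERR(inode))
 *		err = PTR_ERR(inode);
 *	...
 *	ext4_journal_stop(handle);
 *
 * The caller is responsible for stopping the handle and releasing the
 * inode reference on its own error paths.
 */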

/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	ext4_group_t block_group;
	int bit;
	struct buffer_head *bitmap_bh;
	struct inode *inode = NULL;
	long err = -EIO;

	/* Error cases - e2fsck has already cleaned up for us */
	if (ino > max_ino) {
		ext4_warning(sb, "bad orphan ino %lu! e2fsck was run?", ino);
		goto error;
	}

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh) {
		ext4_warning(sb, "inode bitmap error for orphan %lu", ino);
		goto error;
	}

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data))
		goto bad_orphan;

	inode = ext4_iget(sb, ino);
	if (IS_ERR(inode))
		goto iget_failed;

	/*
	 * If the orphan has i_nlink > 0 then it should be able to be
	 * truncated, otherwise it won't be removed from the orphan list
	 * during processing and an infinite loop will result.
	 */
	if (inode->i_nlink && !ext4_can_truncate(inode))
		goto bad_orphan;

	if (NEXT_ORPHAN(inode) > max_ino)
		goto bad_orphan;
	brelse(bitmap_bh);
	return inode;

iget_failed:
	err = PTR_ERR(inode);
	inode = NULL;
bad_orphan:
	ext4_warning(sb, "bad orphan inode %lu! e2fsck was run?", ino);
	printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
	       bit, (unsigned long long)bitmap_bh->b_blocknr,
	       ext4_test_bit(bit, bitmap_bh->b_data));
	printk(KERN_NOTICE "inode=%p\n", inode);
	if (inode) {
		printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
		       is_bad_inode(inode));
		printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
		       NEXT_ORPHAN(inode));
		printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
		printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
	}
	brelse(bitmap_bh);
error:
	return ERR_PTR(err);
}

unsigned long ext4_count_free_inodes(struct super_block *sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_inode_bitmap(sb, i);
		if (!bitmap_bh)
			continue;
		x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
		       (unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_inodes: "
	       "stored = %u, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		cond_resched();
	}
	return desc_count;
#endif
}

/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block * sb)
{
	unsigned long count = 0;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	for (i = 0; i < ngroups; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		count += ext4_used_dirs_count(sb, gdp);
	}
	return count;
}

/*
 * Zeroes a not-yet-zeroed inode table - just write zeroes through the whole
 * inode table.  Must be called without any spinlock held.  The only place
 * this is called from on an active part of the filesystem is the
 * ext4lazyinit thread, so we do not need any special locks; however, we have
 * to prevent inode allocation from the current group, so we take the
 * alloc_sem lock to block ext4_new_inode() until we are finished.
 */
int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
			  int barrier)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	struct buffer_head *group_desc_bh;
	handle_t *handle;
	ext4_fsblk_t blk;
	int num, ret = 0, used_blks = 0;

	/* This should not happen, but just to be sure check this */
	if (sb->s_flags & MS_RDONLY) {
		ret = 1;
		goto out;
	}

	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
	if (!gdp)
		goto out;

	/*
	 * We do not need to lock this, because we are the only one
	 * handling this flag.
	 */
	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
		goto out;

	handle = ext4_journal_start_sb(sb, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	down_write(&grp->alloc_sem);
	/*
	 * If inode bitmap was already initialized there may be some
	 * used inodes so we need to skip blocks with used inodes in
	 * inode table.
	 */
	if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
		used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
			    ext4_itable_unused_count(sb, gdp)),
			    sbi->s_inodes_per_block);
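
	/*
	 * Example of the calculation above (illustrative numbers only): with
	 * 8192 inodes per group, 256-byte inodes in 4096-byte blocks
	 * (16 inodes per block) and bg_itable_unused = 7000, the first
	 * DIV_ROUND_UP(8192 - 7000, 16) = 75 itable blocks may contain live
	 * inodes and are skipped; zeroing starts at the 76th block.
	 */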
	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
		ext4_error(sb, "Something is wrong with group %u\n"
			   "Used itable blocks: %d"
			   "itable unused count: %u\n",
			   group, used_blks,
			   ext4_itable_unused_count(sb, gdp));
		ret = 1;
		goto err_out;
	}

	blk = ext4_inode_table(sb, gdp) + used_blks;
	num = sbi->s_itb_per_group - used_blks;

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	ret = ext4_journal_get_write_access(handle,
					    group_desc_bh);
	if (ret)
		goto err_out;

	/*
	 * Skip zeroout if the inode table is full.  But we set the ZEROED
	 * flag anyway, because obviously, when it is full it does not need
	 * further zeroing.
	 */
	if (unlikely(num == 0))
		goto skip_zeroout;

	ext4_debug("going to zero out inode table in group %d\n",
		   group);
	ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
	if (ret < 0)
		goto err_out;
	if (barrier)
		blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);

skip_zeroout:
	ext4_lock_group(sb, group);
	gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
	ext4_unlock_group(sb, group);

	BUFFER_TRACE(group_desc_bh,
		     "call ext4_handle_dirty_metadata");
	ret = ext4_handle_dirty_metadata(handle, NULL,
					 group_desc_bh);

err_out:
	up_write(&grp->alloc_sem);
	ext4_journal_stop(handle);
out:
	return ret;
}