/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  BSD ufs-inspired inode and directory allocation by
 *  Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *  David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>

#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#include <trace/events/ext4.h>
/*
 * ialloc.c contains the inode allocation and deallocation routines.
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains one bitmap block for blocks, one
 * bitmap block for inodes, N blocks for the inode table, and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the group.
 */

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
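
/*
 * Illustrative example (added commentary, not from the original source):
 * with start_bit = 100 and end_bit = 256, the loop above sets bits 100..103
 * individually, stopping at the next byte boundary (bit 104); the memset
 * then fills bytes 13..31 (bits 104..255) in a single call.  Callers pass
 * end_bit as a multiple of 8 (e.g. sb->s_blocksize * 8), so the
 * byte-granular memset covers the remainder exactly.
 */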
/* Initializes an uninitialized inode bitmap */
static unsigned ext4_init_inode_bitmap(struct super_block *sb,
				       struct buffer_head *bh,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks and inodes in use to prevent
	 * allocation, essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
		ext4_error(sb, "Checksum bad for group %u", block_group);
		ext4_free_group_clusters_set(sb, gdp, 0);
		ext4_free_inodes_set(sb, gdp, 0);
		ext4_itable_unused_set(sb, gdp, 0);
		memset(bh->b_data, 0xff, sb->s_blocksize);
		return 0;
	}

	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
	ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
			     bh->b_data);

	return EXT4_INODES_PER_GROUP(sb);
}
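
/*
 * Note (added commentary): the in-memory bitmap spans one block, i.e.
 * s_blocksize * 8 bits, but only the first EXT4_INODES_PER_GROUP(sb) bits
 * correspond to real inodes.  The ext4_mark_bitmap_end() call above marks
 * the unused tail as allocated so that the inode allocator can never hand
 * out an inode number beyond the group's inode table.
 */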
void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
		set_bitmap_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;

	bitmap_blk = ext4_inode_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot read inode bitmap - "
			   "block_group = %u, inode_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}
	if (bitmap_uptodate(bh))
		return bh;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	}

	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
		ext4_init_inode_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);

	if (buffer_uptodate(bh)) {
		/*
		 * If the group is not marked uninit and bh is uptodate,
		 * the bitmap is also uptodate.
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		return bh;
	}
	/*
	 * submit the buffer_head for reading
	 */
	trace_ext4_load_inode_bitmap(sb, block_group);
	bh->b_end_io = ext4_end_bitmap_read;
	get_bh(bh);
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		put_bh(bh);
		ext4_error(sb, "Cannot read inode bitmap - "
			   "block_group = %u, inode_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}
	return bh;
}
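
/*
 * Note (added commentary): two separate "uptodate" bits are tracked here.
 * buffer_uptodate() means the buffer's contents match what was read from
 * disk; bitmap_uptodate() means the bitmap contents are valid to allocate
 * from.  A freshly initialized bitmap (the INODE_UNINIT path above) is
 * bitmap-uptodate without ever touching the disk, which is why the two
 * flags are set independently.
 */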
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the hard disk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err, count, cleared;

	if (!sb) {
		printk(KERN_ERR "EXT4-fs: %s:%d: inode on "
		       "nonexistent device\n", __func__, __LINE__);
		return;
	}
	if (atomic_read(&inode->i_count) > 1) {
		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: count=%d",
			 __func__, __LINE__, inode->i_ino,
			 atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: nlink=%d\n",
			 __func__, __LINE__, inode->i_ino, inode->i_nlink);
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug("freeing inode %lu\n", ino);
	trace_ext4_free_inode(inode);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	dquot_initialize(inode);
	ext4_xattr_delete_inode(handle, inode);
	dquot_free_inode(inode);
	dquot_drop(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	ext4_clear_inode(inode);

	es = EXT4_SB(sb)->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error(sb, "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	fatal = -ESRCH;
	gdp = ext4_get_group_desc(sb, block_group, &bh2);
	if (gdp) {
		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
	}
	ext4_lock_group(sb, block_group);
	cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data);
	if (fatal || !cleared) {
		ext4_unlock_group(sb, block_group);
		goto out;
	}

	count = ext4_free_inodes_count(sb, gdp) + 1;
	ext4_free_inodes_set(sb, gdp, count);
	if (is_directory) {
		count = ext4_used_dirs_count(sb, gdp) - 1;
		ext4_used_dirs_set(sb, gdp, count);
		percpu_counter_dec(&sbi->s_dirs_counter);
	}
	gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
	ext4_unlock_group(sb, block_group);

	percpu_counter_inc(&sbi->s_freeinodes_counter);
	if (sbi->s_log_groups_per_flex) {
		ext4_group_t f = ext4_flex_group(sbi, block_group);

		atomic_inc(&sbi->s_flex_groups[f].free_inodes);
		if (is_directory)
			atomic_dec(&sbi->s_flex_groups[f].used_dirs);
	}
	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
out:
	if (cleared) {
		BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
		if (!fatal)
			fatal = err;
		ext4_mark_super_dirty(sb);
	} else
		ext4_error(sb, "bit already cleared for inode %lu", ino);

error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}
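
/*
 * Note (added commentary): the group spinlock above covers only the bitmap
 * bit and the group-descriptor counters, which must change together; the
 * per-filesystem percpu and flex-group counters are approximate and are
 * updated outside the lock.  Ordering also matters: quota and the in-core
 * inode are released before the bitmap bit is cleared, per the NOTE above,
 * so the inode number cannot be reused while an alias still exists.
 */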
struct orlov_stats {
	__u32 free_inodes;
	__u32 free_clusters;
	__u32 used_dirs;
};

/*
 * Helper function for Orlov's allocator; returns critical information
 * for a particular block group or flex_bg.  If flex_size is 1, then g
 * is a block group number; otherwise it is flex_bg number.
 */
static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
			    int flex_size, struct orlov_stats *stats)
{
	struct ext4_group_desc *desc;
	struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;

	if (flex_size > 1) {
		stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
		stats->free_clusters = atomic_read(&flex_group[g].free_clusters);
		stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
		return;
	}

	desc = ext4_get_group_desc(sb, g, NULL);
	if (desc) {
		stats->free_inodes = ext4_free_inodes_count(sb, desc);
		stats->free_clusters = ext4_free_group_clusters(sb, desc);
		stats->used_dirs = ext4_used_dirs_count(sb, desc);
	} else {
		stats->free_inodes = 0;
		stats->free_clusters = 0;
		stats->used_dirs = 0;
	}
}
/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are block groups with both free inodes and free blocks counts
 * not worse than average we return the one with the smallest directory
 * count.  Otherwise we simply return a random group.
 *
 * For the remaining directories, the rules are as follows:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_clusters).
 * Parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at parent's group).
 */
static int find_group_orlov(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode,
			    const struct qstr *qstr)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t real_ngroups = ext4_get_groups_count(sb);
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei, grp_free;
	ext4_fsblk_t freeb, avefreec;
	unsigned int ndirs;
	int max_dirs, min_inodes;
	ext4_grpblk_t min_clusters;
	ext4_group_t i, grp, g, ngroups;
	struct ext4_group_desc *desc;
	struct orlov_stats stats;
	int flex_size = ext4_flex_bg_size(sbi);
	struct dx_hash_info hinfo;

	ngroups = real_ngroups;
	if (flex_size > 1) {
		ngroups = (real_ngroups + flex_size - 1) >>
			sbi->s_log_groups_per_flex;
		parent_group >>= sbi->s_log_groups_per_flex;
	}

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freeb = EXT4_C2B(sbi,
		percpu_counter_read_positive(&sbi->s_freeclusters_counter));
	avefreec = freeb;
	do_div(avefreec, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if (S_ISDIR(mode) &&
	    ((parent == sb->s_root->d_inode) ||
	     (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
		int best_ndir = inodes_per_group;
		int ret = -1;

		if (qstr) {
			hinfo.hash_version = DX_HASH_HALF_MD4;
			hinfo.seed = sbi->s_hash_seed;
			ext4fs_dirhash(qstr->name, qstr->len, &hinfo);
			grp = hinfo.hash;
		} else
			get_random_bytes(&grp, sizeof(grp));
		parent_group = (unsigned)grp % ngroups;
		for (i = 0; i < ngroups; i++) {
			g = (parent_group + i) % ngroups;
			get_orlov_stats(sb, g, flex_size, &stats);
			if (!stats.free_inodes)
				continue;
			if (stats.used_dirs >= best_ndir)
				continue;
			if (stats.free_inodes < avefreei)
				continue;
			if (stats.free_clusters < avefreec)
				continue;
			grp = g;
			ret = 0;
			best_ndir = stats.used_dirs;
		}
		if (ret)
			goto fallback;
	found_flex_bg:
		if (flex_size == 1) {
			*group = grp;
			return 0;
		}

		/*
		 * We pack inodes at the beginning of the flexgroup's
		 * inode tables.  Block allocation decisions will do
		 * something similar, although regular files will
		 * start at 2nd block group of the flexgroup.  See
		 * ext4_ext_find_goal() and ext4_find_near().
		 */
		grp *= flex_size;
		for (i = 0; i < flex_size; i++) {
			if (grp+i >= real_ngroups)
				break;
			desc = ext4_get_group_desc(sb, grp+i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = grp+i;
				return 0;
			}
		}
		goto fallback;
	}

	max_dirs = ndirs / ngroups + inodes_per_group / 16;
	min_inodes = avefreei - inodes_per_group*flex_size / 4;
	if (min_inodes < 1)
		min_inodes = 1;
	min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;

	/*
	 * Start looking in the flex group where we last allocated an
	 * inode for this parent directory
	 */
	if (EXT4_I(parent)->i_last_alloc_group != ~0) {
		parent_group = EXT4_I(parent)->i_last_alloc_group;
		if (flex_size > 1)
			parent_group >>= sbi->s_log_groups_per_flex;
	}

	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		get_orlov_stats(sb, grp, flex_size, &stats);
		if (stats.used_dirs >= max_dirs)
			continue;
		if (stats.free_inodes < min_inodes)
			continue;
		if (stats.free_clusters < min_clusters)
			continue;
		goto found_flex_bg;
	}

fallback:
	ngroups = real_ngroups;
	avefreei = freei / ngroups;
fallback_retry:
	parent_group = EXT4_I(parent)->i_block_group;
	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, grp, NULL);
		if (desc) {
			grp_free = ext4_free_inodes_count(sb, desc);
			if (grp_free && grp_free >= avefreei) {
				*group = grp;
				return 0;
			}
		}
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any blockgroups
		 */
		avefreei = 0;
		goto fallback_retry;
	}

	return -1;
}
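
/*
 * Illustrative numbers (assumed, not from the source): with 32768 inodes
 * per group and flex_size = 16, the thresholds above work out to
 * max_dirs = ndirs/ngroups + 2048, min_inodes = avefreei - 131072 (clamped
 * to at least 1), and min_clusters = avefreec - 4 * EXT4_CLUSTERS_PER_GROUP.
 * A flex group is rejected as a home for a new directory if it already
 * holds more than its share of directories or has noticeably fewer free
 * inodes/clusters than the filesystem average.
 */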
static int find_group_other(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	int flex_size = ext4_flex_bg_size(EXT4_SB(sb));

	/*
	 * Try to place the inode in the same flex group as its
	 * parent.  If we can't find space, use the Orlov algorithm to
	 * find another flex group, and store that information in the
	 * parent directory's inode information so that we use that
	 * flex group for future allocations.
	 */
	if (flex_size > 1) {
		int retry = 0;

	try_again:
		parent_group &= ~(flex_size-1);
		last = parent_group + flex_size;
		if (last > ngroups)
			last = ngroups;
		for (i = parent_group; i < last; i++) {
			desc = ext4_get_group_desc(sb, i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = i;
				return 0;
			}
		}
		if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
			retry = 1;
			parent_group = EXT4_I(parent)->i_last_alloc_group;
			goto try_again;
		}
		/*
		 * If this didn't work, use the Orlov search algorithm
		 * to find a new flex group; we pass in the mode to
		 * avoid the topdir algorithms.
		 */
		*group = parent_group + flex_size;
		if (*group > ngroups)
			*group = 0;
		return find_group_orlov(sb, parent, group, mode, NULL);
	}

	/*
	 * Try to place the inode in its parent directory
	 */
	*group = parent_group;
	desc = ext4_get_group_desc(sb, *group, NULL);
	if (desc && ext4_free_inodes_count(sb, desc) &&
	    ext4_free_group_clusters(sb, desc))
		return 0;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	*group = (*group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
	for (i = 1; i < ngroups; i <<= 1) {
		*group += i;
		if (*group >= ngroups)
			*group -= ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
		    ext4_free_group_clusters(sb, desc))
			return 0;
	}

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	*group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++*group >= ngroups)
			*group = 0;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc))
			return 0;
	}

	return -1;
}
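
/*
 * Illustrative example (added commentary, not from the source): starting
 * from group g, the quadratic probe above visits g+1, g+3, g+7, g+15, ...
 * (mod ngroups), since each step adds the next power of two.  This scatters
 * inodes whose parents hash to the same starting group, while the final
 * linear scan guarantees that any group with a free inode is eventually
 * found.
 */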
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, umode_t mode,
			     const struct qstr *qstr, __u32 goal, uid_t *owner)
{
	struct super_block *sb;
	struct buffer_head *inode_bitmap_bh = NULL;
	struct buffer_head *group_desc_bh;
	ext4_group_t ngroups, group = 0;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext4_group_desc *gdp = NULL;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int ret2, err = 0;
	struct inode *ret;
	ext4_group_t i;
	ext4_group_t flex_group;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	ngroups = ext4_get_groups_count(sb);
	trace_ext4_request_inode(dir, mode);
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);
	sbi = EXT4_SB(sb);

	if (!goal)
		goal = sbi->s_inode_goal;

	if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
		group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
		ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
		ret2 = 0;
		goto got_group;
	}

	if (S_ISDIR(mode))
		ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
	else
		ret2 = find_group_other(sb, dir, &group, mode);

got_group:
	EXT4_I(dir)->i_last_alloc_group = group;
	err = -ENOSPC;
	if (ret2 == -1)
		goto out;

	/*
	 * Normally we will only go through one pass of this loop,
	 * unless we get unlucky and it turns out the group we selected
	 * had its last inode grabbed by someone else.
	 */
	for (i = 0; i < ngroups; i++, ino = 0) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
		if (!gdp)
			goto fail;

		brelse(inode_bitmap_bh);
		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
		if (!inode_bitmap_bh)
			goto fail;

repeat_in_this_group:
		ino = ext4_find_next_zero_bit((unsigned long *)
					      inode_bitmap_bh->b_data,
					      EXT4_INODES_PER_GROUP(sb), ino);
		if (ino >= EXT4_INODES_PER_GROUP(sb)) {
			if (++group == ngroups)
				group = 0;
			continue;
		}
		if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) {
			ext4_error(sb, "reserved inode found cleared - "
				   "inode=%lu", ino + 1);
			continue;
		}
		ext4_lock_group(sb, group);
		ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data);
		ext4_unlock_group(sb, group);
		ino++;		/* the inode bitmap is zero-based */
		if (!ret2)
			goto got; /* we grabbed the inode! */
		if (ino < EXT4_INODES_PER_GROUP(sb))
			goto repeat_in_this_group;
	}
	err = -ENOSPC;
	goto out;

got:
	/* We may have to initialize the block bitmap if it isn't already */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bitmap_bh;

		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
		if (err) {
			brelse(block_bitmap_bh);
			goto fail;
		}

		BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
		err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);
		brelse(block_bitmap_bh);

		/* recheck and clear flag under lock if we still need to */
		ext4_lock_group(sb, group);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			ext4_free_group_clusters_set(sb, gdp,
				ext4_free_clusters_after_init(sb, group, gdp));
			gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
								gdp);
		}
		ext4_unlock_group(sb, group);

		if (err)
			goto fail;
	}

	BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, inode_bitmap_bh);
	if (err)
		goto fail;

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, group_desc_bh);
	if (err)
		goto fail;

	/* Update the relevant bg descriptor fields */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
		int free;
		struct ext4_group_info *grp = ext4_get_group_info(sb, group);

		down_read(&grp->alloc_sem); /* protect vs itable lazyinit */
		ext4_lock_group(sb, group); /* while we modify the bg desc */
		free = EXT4_INODES_PER_GROUP(sb) -
			ext4_itable_unused_count(sb, gdp);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
			free = 0;
		}
		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group. if it is greater
		 * we need to update the bg_itable_unused count
		 */
		if (ino > free)
			ext4_itable_unused_set(sb, gdp,
					(EXT4_INODES_PER_GROUP(sb) - ino));
		up_read(&grp->alloc_sem);
	}
	ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
	if (S_ISDIR(mode)) {
		ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1);
		if (sbi->s_log_groups_per_flex) {
			ext4_group_t f = ext4_flex_group(sbi, group);

			atomic_inc(&sbi->s_flex_groups[f].used_dirs);
		}
	}
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
		gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
		ext4_unlock_group(sb, group);
	}

	BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
	if (err)
		goto fail;

	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
	if (err)
		goto fail;

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);
	ext4_mark_super_dirty(sb);

	if (sbi->s_log_groups_per_flex) {
		flex_group = ext4_flex_group(sbi, group);
		atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
	}
	if (owner) {
		inode->i_mode = mode;
		inode->i_uid = owner[0];
		inode->i_gid = owner[1];
	} else if (test_opt(sb, GRPID)) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = dir->i_gid;
	} else
		inode_init_owner(inode, dir, mode);

	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
						ext4_current_time(inode);

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	/* Don't inherit extent flag from directory, amongst others. */
	ei->i_flags =
		ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
	ei->i_file_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_group = group;
	ei->i_last_alloc_group = ~0;

	ext4_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		ext4_handle_sync(handle);
	if (insert_inode_locked(inode) < 0) {
		/*
		 * Likely a bitmap corruption causing inode to be allocated
		 * twice.
		 */
		err = -EIO;
		goto fail;
	}
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
	ext4_set_inode_state(inode, EXT4_STATE_NEW);

	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;

	ret = inode;
	dquot_initialize(inode);
	err = dquot_alloc_inode(inode);
	if (err)
		goto fail_drop;

	err = ext4_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_init_security(handle, inode, dir, qstr);
	if (err)
		goto fail_free_drop;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
		/* set extent flag only for directory, file and normal symlink*/
		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
			ext4_ext_tree_init(handle, inode);
		}
	}

	if (ext4_handle_valid(handle)) {
		ei->i_sync_tid = handle->h_transaction->t_tid;
		ei->i_datasync_tid = handle->h_transaction->t_tid;
	}

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	trace_ext4_allocate_inode(inode, dir, mode);
	goto really_out;
fail:
	ext4_std_error(sb, err);
out:
	iput(inode);
	ret = ERR_PTR(err);
really_out:
	brelse(inode_bitmap_bh);
	return ret;
fail_free_drop:
	dquot_free_inode(inode);
fail_drop:
	dquot_drop(inode);
	inode->i_flags |= S_NOQUOTA;
	clear_nlink(inode);
	unlock_new_inode(inode);
	iput(inode);
	brelse(inode_bitmap_bh);
	return ERR_PTR(err);
}
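
/*
 * Usage sketch (added commentary, not part of the original file): a caller
 * such as ext4_create() allocates the inode inside a running journal
 * transaction, roughly:
 *
 *	handle = ext4_journal_start(dir, credits);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0, NULL);
 *	if (IS_ERR(inode))
 *		err = PTR_ERR(inode);
 *	...
 *	ext4_journal_stop(handle);
 *
 * The "credits" value here is a placeholder; real callers compute it from
 * the number of blocks the transaction may dirty.
 */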
/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	ext4_group_t block_group;
	int bit;
	struct buffer_head *bitmap_bh;
	struct inode *inode = NULL;
	long err = -EIO;

	/* Error cases - e2fsck has already cleaned up for us */
	if (ino > max_ino) {
		ext4_warning(sb, "bad orphan ino %lu! e2fsck was run?", ino);
		goto error;
	}

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh) {
		ext4_warning(sb, "inode bitmap error for orphan %lu", ino);
		goto error;
	}

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data))
		goto bad_orphan;

	inode = ext4_iget(sb, ino);
	if (IS_ERR(inode))
		goto iget_failed;

	/*
	 * If the orphan has i_nlink > 0 then it should be able to be
	 * truncated, otherwise it won't be removed from the orphan list
	 * during processing and an infinite loop will result.
	 */
	if (inode->i_nlink && !ext4_can_truncate(inode))
		goto bad_orphan;

	if (NEXT_ORPHAN(inode) > max_ino)
		goto bad_orphan;
	brelse(bitmap_bh);
	return inode;

iget_failed:
	err = PTR_ERR(inode);
	inode = NULL;
bad_orphan:
	ext4_warning(sb, "bad orphan inode %lu! e2fsck was run?", ino);
	printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
	       bit, (unsigned long long)bitmap_bh->b_blocknr,
	       ext4_test_bit(bit, bitmap_bh->b_data));
	printk(KERN_NOTICE "inode=%p\n", inode);
	if (inode) {
		printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
		       is_bad_inode(inode));
		printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
		       NEXT_ORPHAN(inode));
		printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
		printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
	}
	brelse(bitmap_bh);
error:
	return ERR_PTR(err);
}
unsigned long ext4_count_free_inodes(struct super_block *sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_inode_bitmap(sb, i);
		if (!bitmap_bh)
			continue;

		x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
		       (unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_inodes: "
	       "stored = %u, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		cond_resched();
	}
	return desc_count;
#endif
}
/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block * sb)
{
	unsigned long count = 0;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	for (i = 0; i < ngroups; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		count += ext4_used_dirs_count(sb, gdp);
	}
	return count;
}
/*
 * Zeroes a not-yet-zeroed inode table by writing zeroes through the whole
 * inode table.  Must be called without any spinlock held.  The only place
 * it is called from on an active file system is the ext4lazyinit thread,
 * so we do not need any special locks; however, we have to prevent inode
 * allocation from the current group, so we take the alloc_sem lock to
 * block ext4_new_inode() until we are finished.
 */
int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
			  int barrier)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	struct buffer_head *group_desc_bh;
	handle_t *handle;
	ext4_fsblk_t blk;
	int num, ret = 0, used_blks = 0;

	/* This should not happen, but just to be sure check this */
	if (sb->s_flags & MS_RDONLY) {
		ret = 1;
		goto out;
	}

	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
	if (!gdp)
		goto out;

	/*
	 * We do not need to lock this, because we are the only one
	 * handling this flag.
	 */
	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
		goto out;

	handle = ext4_journal_start_sb(sb, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	down_write(&grp->alloc_sem);
	/*
	 * If inode bitmap was already initialized there may be some
	 * used inodes so we need to skip blocks with used inodes in
	 * inode table.
	 */
	if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
		used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
			    ext4_itable_unused_count(sb, gdp)),
			    sbi->s_inodes_per_block);

	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
		ext4_error(sb, "Something is wrong with group %u: "
			   "used itable blocks: %d; "
			   "itable unused count: %u",
			   group, used_blks,
			   ext4_itable_unused_count(sb, gdp));
		ret = 1;
		goto err_out;
	}
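	/*
	 * Worked example (illustrative numbers, not from the source): with a
	 * 4 KiB block size and 256-byte inodes, s_inodes_per_block is 16.  If
	 * the group has 32768 inodes and bg_itable_unused says 32000 are
	 * untouched, used_blks = DIV_ROUND_UP(768, 16) = 48, and only the
	 * inode-table blocks after those 48 are zeroed below.
	 */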
	blk = ext4_inode_table(sb, gdp) + used_blks;
	num = sbi->s_itb_per_group - used_blks;

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	ret = ext4_journal_get_write_access(handle,
					    group_desc_bh);
	if (ret)
		goto err_out;

	/*
	 * Skip zeroout if the inode table is full.  But we set the ZEROED
	 * flag anyway, because obviously, when it is full it does not need
	 * further zeroing.
	 */
	if (unlikely(num == 0))
		goto skip_zeroout;

	ext4_debug("going to zero out inode table in group %d\n",
		   group);
	ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
	if (ret < 0)
		goto err_out;
	if (barrier)
		blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);

skip_zeroout:
	ext4_lock_group(sb, group);
	gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
	ext4_unlock_group(sb, group);

	BUFFER_TRACE(group_desc_bh,
		     "call ext4_handle_dirty_metadata");
	ret = ext4_handle_dirty_metadata(handle, NULL,
					 group_desc_bh);

err_out:
	up_write(&grp->alloc_sem);
	ext4_journal_stop(handle);
out:
	return ret;
}