/*
 * linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

#define INVBLOCK ((u64)-1L)

static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned, int *);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
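
/*
 * Reader's note: throughout this file a "fragment" is the smallest
 * allocation unit; uspi->s_fpb of them make up one block, and both are
 * tracked per cylinder group.  INVBLOCK is the sentinel the search
 * helpers below return when no suitable block can be found.
 */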

/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	lock_super(sb);

	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks outside the device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Trying to reassemble free fragments into block
	 */
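	/*
	 * For example, with eight fragments per block: if fragments 0..4
	 * of the block at 'bbase' were already free and this call freed
	 * 5..7, all eight bits are now set, ubh_isblockset() succeeds,
	 * and the fragments are re-accounted below as one free block.
	 */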
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed:
	unlock_super (sb);
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	lock_super(sb);

do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks outside the device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	sb->s_dirt = 1;
	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed_unlock:
	unlock_super (sb);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Modify the page cache of the inode so that buffers currently mapped
 * with b_blocknr oldb..oldb+count-1 become mapped to newb..newb+count-1.
 * We also assume that oldb..oldb+count-1 sit at the end of the file.
 *
 * We can come here from ufs_writepage or ufs_prepare_write;
 * locked_page is an argument of those functions, so it is already locked.
 */
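/*
 * With 4 KB pages and 1 KB blocks, for instance, blks_per_page below is
 * 4 and 'mask' is 3: each pass of the outer loop handles the buffers of
 * one page, stepping 'i' to the first block of the next page via
 * (i | mask) + 1.
 */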
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	const unsigned blks_per_page =
		1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	     inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;
	end = count + beg;
	last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page)	/* it was truncated */
				continue;
			if (IS_ERR(page)) {	/* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;

		head = page_buffers(page);
		bh = head;
		pos = i & mask;
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;

		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			pos = (i - beg) + j;

			if (!buffer_mapped(bh))
				map_bh(bh, inode->i_sb, oldb + pos);
			if (!buffer_uptodate(bh)) {
				ll_rw_block(READ, 1, &bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					ufs_error(inode->i_sb, __func__,
						  "read of block failed\n");
					break;
				}
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);
			bh->b_blocknr = newb + pos;
			unmap_underlying_metadata(bh->b_bdev,
						  bh->b_blocknr);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}
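
/*
 * Zero 'n' freshly allocated fragments starting at 'beg' through the
 * buffer cache, so stale disk contents never become visible in the
 * file; write them out at once when the inode is synchronous or 'sync'
 * is set.
 */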
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
			    int sync)
{
	struct buffer_head *bh;
	sector_t end = beg + n;

	for (; beg < end; ++beg) {
		bh = sb_getblk(inode->i_sb, beg);
		lock_buffer(bh);
		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (IS_SYNC(inode) || sync)
			sync_dirty_buffer(bh);
		brelse(bh);
	}
}
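
/*
 * Allocate 'count' fragments starting at 'fragment' for 'inode'; 'p'
 * points to the on-disk block pointer being (re)filled and 'goal' is a
 * preferred location.  Returns the first allocated fragment (with *err
 * cleared), 0 when nothing was allocated, or INVBLOCK on internal
 * error.
 */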
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
		      u64 goal, unsigned count, int *err,
		      struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount;
	u64 tmp, request, result;

	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	lock_super (sb);
	tmp = ufs_data_ptr_to_cpu(sb, p);

	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning(sb, "ufs_new_fragments", "internal warning"
			    " fragment %llu, count %u",
			    (unsigned long long)fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error(sb, "ufs_new_fragments", "internal error, "
				  "fragment %llu, tmp %llu\n",
				  (unsigned long long)fragment,
				  (unsigned long long)tmp);
			unlock_super(sb);
			return INVBLOCK;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			unlock_super (sb);
			return 0;
		}
	}
	else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			unlock_super(sb);
			return 0;
		}
	}

	/*
	 * There is not enough space for the user on the device
	 */
	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
		unlock_super (sb);
		UFSD("EXIT (FAILED)\n");
		return 0;
	}

	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog(uspi, goal);

	/*
	 * allocate new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			ufs_cpu_to_data_ptr(sb, p, result);
			*err = 0;
			UFS_I(inode)->i_lastfrag =
				max_t(u32, UFS_I(inode)->i_lastfrag,
				      fragment + count);
			ufs_clear_frags(inode, result + oldcount,
					newcount - oldcount, locked_page != NULL);
		}
		unlock_super(sb);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * resize block
	 */
	result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
	if (result) {
		*err = 0;
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		unlock_super(sb);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * allocate new block and move data
	 */
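	/*
	 * fs_optim selects the allocation policy: UFS_OPTSPACE packs new
	 * data into exactly-fitting fragment runs to limit fragmentation
	 * of free space, while UFS_OPTTIME grabs a whole block (s_fpb
	 * fragments) to limit how often data must be recopied.  The
	 * filesystem flips between the two as free fragments become
	 * scarce or plentiful.
	 */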
	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
	    case UFS_OPTSPACE:
		request = newcount;
		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	    default:
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		/* fall through */
	    case UFS_OPTTIME:
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
		    (uspi->s_minfree - 2) / 100)
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
		break;
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
				   uspi->s_sbbase + tmp,
				   uspi->s_sbbase + result, locked_page);
		ufs_cpu_to_data_ptr(sb, p, result);
		*err = 0;
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
		unlock_super(sb);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	unlock_super(sb);
	UFSD("EXIT (FAILED)\n");
	return 0;
}
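
/*
 * Try to extend the allocation at 'fragment' in place from 'oldcount'
 * to 'newcount' fragments.  Returns 'fragment' on success, or 0 if the
 * following fragments of the block are not free.
 */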
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first (uspi);
	count = newcount - oldcount;

	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			   "internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			   "internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

	return fragment;
}
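
/*
 * A cylinder group can satisfy a request if it holds a whole free
 * block, or a free run of at least 'count' fragments (cg_frsum[k]
 * counts the free runs of length k).
 */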
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;
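
/*
 * Allocate 'count' fragments near 'goal'.  The cylinder group search
 * proceeds in three steps: the preferred group, then a quadratic
 * rehash across the groups, then a linear sweep of the remainder.
 */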
static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	oldcg = cgno;

	/*
	 * 1. searching on preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
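	/*
	 * e.g. with 16 groups and oldcg == 5 this probes groups
	 * 6, 8, 12 and then 20 mod 16 == 4 (j == 1, 2, 4, 8).
	 */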
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at j = 2 (the preferred group was checked in step 1,
	 * its successor in step 2)
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			   "internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succeed;
	}

	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succeed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succeed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}
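
/*
 * Allocate a whole block in cylinder group 'ucpi', at 'goal' if that
 * block is free, otherwise near it.  Returns the first fragment of the
 * allocated block, or INVBLOCK if the group has no free block.
 */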
static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd(uspi, goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	if (uspi->fs_magic != UFS2_MAGIC) {
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}
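
/*
 * Scan 'size' bytes of the free-fragment bitmap starting at byte
 * offset 'begin', looking for a byte whose 'table' entry has a bit of
 * 'mask' set; the bitmap may span several buffer heads of 'ubh'.
 * Returns the number of bytes remaining from the matching byte to the
 * end of the scan area, or 0 if no byte matched.
 */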
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;

	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 * @sb: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: block near which to search for the new one
 * @count: specified size
 */
static u64 ufs_bitmap_search(struct super_block *sb,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
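	/*
	 * For count == 2, for example, mask is 0xf and want is 0x6
	 * (binary 0110): a run of exactly two free fragments fenced by
	 * allocated ones.  The extra shift of 'blockmap' below supplies
	 * a zero guard bit at the low end.
	 */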
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_super_block_first *usb1;
	struct ufs_cylinder_group *ucg;
	unsigned start, length, loc;
	unsigned pos, want, blockmap, mask, end;
	u64 result;

	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
	     (unsigned long long)goal, count);

	usb1 = ubh_get_usb_first (uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal)
		start = ufs_dtogd(uspi, goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return INVBLOCK;
		}
		start = 0;
	}

	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */
	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %llu\n",
				     (unsigned long long)result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return INVBLOCK;
}
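
/*
 * Update the cluster map and the cluster summary counts after the
 * block at 'blkno' is freed (cnt == 1) or allocated (cnt == -1):
 * measure the free cluster the block joins or splits and adjust the
 * per-length totals accordingly.
 */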
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if ( end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0 )
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if ( i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}
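
/*
 * ufs_fragtable_8fpb is indexed by one byte of the free-fragment
 * bitmap; bit k-1 of an entry is set when that byte contains a free
 * run of exactly k fragments (e.g. table[0x03] == 0x02, one run of
 * two).  ufs_fragtable_other serves block sizes with fewer fragments
 * per block; its entries appear to encode the run lengths with a bias
 * of (s_fpb & 7), matching the mask built in ufs_bitmap_search().
 */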
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};

static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};