balloc.c

/*
 * linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */

#include <linux/fs.h>
#include <linux/ufs_fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>

#include "swab.h"
#include "util.h"

#define INVBLOCK ((u64)-1L)

static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned, int *);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search(struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);

/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
    struct super_block *sb;
    struct ufs_sb_private_info *uspi;
    struct ufs_super_block_first *usb1;
    struct ufs_cg_private_info *ucpi;
    struct ufs_cylinder_group *ucg;
    unsigned cgno, bit, end_bit, bbase, blkmap, i;
    u64 blkno;

    sb = inode->i_sb;
    uspi = UFS_SB(sb)->s_uspi;
    usb1 = ubh_get_usb_first(uspi);

    UFSD("ENTER, fragment %llu, count %u\n",
         (unsigned long long)fragment, count);

    if (ufs_fragnum(fragment) + count > uspi->s_fpg)
        ufs_error(sb, "ufs_free_fragments", "internal error");

    lock_super(sb);

    cgno = ufs_dtog(uspi, fragment);
    bit = ufs_dtogd(uspi, fragment);
    if (cgno >= uspi->s_ncg) {
        ufs_panic(sb, "ufs_free_fragments", "freeing blocks outside device");
        goto failed;
    }

    ucpi = ufs_load_cylinder(sb, cgno);
    if (!ucpi)
        goto failed;
    ucg = ubh_get_ucg(UCPI_UBH(ucpi));
    if (!ufs_cg_chkmagic(sb, ucg)) {
        ufs_panic(sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
        goto failed;
    }

    end_bit = bit + count;
    bbase = ufs_blknum(bit);
    blkmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
    ufs_fragacct(sb, blkmap, ucg->cg_frsum, -1);
    for (i = bit; i < end_bit; i++) {
        if (ubh_isclr(UCPI_UBH(ucpi), ucpi->c_freeoff, i))
            ubh_setbit(UCPI_UBH(ucpi), ucpi->c_freeoff, i);
        else
            ufs_error(sb, "ufs_free_fragments",
                      "bit already cleared for fragment %u", i);
    }

    DQUOT_FREE_BLOCK(inode, count);

    fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
    uspi->cs_total.cs_nffree += count;
    fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
    blkmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
    ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

    /*
     * Try to reassemble the freed fragments into a whole block
     */
    blkno = ufs_fragstoblks(bbase);
    if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
        fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
        uspi->cs_total.cs_nffree -= uspi->s_fpb;
        fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
        if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
            ufs_clusteracct(sb, ucpi, blkno, 1);
        fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
        uspi->cs_total.cs_nbfree++;
        fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
        if (uspi->fs_magic != UFS2_MAGIC) {
            unsigned cylno = ufs_cbtocylno(bbase);

            fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
                                      ufs_cbtorpos(bbase)), 1);
            fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
        }
    }

    ubh_mark_buffer_dirty(USPI_UBH(uspi));
    ubh_mark_buffer_dirty(UCPI_UBH(ucpi));
    if (sb->s_flags & MS_SYNCHRONOUS) {
        ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
        ubh_wait_on_buffer(UCPI_UBH(ucpi));
    }
    sb->s_dirt = 1;

    unlock_super(sb);
    UFSD("EXIT\n");
    return;

failed:
    unlock_super(sb);
    UFSD("EXIT (FAILED)\n");
    return;
}

/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
    struct super_block *sb;
    struct ufs_sb_private_info *uspi;
    struct ufs_super_block_first *usb1;
    struct ufs_cg_private_info *ucpi;
    struct ufs_cylinder_group *ucg;
    unsigned overflow, cgno, bit, end_bit, i;
    u64 blkno;

    sb = inode->i_sb;
    uspi = UFS_SB(sb)->s_uspi;
    usb1 = ubh_get_usb_first(uspi);

    UFSD("ENTER, fragment %llu, count %u\n",
         (unsigned long long)fragment, count);

    if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
        ufs_error(sb, "ufs_free_blocks", "internal error, "
                  "fragment %llu, count %u\n",
                  (unsigned long long)fragment, count);
        goto failed;
    }

    lock_super(sb);
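
    /*
     * The range of whole blocks being freed may span more than one
     * cylinder group; each pass below handles the part that falls in
     * one cg and loops back to 'do_more' with the overflow.
     */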
do_more:
    overflow = 0;
    cgno = ufs_dtog(uspi, fragment);
    bit = ufs_dtogd(uspi, fragment);
    if (cgno >= uspi->s_ncg) {
        ufs_panic(sb, "ufs_free_blocks", "freeing blocks outside device");
        goto failed_unlock;
    }
    end_bit = bit + count;
    if (end_bit > uspi->s_fpg) {
        overflow = bit + count - uspi->s_fpg;
        count -= overflow;
        end_bit -= overflow;
    }

    ucpi = ufs_load_cylinder(sb, cgno);
    if (!ucpi)
        goto failed_unlock;
    ucg = ubh_get_ucg(UCPI_UBH(ucpi));
    if (!ufs_cg_chkmagic(sb, ucg)) {
        ufs_panic(sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
        goto failed_unlock;
    }

    for (i = bit; i < end_bit; i += uspi->s_fpb) {
        blkno = ufs_fragstoblks(i);
        if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
            ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
        }
        ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
        if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
            ufs_clusteracct(sb, ucpi, blkno, 1);
        DQUOT_FREE_BLOCK(inode, uspi->s_fpb);

        fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
        uspi->cs_total.cs_nbfree++;
        fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
        if (uspi->fs_magic != UFS2_MAGIC) {
            unsigned cylno = ufs_cbtocylno(i);

            fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
                                      ufs_cbtorpos(i)), 1);
            fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
        }
    }

    ubh_mark_buffer_dirty(USPI_UBH(uspi));
    ubh_mark_buffer_dirty(UCPI_UBH(ucpi));
    if (sb->s_flags & MS_SYNCHRONOUS) {
        ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
        ubh_wait_on_buffer(UCPI_UBH(ucpi));
    }

    if (overflow) {
        fragment += count;
        count = overflow;
        goto do_more;
    }

    sb->s_dirt = 1;
    unlock_super(sb);
    UFSD("EXIT\n");
    return;

failed_unlock:
    unlock_super(sb);
failed:
    UFSD("EXIT (FAILED)\n");
    return;
}

/*
 * Modify the inode page cache so that blocks whose b_blocknr is
 * oldb...oldb+count-1 become blocks whose b_blocknr is
 * newb...newb+count-1; we also assume that the blocks
 * oldb...oldb+count-1 sit at the end of the file.
 *
 * We can get here from ufs_writepage or ufs_prepare_write;
 * locked_page is an argument of those functions, so it is already
 * locked by the caller.
 */
static void ufs_change_blocknr(struct inode *inode, unsigned int beg,
                               unsigned int count, unsigned int oldb,
                               unsigned int newb, struct page *locked_page)
{
    const unsigned mask = (1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1;
    struct address_space * const mapping = inode->i_mapping;
    pgoff_t index, cur_index;
    unsigned end, pos, j;
    struct page *page;
    struct buffer_head *head, *bh;

    UFSD("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
         inode->i_ino, count, oldb, newb);

    BUG_ON(!locked_page);
    BUG_ON(!PageLocked(locked_page));

    cur_index = locked_page->index;

    for (end = count + beg; beg < end; beg = (beg | mask) + 1) {
        index = beg >> (PAGE_CACHE_SHIFT - inode->i_blkbits);

        if (likely(cur_index != index)) {
            page = ufs_get_locked_page(mapping, index);
            if (!page || IS_ERR(page)) /* it was truncated or EIO */
                continue;
        } else
            page = locked_page;

        head = page_buffers(page);
        bh = head;
        pos = beg & mask;
        for (j = 0; j < pos; ++j)
            bh = bh->b_this_page;
        j = 0;
        do {
            if (buffer_mapped(bh)) {
                pos = bh->b_blocknr - oldb;
                if (pos < count) {
                    UFSD(" change from %llu to %llu\n",
                         (unsigned long long)pos + oldb,
                         (unsigned long long)pos + newb);
                    bh->b_blocknr = newb + pos;
                    unmap_underlying_metadata(bh->b_bdev,
                                              bh->b_blocknr);
                    mark_buffer_dirty(bh);
                    ++j;
                }
            }

            bh = bh->b_this_page;
        } while (bh != head);

        if (j)
            set_page_dirty(page);

        if (likely(cur_index != index))
            ufs_put_locked_page(page);
    }
    UFSD("EXIT\n");
}
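
/*
 * Zero the 'n' fragments starting at fragment 'beg' through the buffer
 * cache, marking them uptodate and dirty; the buffers are written out
 * immediately when the inode is synchronous or 'sync' is set.
 */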
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
                            int sync)
{
    struct buffer_head *bh;
    sector_t end = beg + n;

    for (; beg < end; ++beg) {
        bh = sb_getblk(inode->i_sb, beg);
        lock_buffer(bh);
        memset(bh->b_data, 0, inode->i_sb->s_blocksize);
        set_buffer_uptodate(bh);
        mark_buffer_dirty(bh);
        unlock_buffer(bh);
        if (IS_SYNC(inode) || sync)
            sync_dirty_buffer(bh);
        brelse(bh);
    }
}
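
/*
 * ufs_new_fragments - allocate 'count' fragments for logical fragment
 * 'fragment', preferably near 'goal'; @p points at the on-disk block
 * pointer to update, and locked_page is the page already locked by the
 * caller (see ufs_change_blocknr above).
 *
 * Depending on what is already allocated, this either extends the
 * existing partial block in place or allocates a new area and moves
 * the old data there.  Returns the first fragment of the allocation,
 * 0 on failure (or if somebody else allocated it first), and INVBLOCK
 * on internal error.
 */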
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
                      u64 goal, unsigned count, int *err,
                      struct page *locked_page)
{
    struct super_block *sb;
    struct ufs_sb_private_info *uspi;
    struct ufs_super_block_first *usb1;
    unsigned cgno, oldcount, newcount;
    u64 tmp, request, result;

    UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
         inode->i_ino, (unsigned long long)fragment,
         (unsigned long long)goal, count);

    sb = inode->i_sb;
    uspi = UFS_SB(sb)->s_uspi;
    usb1 = ubh_get_usb_first(uspi);
    *err = -ENOSPC;

    lock_super(sb);
    tmp = ufs_data_ptr_to_cpu(sb, p);

    if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
        ufs_warning(sb, "ufs_new_fragments", "internal warning"
                    " fragment %llu, count %u",
                    (unsigned long long)fragment, count);
        count = uspi->s_fpb - ufs_fragnum(fragment);
    }
    oldcount = ufs_fragnum(fragment);
    newcount = oldcount + count;

    /*
     * Somebody else has just allocated our fragments
     */
    if (oldcount) {
        if (!tmp) {
            ufs_error(sb, "ufs_new_fragments", "internal error, "
                      "fragment %llu, tmp %llu\n",
                      (unsigned long long)fragment,
                      (unsigned long long)tmp);
            unlock_super(sb);
            return INVBLOCK;
        }
        if (fragment < UFS_I(inode)->i_lastfrag) {
            UFSD("EXIT (ALREADY ALLOCATED)\n");
            unlock_super(sb);
            return 0;
        }
    } else {
        if (tmp) {
            UFSD("EXIT (ALREADY ALLOCATED)\n");
            unlock_super(sb);
            return 0;
        }
    }

    /*
     * There is not enough space for the user on the device
     */
    if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
        unlock_super(sb);
        UFSD("EXIT (FAILED)\n");
        return 0;
    }

    if (goal >= uspi->s_size)
        goal = 0;
    if (goal == 0)
        cgno = ufs_inotocg(inode->i_ino);
    else
        cgno = ufs_dtog(uspi, goal);

    /*
     * allocate new fragment
     */
    if (oldcount == 0) {
        result = ufs_alloc_fragments(inode, cgno, goal, count, err);
        if (result) {
            ufs_cpu_to_data_ptr(sb, p, result);
            *err = 0;
            UFS_I(inode)->i_lastfrag =
                max_t(u32, UFS_I(inode)->i_lastfrag,
                      fragment + count);
            ufs_clear_frags(inode, result + oldcount,
                            newcount - oldcount, locked_page != NULL);
        }
        unlock_super(sb);
        UFSD("EXIT, result %llu\n", (unsigned long long)result);
        return result;
    }

    /*
     * resize block
     */
    result = ufs_add_fragments(inode, tmp, oldcount, newcount, err);
    if (result) {
        *err = 0;
        UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
        ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
                        locked_page != NULL);
        unlock_super(sb);
        UFSD("EXIT, result %llu\n", (unsigned long long)result);
        return result;
    }

    /*
     * allocate new block and move data
     */
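    /*
     * fs_optim selects the allocation policy: with UFS_OPTSPACE we ask
     * for exactly 'newcount' fragments to minimise fragmentation; with
     * UFS_OPTTIME we ask for a whole block for speed.  Note that the
     * 'default' case deliberately falls through into UFS_OPTTIME.
     */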
    switch (fs32_to_cpu(sb, usb1->fs_optim)) {
    case UFS_OPTSPACE:
        request = newcount;
        if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
            > uspi->s_dsize * uspi->s_minfree / (2 * 100))
            break;
        usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
        break;
    default:
        usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
    case UFS_OPTTIME:
        request = uspi->s_fpb;
        if (uspi->cs_total.cs_nffree < uspi->s_dsize *
            (uspi->s_minfree - 2) / 100)
            break;
        usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
        break;
    }
    result = ufs_alloc_fragments(inode, cgno, goal, request, err);
    if (result) {
        ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
                        locked_page != NULL);
        ufs_change_blocknr(inode, fragment - oldcount, oldcount, tmp,
                           result, locked_page);
        ufs_cpu_to_data_ptr(sb, p, result);
        *err = 0;
        UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
        unlock_super(sb);
        if (newcount < request)
            ufs_free_fragments(inode, result + newcount, request - newcount);
        ufs_free_fragments(inode, tmp, oldcount);
        UFSD("EXIT, result %llu\n", (unsigned long long)result);
        return result;
    }

    unlock_super(sb);
    UFSD("EXIT (FAILED)\n");
    return 0;
}
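
/*
 * Try to extend the existing allocation of 'oldcount' fragments at
 * 'fragment' to 'newcount' fragments in place.  Returns 'fragment' on
 * success, or 0 if the neighbouring fragments are not free or the
 * cylinder group has no room.
 */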
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
                             unsigned oldcount, unsigned newcount, int *err)
{
    struct super_block *sb;
    struct ufs_sb_private_info *uspi;
    struct ufs_super_block_first *usb1;
    struct ufs_cg_private_info *ucpi;
    struct ufs_cylinder_group *ucg;
    unsigned cgno, fragno, fragoff, count, fragsize, i;

    UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
         (unsigned long long)fragment, oldcount, newcount);

    sb = inode->i_sb;
    uspi = UFS_SB(sb)->s_uspi;
    usb1 = ubh_get_usb_first(uspi);
    count = newcount - oldcount;

    cgno = ufs_dtog(uspi, fragment);
    if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
        return 0;
    if ((ufs_fragnum(fragment) + newcount) > uspi->s_fpb)
        return 0;
    ucpi = ufs_load_cylinder(sb, cgno);
    if (!ucpi)
        return 0;
    ucg = ubh_get_ucg(UCPI_UBH(ucpi));
    if (!ufs_cg_chkmagic(sb, ucg)) {
        ufs_panic(sb, "ufs_add_fragments",
                  "internal error, bad magic number on cg %u", cgno);
        return 0;
    }

    fragno = ufs_dtogd(uspi, fragment);
    fragoff = ufs_fragnum(fragno);
    for (i = oldcount; i < newcount; i++)
        if (ubh_isclr(UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
            return 0;
    /*
     * Block can be extended
     */
    ucg->cg_time = cpu_to_fs32(sb, get_seconds());
    for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
        if (ubh_isclr(UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
            break;
    fragsize = i - oldcount;
    if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
        ufs_panic(sb, "ufs_add_fragments",
                  "internal error or corrupted bitmap on cg %u", cgno);
    fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
    if (fragsize != count)
        fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
    for (i = oldcount; i < newcount; i++)
        ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
    if (DQUOT_ALLOC_BLOCK(inode, count)) {
        *err = -EDQUOT;
        return 0;
    }

    fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
    fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
    uspi->cs_total.cs_nffree -= count;

    ubh_mark_buffer_dirty(USPI_UBH(uspi));
    ubh_mark_buffer_dirty(UCPI_UBH(ucpi));
    if (sb->s_flags & MS_SYNCHRONOUS) {
        ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
        ubh_wait_on_buffer(UCPI_UBH(ucpi));
    }
    sb->s_dirt = 1;

    UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

    return fragment;
}
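
/*
 * Jump to cg_found when cylinder group 'cgno' has either a whole free
 * block or a free fragment run of at least 'count' fragments.
 */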
#define UFS_TEST_FREE_SPACE_CG \
    ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
    if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
        goto cg_found; \
    for (k = count; k < uspi->s_fpb; k++) \
        if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
            goto cg_found;

static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
                               u64 goal, unsigned count, int *err)
{
    struct super_block *sb;
    struct ufs_sb_private_info *uspi;
    struct ufs_super_block_first *usb1;
    struct ufs_cg_private_info *ucpi;
    struct ufs_cylinder_group *ucg;
    unsigned oldcg, i, j, k, allocsize;
    u64 result;

    UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
         inode->i_ino, cgno, (unsigned long long)goal, count);

    sb = inode->i_sb;
    uspi = UFS_SB(sb)->s_uspi;
    usb1 = ubh_get_usb_first(uspi);
    oldcg = cgno;

    /*
     * 1. search in the preferred cylinder group
     */
    UFS_TEST_FREE_SPACE_CG

    /*
     * 2. quadratic rehash
     */
    for (j = 1; j < uspi->s_ncg; j *= 2) {
        cgno += j;
        if (cgno >= uspi->s_ncg)
            cgno -= uspi->s_ncg;
        UFS_TEST_FREE_SPACE_CG
    }

    /*
     * 3. brute force search
     * We start at j = 2 (oldcg was checked in step 1, oldcg + 1 in step 2)
     */
    cgno = (oldcg + 1) % uspi->s_ncg;
    for (j = 2; j < uspi->s_ncg; j++) {
        cgno++;
        if (cgno >= uspi->s_ncg)
            cgno = 0;
        UFS_TEST_FREE_SPACE_CG
    }

    UFSD("EXIT (FAILED)\n");
    return 0;

cg_found:
    ucpi = ufs_load_cylinder(sb, cgno);
    if (!ucpi)
        return 0;
    ucg = ubh_get_ucg(UCPI_UBH(ucpi));
    if (!ufs_cg_chkmagic(sb, ucg))
        ufs_panic(sb, "ufs_alloc_fragments",
                  "internal error, bad magic number on cg %u", cgno);
    ucg->cg_time = cpu_to_fs32(sb, get_seconds());

    if (count == uspi->s_fpb) {
        result = ufs_alloccg_block(inode, ucpi, goal, err);
        if (result == INVBLOCK)
            return 0;
        goto succeed;
    }

    for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
        if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
            break;

    if (allocsize == uspi->s_fpb) {
        result = ufs_alloccg_block(inode, ucpi, goal, err);
        if (result == INVBLOCK)
            return 0;
        goal = ufs_dtogd(uspi, result);
        for (i = count; i < uspi->s_fpb; i++)
            ubh_setbit(UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
        i = uspi->s_fpb - count;
        DQUOT_FREE_BLOCK(inode, i);

        fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
        uspi->cs_total.cs_nffree += i;
        fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
        fs32_add(sb, &ucg->cg_frsum[i], 1);
        goto succeed;
    }

    result = ufs_bitmap_search(sb, ucpi, goal, allocsize);
    if (result == INVBLOCK)
        return 0;
    if (DQUOT_ALLOC_BLOCK(inode, count)) {
        *err = -EDQUOT;
        return 0;
    }
    for (i = 0; i < count; i++)
        ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

    fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
    uspi->cs_total.cs_nffree -= count;
    fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
    fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
    if (count != allocsize)
        fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succeed:
    ubh_mark_buffer_dirty(USPI_UBH(uspi));
    ubh_mark_buffer_dirty(UCPI_UBH(ucpi));
    if (sb->s_flags & MS_SYNCHRONOUS) {
        ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
        ubh_wait_on_buffer(UCPI_UBH(ucpi));
    }
    sb->s_dirt = 1;

    result += cgno * uspi->s_fpg;
    UFSD("EXIT3, result %llu\n", (unsigned long long)result);
    return result;
}
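
/*
 * Allocate a whole block in the given cylinder group, at 'goal' if
 * that block is free, otherwise near the rotor.  Returns the
 * cg-relative fragment number of the block's first fragment, or
 * INVBLOCK on failure.
 */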
static u64 ufs_alloccg_block(struct inode *inode,
                             struct ufs_cg_private_info *ucpi,
                             u64 goal, int *err)
{
    struct super_block *sb;
    struct ufs_sb_private_info *uspi;
    struct ufs_super_block_first *usb1;
    struct ufs_cylinder_group *ucg;
    u64 result, blkno;

    UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

    sb = inode->i_sb;
    uspi = UFS_SB(sb)->s_uspi;
    usb1 = ubh_get_usb_first(uspi);
    ucg = ubh_get_ucg(UCPI_UBH(ucpi));

    if (goal == 0) {
        goal = ucpi->c_rotor;
        goto norot;
    }
    goal = ufs_blknum(goal);
    goal = ufs_dtogd(uspi, goal);

    /*
     * If the requested block is available, use it.
     */
    if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
        result = goal;
        goto gotit;
    }

norot:
    result = ufs_bitmap_search(sb, ucpi, goal, uspi->s_fpb);
    if (result == INVBLOCK)
        return INVBLOCK;
    ucpi->c_rotor = result;
gotit:
    blkno = ufs_fragstoblks(result);
    ubh_clrblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
    if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
        ufs_clusteracct(sb, ucpi, blkno, -1);
    if (DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
        *err = -EDQUOT;
        return INVBLOCK;
    }

    fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
    uspi->cs_total.cs_nbfree--;
    fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

    if (uspi->fs_magic != UFS2_MAGIC) {
        unsigned cylno = ufs_cbtocylno((unsigned)result);

        fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
                                  ufs_cbtorpos((unsigned)result)), 1);
        fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
    }

    UFSD("EXIT, result %llu\n", (unsigned long long)result);

    return result;
}
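
/*
 * Scan 'size' bytes of the bitmap starting at byte offset 'begin' for
 * a byte whose table entry has a bit from 'mask' set.  Returns the
 * number of bytes left to scan, counting the matching byte, or 0 if
 * no byte matches; the caller turns that into a bit offset.
 */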
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
                          struct ufs_buffer_head *ubh,
                          unsigned begin, unsigned size,
                          unsigned char *table, unsigned char mask)
{
    unsigned rest, offset;
    unsigned char *cp;

    offset = begin & ~uspi->s_fmask;
    begin >>= uspi->s_fshift;
    for (;;) {
        if ((offset + size) < uspi->s_fsize)
            rest = size;
        else
            rest = uspi->s_fsize - offset;
        size -= rest;
        cp = ubh->bh[begin]->b_data + offset;
        while ((table[*cp++] & mask) == 0 && --rest)
            ;
        if (rest || !size)
            break;
        begin++;
        offset = 0;
    }
    return (size + rest);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 * @sb: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: near which block we want to find a new one
 * @count: specified size
 */
static u64 ufs_bitmap_search(struct super_block *sb,
                             struct ufs_cg_private_info *ucpi,
                             u64 goal, unsigned count)
{
    /*
     * Bit patterns for identifying fragments in the block map
     * used as ((map & mask_arr) == want_arr)
     */
    static const int mask_arr[9] = {
        0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
    };
    static const int want_arr[9] = {
        0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
    };
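
    /*
     * For example, with count == 2 we test (map & 0xf) == 0x6: the two
     * middle bits must be free (set) and both neighbours allocated
     * (clear), i.e. a maximal run of exactly two free fragments.  The
     * map is shifted left one bit before the test, so a run starting
     * at fragment 0 still sees a zero "previous" bit.
     */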

    struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
    struct ufs_super_block_first *usb1;
    struct ufs_cylinder_group *ucg;
    unsigned start, length, loc;
    unsigned pos, want, blockmap, mask, end;
    u64 result;

    UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
         (unsigned long long)goal, count);

    usb1 = ubh_get_usb_first(uspi);
    ucg = ubh_get_ucg(UCPI_UBH(ucpi));

    if (goal)
        start = ufs_dtogd(uspi, goal) >> 3;
    else
        start = ucpi->c_frotor >> 3;

    length = ((uspi->s_fpg + 7) >> 3) - start;
    loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
                    (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
                    1 << (count - 1 + (uspi->s_fpb & 7)));
    if (loc == 0) {
        length = start + 1;
        loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
                        (uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
                        ufs_fragtable_other,
                        1 << (count - 1 + (uspi->s_fpb & 7)));
        if (loc == 0) {
            ufs_error(sb, "ufs_bitmap_search",
                      "bitmap corrupted on cg %u, start %u,"
                      " length %u, count %u, freeoff %u\n",
                      ucpi->c_cgx, start, length, count,
                      ucpi->c_freeoff);
            return INVBLOCK;
        }
        start = 0;
    }

    result = (start + length - loc) << 3;
    ucpi->c_frotor = result;

    /*
     * found the byte in the map
     */
    for (end = result + 8; result < end; result += uspi->s_fpb) {
        blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
        blockmap <<= 1;
        mask = mask_arr[count];
        want = want_arr[count];
        for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
            if ((blockmap & mask) == want) {
                UFSD("EXIT, result %llu\n",
                     (unsigned long long)result);
                return result + pos;
            }
            mask <<= 1;
            want <<= 1;
        }
    }

    ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
              ucpi->c_cgx);
    UFSD("EXIT (FAILED)\n");
    return INVBLOCK;
}
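
/*
 * Keep the 44BSD cluster bitmap and cluster summary array in step with
 * the block map: cnt is +1 when a block is freed and -1 when one is
 * allocated.
 */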
static void ufs_clusteracct(struct super_block *sb,
                            struct ufs_cg_private_info *ucpi, unsigned blkno, int cnt)
{
    struct ufs_sb_private_info *uspi;
    int i, start, end, forw, back;

    uspi = UFS_SB(sb)->s_uspi;
    if (uspi->s_contigsumsize <= 0)
        return;

    if (cnt > 0)
        ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
    else
        ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

    /*
     * Find the size of the cluster going forward.
     */
    start = blkno + 1;
    end = start + uspi->s_contigsumsize;
    if (end >= ucpi->c_nclusterblks)
        end = ucpi->c_nclusterblks;
    i = ubh_find_next_zero_bit(UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
    if (i > end)
        i = end;
    forw = i - start;

    /*
     * Find the size of the cluster going backward.
     */
    start = blkno - 1;
    end = start - uspi->s_contigsumsize;
    if (end < 0)
        end = -1;
    i = ubh_find_last_zero_bit(UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
    if (i < end)
        i = end;
    back = start - i;

    /*
     * Account for old cluster and the possibly new forward and
     * back clusters.
     */
    i = back + forw + 1;
    if (i > uspi->s_contigsumsize)
        i = uspi->s_contigsumsize;
    fs32_add(sb, (__fs32 *)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
    if (back > 0)
        fs32_sub(sb, (__fs32 *)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
    if (forw > 0)
        fs32_sub(sb, (__fs32 *)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}
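
/*
 * Each fragtable entry maps one byte of the free-fragment bitmap to a
 * bit set describing the free runs inside that byte: for 8 fragments
 * per block, bit (n - 1) of ufs_fragtable_8fpb[map] is set when the
 * byte holds a maximal free run of n fragments (e.g. 0xff -> 0x80).
 * ufs_fragtable_other packs the same information for the 1/2/4
 * fragments-per-block layouts, matching the shifted mask that
 * ufs_bitmap_search passes to ubh_scanc.
 */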
static unsigned char ufs_fragtable_8fpb[] = {
    0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
    0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
    0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
    0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
    0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
    0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
    0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
    0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
    0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
    0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
    0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
    0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
    0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
    0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
    0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
    0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};

static unsigned char ufs_fragtable_other[] = {
    0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
    0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
    0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
    0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
    0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
    0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
    0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
    0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
    0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
    0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
    0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
    0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
    0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
    0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
    0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
    0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};