/*
 * balloc.c
 *
 * PURPOSE
 *	Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"

#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size)
#define udf_find_next_one_bit(addr, size, offset) \
		find_next_one_bit(addr, size, offset)

#define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x)
#define leNUM_to_cpup(x, y) xleNUM_to_cpup(x, y)
#define xleNUM_to_cpup(x, y) (le ## x ## _to_cpup(y))
#define uintBPL_t uint(BITS_PER_LONG)
#define uint(x) xuint(x)
#define xuint(x) __le ## x
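
/*
 * Scan a little-endian on-disk bitmap for the next set bit at or after
 * 'offset', word by word, and return its index ('size' when 'offset' is
 * already past the end). leBPL_to_cpup() converts each bitmap word from
 * little-endian to CPU order before it is tested.
 */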
static inline int find_next_one_bit(void *addr, int size, int offset)
{
	uintBPL_t *p = ((uintBPL_t *) addr) + (offset / BITS_PER_LONG);
	int result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= (BITS_PER_LONG - 1);
	if (offset) {
		tmp = leBPL_to_cpup(p++);
		tmp &= ~0UL << offset;
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size & ~(BITS_PER_LONG - 1)) {
		tmp = leBPL_to_cpup(p++);
		if (tmp)
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = leBPL_to_cpup(p);
found_first:
	tmp &= ~0UL >> (BITS_PER_LONG - size);
found_middle:
	return result + ffz(~tmp);
}

#define find_first_one_bit(addr, size)\
	find_next_one_bit((addr), (size), 0)
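
/*
 * Read one block of the on-disk space bitmap and cache its buffer_head
 * in bitmap->s_block_bitmap[bitmap_nr]. Returns 0 on success or -EIO if
 * the block could not be read (the cache slot is then left NULL).
 */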
static int read_block_bitmap(struct super_block *sb,
			     struct udf_bitmap *bitmap, unsigned int block,
			     unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int retval = 0;
	kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

	bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
	if (!bh)
		retval = -EIO;

	bitmap->s_block_bitmap[bitmap_nr] = bh;
	return retval;
}
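
/*
 * Make sure the bitmap block for 'block_group' is loaded, reading it in
 * on first use. Returns the group number (which doubles as the cache
 * slot index) or a negative errno from read_block_bitmap().
 */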
static int __load_block_bitmap(struct super_block *sb,
			       struct udf_bitmap *bitmap,
			       unsigned int block_group)
{
	int retval = 0;
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups) {
		udf_debug("block_group (%d) >= nr_groups (%d)\n", block_group,
			  nr_groups);
	}

	if (bitmap->s_block_bitmap[block_group]) {
		return block_group;
	} else {
		retval = read_block_bitmap(sb, bitmap, block_group,
					   block_group);
		if (retval < 0)
			return retval;
		return block_group;
	}
}
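
/*
 * Wrapper around __load_block_bitmap() that turns a missing buffer_head
 * into -EIO, so callers only have to check for a negative slot.
 */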
static inline int load_block_bitmap(struct super_block *sb,
				    struct udf_bitmap *bitmap,
				    unsigned int block_group)
{
	int slot;

	slot = __load_block_bitmap(sb, bitmap, block_group);
	if (slot < 0)
		return slot;

	if (!bitmap->s_block_bitmap[slot])
		return -EIO;

	return slot;
}
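
/*
 * Adjust the free-space counter for 'partition' in the cached Logical
 * Volume Integrity Descriptor by 'cnt'. Callers pass negative counts
 * for allocations, relying on 32-bit wraparound. Returns false when no
 * LVID is cached, in which case there is nothing to update.
 */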
static bool udf_add_free_space(struct udf_sb_info *sbi,
			       u16 partition, u32 cnt)
{
	struct logicalVolIntegrityDesc *lvid;

	if (sbi->s_lvid_bh == NULL)
		return false;

	lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
	le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
	return true;
}
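
/*
 * Free 'count' blocks starting at bloc+offset by setting their bits in
 * the space bitmap (a set bit marks a free block), walking across
 * bitmap group boundaries as needed. Bits found already set are only
 * reported via udf_debug(), not double-counted in the free-space total.
 */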
static void udf_bitmap_free_blocks(struct super_block *sb,
				   struct inode *inode,
				   struct udf_bitmap *bitmap,
				   kernel_lb_addr bloc, uint32_t offset,
				   uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	if (bloc.logicalBlockNum < 0 ||
	    (bloc.logicalBlockNum + count) >
		sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			  sbi->s_partmaps[bloc.partitionReferenceNum].
							s_partition_len);
		goto error_return;
	}

	block = bloc.logicalBlockNum + offset +
		(sizeof(struct spaceBitmapDesc) << 3);

	do {
		overflow = 0;
		block_group = block >> (sb->s_blocksize_bits + 3);
		bit = block % (sb->s_blocksize << 3);

		/*
		 * Check to see if we are freeing blocks across a group
		 * boundary.
		 */
		if (bit + count > (sb->s_blocksize << 3)) {
			overflow = bit + count - (sb->s_blocksize << 3);
			count -= overflow;
		}
		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;

		bh = bitmap->s_block_bitmap[bitmap_nr];
		for (i = 0; i < count; i++) {
			if (udf_set_bit(bit + i, bh->b_data)) {
				udf_debug("bit %ld already set\n", bit + i);
				udf_debug("byte=%2x\n",
					  ((char *)bh->b_data)[(bit + i) >> 3]);
			} else {
				if (inode)
					DQUOT_FREE_BLOCK(inode, 1);
				udf_add_free_space(sbi, sbi->s_partition, 1);
			}
		}
		mark_buffer_dirty(bh);
		if (overflow) {
			block += count;
			count = overflow;
		}
	} while (overflow);

error_return:
	sb->s_dirt = 1;
	if (sbi->s_lvid_bh)
		mark_buffer_dirty(sbi->s_lvid_bh);
	mutex_unlock(&sbi->s_alloc_mutex);
}
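
/*
 * Try to reserve up to 'block_count' contiguous blocks starting at
 * 'first_block' by clearing their bits in the space bitmap. The scan
 * stops at the first block that is already in use (or when quota runs
 * out) and returns the number of blocks actually reserved.
 */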
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
				      struct inode *inode,
				      struct udf_bitmap *bitmap,
				      uint16_t partition, uint32_t first_block,
				      uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group, group_start;
	int nr_groups, bitmap_nr;
	struct buffer_head *bh;
	__u32 part_len;

	mutex_lock(&sbi->s_alloc_mutex);
	part_len = sbi->s_partmaps[partition].s_partition_len;
	if (first_block < 0 || first_block >= part_len)
		goto out;

	if (first_block + block_count > part_len)
		block_count = part_len - first_block;

	do {
		nr_groups = udf_compute_nr_groups(sb, partition);
		block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
		block_group = block >> (sb->s_blocksize_bits + 3);
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto out;
		bh = bitmap->s_block_bitmap[bitmap_nr];

		bit = block % (sb->s_blocksize << 3);

		while (bit < (sb->s_blocksize << 3) && block_count > 0) {
			if (!udf_test_bit(bit, bh->b_data))
				goto out;
			else if (DQUOT_PREALLOC_BLOCK(inode, 1))
				goto out;
			else if (!udf_clear_bit(bit, bh->b_data)) {
				udf_debug("bit already cleared for block %d\n",
					  bit);
				DQUOT_FREE_BLOCK(inode, 1);
				goto out;
			}
			block_count--;
			alloc_count++;
			bit++;
			block++;
		}
		mark_buffer_dirty(bh);
	} while (block_count > 0);

out:
	if (udf_add_free_space(sbi, partition, -alloc_count))
		mark_buffer_dirty(sbi->s_lvid_bh);
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
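
/*
 * Allocate a single block, preferring 'goal'. The search probes the
 * goal bit itself, then the rest of its 64-bit window, then scans the
 * goal's bitmap group for a fully-free byte (memscan for 0xFF) and
 * bit-wise, and finally walks the remaining groups. search_back then
 * nudges the choice up to 7 bits backwards towards the start of the
 * free run. Returns the new block number, or 0 with *err set.
 */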
static int udf_bitmap_new_block(struct super_block *sb,
				struct inode *inode,
				struct udf_bitmap *bitmap, uint16_t partition,
				uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0, block, block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	int newblock = 0;

	*err = -ENOSPC;
	mutex_lock(&sbi->s_alloc_mutex);

repeat:
	if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	ptr = memscan((char *)bh->b_data + group_start, 0xFF,
		      sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
		bit = block % (sb->s_blocksize << 3);
		if (udf_test_bit(bit, bh->b_data))
			goto got_block;

		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;

		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
			      sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto search_back;
		}

		newbit = udf_find_next_one_bit(bh->b_data,
					       sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto got_block;
		}
	}

	for (i = 0; i < (nr_groups * 2); i++) {
		block_group++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups) {
			ptr = memscan((char *)bh->b_data + group_start, 0xFF,
				      sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		} else {
			bit = udf_find_next_one_bit((char *)bh->b_data,
						    sb->s_blocksize << 3,
						    group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups * 2)) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
					    group_start << 3);
	if (bit >= sb->s_blocksize << 3) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

search_back:
	i = 0;
	while (i < 7 && bit > (group_start << 3) &&
	       udf_test_bit(bit - 1, bh->b_data)) {
		++i;
		--bit;
	}

got_block:
	/*
	 * Check quota for allocation of this block.
	 */
	if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
		mutex_unlock(&sbi->s_alloc_mutex);
		*err = -EDQUOT;
		return 0;
	}

	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (!udf_clear_bit(bit, bh->b_data)) {
		udf_debug("bit already cleared for block %d\n", bit);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	if (udf_add_free_space(sbi, partition, -1))
		mark_buffer_dirty(sbi->s_lvid_bh);
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;

error_return:
	*err = -EIO;
	mutex_unlock(&sbi->s_alloc_mutex);
	return 0;
}
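
/*
 * Free blocks tracked by an unallocated-space table: merge the freed
 * range into an adjacent extent when one ends at 'start' or begins at
 * 'end + 1', otherwise append a new extent, stealing one block from the
 * freed range itself for a fresh allocation extent descriptor when the
 * current descriptor area is full (see the NOTE below).
 */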
static void udf_table_free_blocks(struct super_block *sb,
				  struct inode *inode,
				  struct inode *table,
				  kernel_lb_addr bloc, uint32_t offset,
				  uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t start, end;
	uint32_t elen;
	kernel_lb_addr eloc;
	struct extent_position oepos, epos;
	int8_t etype;
	int i;
	struct udf_inode_info *iinfo;

	mutex_lock(&sbi->s_alloc_mutex);
	if (bloc.logicalBlockNum < 0 ||
	    (bloc.logicalBlockNum + count) >
		sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			  sbi->s_partmaps[bloc.partitionReferenceNum].
							s_partition_len);
		goto error_return;
	}

	iinfo = UDF_I(table);

	/* We do this up front - there are some error conditions that
	   could occur, but.. oh well */
	if (inode)
		DQUOT_FREE_BLOCK(inode, count);
	if (udf_add_free_space(sbi, sbi->s_partition, count))
		mark_buffer_dirty(sbi->s_lvid_bh);

	start = bloc.logicalBlockNum + offset;
	end = bloc.logicalBlockNum + offset + count - 1;

	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	epos.block = oepos.block = iinfo->i_location;
	epos.bh = oepos.bh = NULL;

	while (count &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (((eloc.logicalBlockNum +
			(elen >> sb->s_blocksize_bits)) == start)) {
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				start += tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, &oepos, eloc, elen, 1);
		} else if (eloc.logicalBlockNum == (end + 1)) {
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				end -= tmp;
				eloc.logicalBlockNum -= tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, &oepos, eloc, elen, 1);
		}

		if (epos.bh != oepos.bh) {
			i = -1;
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = 0;
		} else {
			oepos.offset = epos.offset;
		}
	}

	if (count) {
		/*
		 * NOTE: we CANNOT use udf_add_aext here, as it can try to
		 * allocate a new block, and since we hold the super block
		 * lock already very bad things would happen :)
		 *
		 * We copy the behavior of udf_add_aext, but instead of
		 * trying to allocate a new block close to the existing one,
		 * we just steal a block from the extent we are trying to add.
		 *
		 * It would be nice if the blocks were close together, but it
		 * isn't required.
		 */
		int adsize;
		short_ad *sad = NULL;
		long_ad *lad = NULL;
		struct allocExtDesc *aed;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
			adsize = sizeof(short_ad);
		else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
			adsize = sizeof(long_ad);
		else {
			brelse(oepos.bh);
			brelse(epos.bh);
			goto error_return;
		}

		if (epos.offset + (2 * adsize) > sb->s_blocksize) {
			char *sptr, *dptr;
			int loffset;

			brelse(oepos.bh);
			oepos = epos;

			/* Steal a block from the extent being free'd */
			epos.block.logicalBlockNum = eloc.logicalBlockNum;
			eloc.logicalBlockNum++;
			elen -= sb->s_blocksize;

			epos.bh = udf_tread(sb,
					udf_get_lb_pblock(sb, epos.block, 0));
			if (!epos.bh) {
				brelse(oepos.bh);
				goto error_return;
			}
			aed = (struct allocExtDesc *)(epos.bh->b_data);
			aed->previousAllocExtLocation =
				cpu_to_le32(oepos.block.logicalBlockNum);
			if (epos.offset + adsize > sb->s_blocksize) {
				loffset = epos.offset;
				aed->lengthAllocDescs = cpu_to_le32(adsize);
				sptr = iinfo->i_ext.i_data + epos.offset
								- adsize;
				dptr = epos.bh->b_data +
					sizeof(struct allocExtDesc);
				memcpy(dptr, sptr, adsize);
				epos.offset = sizeof(struct allocExtDesc) +
						adsize;
			} else {
				loffset = epos.offset + adsize;
				aed->lengthAllocDescs = cpu_to_le32(0);
				if (oepos.bh) {
					sptr = oepos.bh->b_data + epos.offset;
					aed = (struct allocExtDesc *)
						oepos.bh->b_data;
					le32_add_cpu(&aed->lengthAllocDescs,
							adsize);
				} else {
					sptr = iinfo->i_ext.i_data +
								epos.offset;
					iinfo->i_lenAlloc += adsize;
					mark_inode_dirty(table);
				}
				epos.offset = sizeof(struct allocExtDesc);
			}
			if (sbi->s_udfrev >= 0x0200)
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
					    3, 1, epos.block.logicalBlockNum,
					    sizeof(tag));
			else
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
					    2, 1, epos.block.logicalBlockNum,
					    sizeof(tag));

			switch (iinfo->i_alloc_type) {
			case ICBTAG_FLAG_AD_SHORT:
				sad = (short_ad *)sptr;
				sad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					sb->s_blocksize);
				sad->extPosition =
					cpu_to_le32(epos.block.logicalBlockNum);
				break;
			case ICBTAG_FLAG_AD_LONG:
				lad = (long_ad *)sptr;
				lad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					sb->s_blocksize);
				lad->extLocation =
					cpu_to_lelb(epos.block);
				break;
			}
			if (oepos.bh) {
				udf_update_tag(oepos.bh->b_data, loffset);
				mark_buffer_dirty(oepos.bh);
			} else {
				mark_inode_dirty(table);
			}
		}

		/* It's possible that stealing the block emptied the extent */
		if (elen) {
			udf_write_aext(table, &epos, eloc, elen, 1);

			if (!epos.bh) {
				iinfo->i_lenAlloc += adsize;
				mark_inode_dirty(table);
			} else {
				aed = (struct allocExtDesc *)epos.bh->b_data;
				le32_add_cpu(&aed->lengthAllocDescs, adsize);
				udf_update_tag(epos.bh->b_data, epos.offset);
				mark_buffer_dirty(epos.bh);
			}
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

error_return:
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}
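
/*
 * Reserve up to 'block_count' blocks from the free-space table. This
 * only succeeds when an extent starting exactly at 'first_block'
 * exists; that extent is then shrunk from the front, or deleted when
 * fully consumed. Returns the number of blocks reserved.
 */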
static int udf_table_prealloc_blocks(struct super_block *sb,
				     struct inode *inode,
				     struct inode *table, uint16_t partition,
				     uint32_t first_block, uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t elen, adsize;
	kernel_lb_addr eloc;
	struct extent_position epos;
	int8_t etype = -1;
	struct udf_inode_info *iinfo;

	if (first_block < 0 ||
	    first_block >= sbi->s_partmaps[partition].s_partition_len)
		return 0;

	iinfo = UDF_I(table);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return 0;

	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = NULL;
	eloc.logicalBlockNum = 0xFFFFFFFF;

	while (first_block != eloc.logicalBlockNum &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
			  eloc.logicalBlockNum, elen, first_block);
		; /* empty loop body */
	}

	if (first_block == eloc.logicalBlockNum) {
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (inode && DQUOT_PREALLOC_BLOCK(inode,
			alloc_count > block_count ? block_count : alloc_count))
			alloc_count = 0;
		else if (alloc_count > block_count) {
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, eloc,
					(etype << 30) | elen, 1);
		} else
			udf_delete_aext(table, epos, eloc,
					(etype << 30) | elen);
	} else {
		alloc_count = 0;
	}

	brelse(epos.bh);

	if (alloc_count && udf_add_free_space(sbi, partition, -alloc_count)) {
		mark_buffer_dirty(sbi->s_lvid_bh);
		sb->s_dirt = 1;
	}
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
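
/*
 * Allocate one block from the free-space table, choosing the extent
 * whose start lies closest to 'goal' (an extent containing 'goal' is an
 * exact hit). The block is always taken from the front of the chosen
 * extent, so extents only shrink or disappear and never need splitting.
 */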
static int udf_table_new_block(struct super_block *sb,
			       struct inode *inode,
			       struct inode *table, uint16_t partition,
			       uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	uint32_t newblock = 0, adsize;
	uint32_t elen, goal_elen = 0;
	kernel_lb_addr eloc, uninitialized_var(goal_eloc);
	struct extent_position epos, goal_epos;
	int8_t etype;
	struct udf_inode_info *iinfo = UDF_I(table);

	*err = -ENOSPC;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return newblock;

	mutex_lock(&sbi->s_alloc_mutex);
	if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	/* We search for the closest matching block to goal. If we find
	   an exact hit, we stop. Otherwise we keep going till we run out
	   of extents. We store the buffer_head, bloc, and extoffset
	   of the current closest match and use that when we are done.
	 */
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = goal_epos.bh = NULL;

	while (spread &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (goal >= eloc.logicalBlockNum) {
			if (goal < eloc.logicalBlockNum +
					(elen >> sb->s_blocksize_bits))
				nspread = 0;
			else
				nspread = goal - eloc.logicalBlockNum -
					(elen >> sb->s_blocksize_bits);
		} else {
			nspread = eloc.logicalBlockNum - goal;
		}

		if (nspread < spread) {
			spread = nspread;
			if (goal_epos.bh != epos.bh) {
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);
			}
			goal_epos.block = epos.block;
			goal_epos.offset = epos.offset - adsize;
			goal_eloc = eloc;
			goal_elen = (etype << 30) | elen;
		}
	}

	brelse(epos.bh);

	if (spread == 0xFFFFFFFF) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting */
	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;

	if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		*err = -EDQUOT;
		return 0;
	}

	if (goal_elen)
		udf_write_aext(table, &goal_epos, goal_eloc, goal_elen, 1);
	else
		udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
	brelse(goal_epos.bh);

	if (udf_add_free_space(sbi, partition, -1))
		mark_buffer_dirty(sbi->s_lvid_bh);

	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;
}
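
/*
 * Entry point for freeing blocks: dispatch to the bitmap or table
 * implementation according to which unallocated/freed space structure
 * the partition carries. Unknown layouts are silently ignored.
 */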
inline void udf_free_blocks(struct super_block *sb,
			    struct inode *inode,
			    kernel_lb_addr bloc, uint32_t offset,
			    uint32_t count)
{
	uint16_t partition = bloc.partitionReferenceNum;
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
		return udf_bitmap_free_blocks(sb, inode,
					      map->s_uspace.s_bitmap,
					      bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
		return udf_table_free_blocks(sb, inode,
					     map->s_uspace.s_table,
					     bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
		return udf_bitmap_free_blocks(sb, inode,
					      map->s_fspace.s_bitmap,
					      bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
		return udf_table_free_blocks(sb, inode,
					     map->s_fspace.s_table,
					     bloc, offset, count);
	} else {
		return;
	}
}
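
/*
 * Entry point for preallocation: same dispatch as udf_free_blocks(),
 * returning the number of blocks reserved (0 when the partition has no
 * recognized free-space structure).
 */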
inline int udf_prealloc_blocks(struct super_block *sb,
			       struct inode *inode,
			       uint16_t partition, uint32_t first_block,
			       uint32_t block_count)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		return udf_bitmap_prealloc_blocks(sb, inode,
						  map->s_uspace.s_bitmap,
						  partition, first_block,
						  block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		return udf_table_prealloc_blocks(sb, inode,
						 map->s_uspace.s_table,
						 partition, first_block,
						 block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
		return udf_bitmap_prealloc_blocks(sb, inode,
						  map->s_fspace.s_bitmap,
						  partition, first_block,
						  block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
		return udf_table_prealloc_blocks(sb, inode,
						 map->s_fspace.s_table,
						 partition, first_block,
						 block_count);
	else
		return 0;
}
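
/*
 * Entry point for single-block allocation: dispatch as above; when the
 * partition has no recognized free-space structure, fail with -EIO.
 */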
inline int udf_new_block(struct super_block *sb,
			 struct inode *inode,
			 uint16_t partition, uint32_t goal, int *err)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		return udf_bitmap_new_block(sb, inode,
					    map->s_uspace.s_bitmap,
					    partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		return udf_table_new_block(sb, inode,
					   map->s_uspace.s_table,
					   partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
		return udf_bitmap_new_block(sb, inode,
					    map->s_fspace.s_bitmap,
					    partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
		return udf_table_new_block(sb, inode,
					   map->s_fspace.s_table,
					   partition, goal, err);
	else {
		*err = -EIO;
		return 0;
	}
}