/*
 * balloc.c
 *
 * PURPOSE
 *	Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"

#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size)
#define udf_find_next_one_bit(addr, size, offset) \
	find_next_one_bit(addr, size, offset)

#define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x)
#define leNUM_to_cpup(x, y) xleNUM_to_cpup(x, y)
#define xleNUM_to_cpup(x, y) (le ## x ## _to_cpup(y))
#define uintBPL_t uint(BITS_PER_LONG)
#define uint(x) xuint(x)
#define xuint(x) __le ## x
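
/*
 * The token-pasting macros above pick the byte-order helper and the
 * little-endian type that match the machine word size: with 32-bit
 * longs, leBPL_to_cpup() expands to le32_to_cpup() and uintBPL_t to
 * __le32; with 64-bit longs, to le64_to_cpup() and __le64.
 */

/*
 * Find the next set bit at or after 'offset' in a little-endian
 * on-disk bitmap, returning 'size' if there is none (the same contract
 * as the kernel's find_next_bit(), but byte-order aware).
 */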
static inline int find_next_one_bit(void *addr, int size, int offset)
{
	uintBPL_t *p = ((uintBPL_t *) addr) + (offset / BITS_PER_LONG);
	int result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= (BITS_PER_LONG - 1);
	if (offset) {
		tmp = leBPL_to_cpup(p++);
		tmp &= ~0UL << offset;
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size & ~(BITS_PER_LONG - 1)) {
		if ((tmp = leBPL_to_cpup(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = leBPL_to_cpup(p);
found_first:
	tmp &= ~0UL >> (BITS_PER_LONG - size);
found_middle:
	return result + ffz(~tmp);
}

#define find_first_one_bit(addr, size) \
	find_next_one_bit((addr), (size), 0)
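
/*
 * Read one block of the on-disk space bitmap and cache its
 * buffer_head in slot 'bitmap_nr'.  On a failed read, NULL is cached
 * and -EIO returned; load_block_bitmap() below turns the cached NULL
 * into an error for later lookups as well.
 */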
static int read_block_bitmap(struct super_block *sb,
			     struct udf_bitmap *bitmap, unsigned int block,
			     unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int retval = 0;
	kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB_PARTITION(sb);

	bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
	if (!bh)
		retval = -EIO;

	bitmap->s_block_bitmap[bitmap_nr] = bh;
	return retval;
}
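
/*
 * Return the cache slot holding the bitmap block for 'block_group',
 * reading it in on first use.  Slots map 1:1 to block groups, so the
 * returned slot is the group number itself (or a negative errno).
 */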
static int __load_block_bitmap(struct super_block *sb,
			       struct udf_bitmap *bitmap,
			       unsigned int block_group)
{
	int retval = 0;
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups)
		udf_debug("block_group (%d) > nr_groups (%d)\n",
			  block_group, nr_groups);

	if (bitmap->s_block_bitmap[block_group])
		return block_group;

	retval = read_block_bitmap(sb, bitmap, block_group, block_group);
	if (retval < 0)
		return retval;
	return block_group;
}

static inline int load_block_bitmap(struct super_block *sb,
				    struct udf_bitmap *bitmap,
				    unsigned int block_group)
{
	int slot;

	slot = __load_block_bitmap(sb, bitmap, block_group);
	if (slot < 0)
		return slot;

	if (!bitmap->s_block_bitmap[slot])
		return -EIO;

	return slot;
}
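
/*
 * Free 'count' blocks starting at bloc + offset by setting their bits
 * in the space bitmap (a set bit means the block is free).  The
 * do_more loop handles runs that cross a bitmap-block boundary, and
 * the free-space count in the logical volume integrity descriptor is
 * bumped for each block actually freed.
 */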
static void udf_bitmap_free_blocks(struct super_block *sb,
				   struct inode *inode,
				   struct udf_bitmap *bitmap,
				   kernel_lb_addr bloc, uint32_t offset,
				   uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	if (bloc.logicalBlockNum < 0 ||
	    (bloc.logicalBlockNum + count) >
		UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			  UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
		goto error_return;
	}

	block = bloc.logicalBlockNum + offset +
		(sizeof(struct spaceBitmapDesc) << 3);

do_more:
	overflow = 0;
	block_group = block >> (sb->s_blocksize_bits + 3);
	bit = block % (sb->s_blocksize << 3);

	/*
	 * Check to see if we are freeing blocks across a group boundary.
	 */
	if (bit + count > (sb->s_blocksize << 3)) {
		overflow = bit + count - (sb->s_blocksize << 3);
		count -= overflow;
	}
	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;

	bh = bitmap->s_block_bitmap[bitmap_nr];
	for (i = 0; i < count; i++) {
		if (udf_set_bit(bit + i, bh->b_data)) {
			udf_debug("bit %ld already set\n", bit + i);
			udf_debug("byte=%2x\n",
				  ((char *)bh->b_data)[(bit + i) >> 3]);
		} else {
			if (inode)
				DQUOT_FREE_BLOCK(inode, 1);
			if (UDF_SB_LVIDBH(sb)) {
				UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
					cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->
						freeSpaceTable[UDF_SB_PARTITION(sb)]) + 1);
			}
		}
	}
	mark_buffer_dirty(bh);
	if (overflow) {
		block += count;
		count = overflow;
		goto do_more;
	}
error_return:
	sb->s_dirt = 1;
	if (UDF_SB_LVIDBH(sb))
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	mutex_unlock(&sbi->s_alloc_mutex);
}
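
/*
 * Preallocate up to 'block_count' contiguous blocks starting exactly
 * at 'first_block'.  The scan stops at the first bit already in use,
 * so fewer blocks than requested may be claimed; the return value is
 * the number actually allocated.
 */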
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
				      struct inode *inode,
				      struct udf_bitmap *bitmap,
				      uint16_t partition, uint32_t first_block,
				      uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group, group_start;
	int nr_groups, bitmap_nr;
	struct buffer_head *bh;

	mutex_lock(&sbi->s_alloc_mutex);
	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
		goto out;

	if (first_block + block_count > UDF_SB_PARTLEN(sb, partition))
		block_count = UDF_SB_PARTLEN(sb, partition) - first_block;

repeat:
	nr_groups = (UDF_SB_PARTLEN(sb, partition) +
		     (sizeof(struct spaceBitmapDesc) << 3) +
		     (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8);
	block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto out;
	bh = bitmap->s_block_bitmap[bitmap_nr];

	bit = block % (sb->s_blocksize << 3);

	while (bit < (sb->s_blocksize << 3) && block_count > 0) {
		if (!udf_test_bit(bit, bh->b_data))
			goto out;
		else if (DQUOT_PREALLOC_BLOCK(inode, 1))
			goto out;
		else if (!udf_clear_bit(bit, bh->b_data)) {
			udf_debug("bit already cleared for block %d\n", bit);
			DQUOT_FREE_BLOCK(inode, 1);
			goto out;
		}
		block_count--;
		alloc_count++;
		bit++;
		block++;
	}
	mark_buffer_dirty(bh);
	if (block_count > 0)
		goto repeat;
out:
	if (UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->
				freeSpaceTable[partition]) - alloc_count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
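
/*
 * Allocate a single block, preferring 'goal'.  The goal's group is
 * tried first (a memscan() for a fully free 0xFF byte quickly checks
 * whether the group has space): the goal bit itself, then the rest of
 * its 64-bit window, then the first free run after it.  Failing that,
 * the remaining groups are scanned, and search_back slides up to seven
 * bits backwards so the allocation lands at the start of a free run.
 */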
static int udf_bitmap_new_block(struct super_block *sb,
				struct inode *inode,
				struct udf_bitmap *bitmap, uint16_t partition,
				uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0, block, block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	int newblock = 0;

	*err = -ENOSPC;
	mutex_lock(&sbi->s_alloc_mutex);

repeat:
	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	ptr = memscan((char *)bh->b_data + group_start, 0xFF,
		      sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
		bit = block % (sb->s_blocksize << 3);
		if (udf_test_bit(bit, bh->b_data))
			goto got_block;

		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;

		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
			      sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto search_back;
		}

		newbit = udf_find_next_one_bit(bh->b_data,
					       sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto got_block;
		}
	}

	for (i = 0; i < (nr_groups * 2); i++) {
		block_group++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups) {
			ptr = memscan((char *)bh->b_data + group_start, 0xFF,
				      sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		} else {
			bit = udf_find_next_one_bit((char *)bh->b_data,
						    sb->s_blocksize << 3,
						    group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups * 2)) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
					    group_start << 3);
	if (bit >= sb->s_blocksize << 3) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

search_back:
	for (i = 0; i < 7 && bit > (group_start << 3) &&
	     udf_test_bit(bit - 1, bh->b_data); i++, bit--)
		;

got_block:
	/*
	 * Check quota for allocation of this block.
	 */
	if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
		mutex_unlock(&sbi->s_alloc_mutex);
		*err = -EDQUOT;
		return 0;
	}

	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (!udf_clear_bit(bit, bh->b_data)) {
		udf_debug("bit already cleared for block %d\n", bit);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	if (UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->
				freeSpaceTable[partition]) - 1);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;

error_return:
	*err = -EIO;
	mutex_unlock(&sbi->s_alloc_mutex);
	return 0;
}
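
/*
 * Free blocks for partitions that track free space with an
 * unallocated-space table (an inode whose extents describe runs of
 * free blocks) rather than a bitmap.  The freed run is merged into an
 * existing extent that ends at 'start' or begins at 'end + 1'; if no
 * neighbour is found, a new extent is appended, stealing one of the
 * freed blocks for a fresh allocation extent descriptor when the
 * current descriptor block is full.
 */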
static void udf_table_free_blocks(struct super_block *sb,
				  struct inode *inode,
				  struct inode *table,
				  kernel_lb_addr bloc, uint32_t offset,
				  uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t start, end;
	uint32_t elen;
	kernel_lb_addr eloc;
	struct extent_position oepos, epos;
	int8_t etype;
	int i;

	mutex_lock(&sbi->s_alloc_mutex);
	if (bloc.logicalBlockNum < 0 ||
	    (bloc.logicalBlockNum + count) >
		UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			  UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
		goto error_return;
	}

	/* We do this up front - there are some error conditions that
	   could occur, but.. oh well */
	if (inode)
		DQUOT_FREE_BLOCK(inode, count);
	if (UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->
				freeSpaceTable[UDF_SB_PARTITION(sb)]) + count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}

	start = bloc.logicalBlockNum + offset;
	end = bloc.logicalBlockNum + offset + count - 1;

	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	epos.block = oepos.block = UDF_I_LOCATION(table);
	epos.bh = oepos.bh = NULL;

	while (count &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (((eloc.logicalBlockNum +
		      (elen >> sb->s_blocksize_bits)) == start)) {
			if ((0x3FFFFFFF - elen) <
			    (count << sb->s_blocksize_bits)) {
				count -= ((0x3FFFFFFF - elen) >>
					  sb->s_blocksize_bits);
				start += ((0x3FFFFFFF - elen) >>
					  sb->s_blocksize_bits);
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				elen = (etype << 30) |
					(elen + (count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, &oepos, eloc, elen, 1);
		} else if (eloc.logicalBlockNum == (end + 1)) {
			if ((0x3FFFFFFF - elen) <
			    (count << sb->s_blocksize_bits)) {
				count -= ((0x3FFFFFFF - elen) >>
					  sb->s_blocksize_bits);
				end -= ((0x3FFFFFFF - elen) >>
					sb->s_blocksize_bits);
				eloc.logicalBlockNum -=
					((0x3FFFFFFF - elen) >>
					 sb->s_blocksize_bits);
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(elen + (count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, &oepos, eloc, elen, 1);
		}

		if (epos.bh != oepos.bh) {
			i = -1;
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = 0;
		} else
			oepos.offset = epos.offset;
	}

	if (count) {
		/* NOTE: we CANNOT use udf_add_aext here, as it can try to
		   allocate a new block, and since we hold the super block
		   lock already very bad things would happen :)

		   We copy the behavior of udf_add_aext, but instead of
		   trying to allocate a new block close to the existing one,
		   we just steal a block from the extent we are trying to add.

		   It would be nice if the blocks were close together, but it
		   isn't required. */

		int adsize;
		short_ad *sad = NULL;
		long_ad *lad = NULL;
		struct allocExtDesc *aed;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
			adsize = sizeof(short_ad);
		else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
			adsize = sizeof(long_ad);
		else {
			brelse(oepos.bh);
			brelse(epos.bh);
			goto error_return;
		}

		if (epos.offset + (2 * adsize) > sb->s_blocksize) {
			char *sptr, *dptr;
			int loffset;

			brelse(oepos.bh);
			oepos = epos;

			/* Steal a block from the extent being freed */
			epos.block.logicalBlockNum = eloc.logicalBlockNum;
			eloc.logicalBlockNum++;
			elen -= sb->s_blocksize;

			if (!(epos.bh = udf_tread(sb,
				udf_get_lb_pblock(sb, epos.block, 0)))) {
				brelse(oepos.bh);
				goto error_return;
			}
			aed = (struct allocExtDesc *)(epos.bh->b_data);
			aed->previousAllocExtLocation =
				cpu_to_le32(oepos.block.logicalBlockNum);
			if (epos.offset + adsize > sb->s_blocksize) {
				loffset = epos.offset;
				aed->lengthAllocDescs = cpu_to_le32(adsize);
				sptr = UDF_I_DATA(inode) + epos.offset -
					udf_file_entry_alloc_offset(inode) +
					UDF_I_LENEATTR(inode) - adsize;
				dptr = epos.bh->b_data +
					sizeof(struct allocExtDesc);
				memcpy(dptr, sptr, adsize);
				epos.offset = sizeof(struct allocExtDesc) +
					adsize;
			} else {
				loffset = epos.offset + adsize;
				aed->lengthAllocDescs = cpu_to_le32(0);
				sptr = oepos.bh->b_data + epos.offset;
				epos.offset = sizeof(struct allocExtDesc);

				if (oepos.bh) {
					aed = (struct allocExtDesc *)
						oepos.bh->b_data;
					aed->lengthAllocDescs =
						cpu_to_le32(le32_to_cpu(
							aed->lengthAllocDescs) +
							adsize);
				} else {
					UDF_I_LENALLOC(table) += adsize;
					mark_inode_dirty(table);
				}
			}
			if (UDF_SB_UDFREV(sb) >= 0x0200)
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3,
					    1, epos.block.logicalBlockNum,
					    sizeof(tag));
			else
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 2,
					    1, epos.block.logicalBlockNum,
					    sizeof(tag));

			switch (UDF_I_ALLOCTYPE(table)) {
			case ICBTAG_FLAG_AD_SHORT:
				sad = (short_ad *) sptr;
				sad->extLength =
					cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
						    sb->s_blocksize);
				sad->extPosition =
					cpu_to_le32(epos.block.logicalBlockNum);
				break;
			case ICBTAG_FLAG_AD_LONG:
				lad = (long_ad *) sptr;
				lad->extLength =
					cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
						    sb->s_blocksize);
				lad->extLocation = cpu_to_lelb(epos.block);
				break;
			}

			if (oepos.bh) {
				udf_update_tag(oepos.bh->b_data, loffset);
				mark_buffer_dirty(oepos.bh);
			} else
				mark_inode_dirty(table);
		}

		if (elen) {
			/* It's possible that stealing the block emptied
			   the extent */
			udf_write_aext(table, &epos, eloc, elen, 1);

			if (!epos.bh) {
				UDF_I_LENALLOC(table) += adsize;
				mark_inode_dirty(table);
			} else {
				aed = (struct allocExtDesc *)epos.bh->b_data;
				aed->lengthAllocDescs =
					cpu_to_le32(le32_to_cpu(
						aed->lengthAllocDescs) +
						adsize);
				udf_update_tag(epos.bh->b_data, epos.offset);
				mark_buffer_dirty(epos.bh);
			}
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

error_return:
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
}
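
/*
 * Table-based counterpart of udf_bitmap_prealloc_blocks(): find the
 * free-space extent that starts at 'first_block' and carve up to
 * 'block_count' blocks off its front, deleting the extent outright if
 * it is consumed completely.
 */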
static int udf_table_prealloc_blocks(struct super_block *sb,
				     struct inode *inode,
				     struct inode *table, uint16_t partition,
				     uint32_t first_block, uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t elen, adsize;
	kernel_lb_addr eloc;
	struct extent_position epos;
	int8_t etype = -1;

	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
		return 0;

	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return 0;

	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = UDF_I_LOCATION(table);
	epos.bh = NULL;
	eloc.logicalBlockNum = 0xFFFFFFFF;

	while (first_block != eloc.logicalBlockNum &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
			  eloc.logicalBlockNum, elen, first_block);
		; /* empty loop body */
	}

	if (first_block == eloc.logicalBlockNum) {
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (inode && DQUOT_PREALLOC_BLOCK(inode,
			alloc_count > block_count ? block_count : alloc_count))
			alloc_count = 0;
		else if (alloc_count > block_count) {
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, eloc,
				       (etype << 30) | elen, 1);
		} else
			udf_delete_aext(table, epos, eloc,
					(etype << 30) | elen);
	} else
		alloc_count = 0;

	brelse(epos.bh);

	if (alloc_count && UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->
				freeSpaceTable[partition]) - alloc_count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
		sb->s_dirt = 1;
	}
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
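
/*
 * Table-based counterpart of udf_bitmap_new_block(): walk all
 * free-space extents looking for the one closest to 'goal' (a spread
 * of zero is an exact hit) and allocate that extent's first block, so
 * extents only ever shrink or vanish and never need to be split.
 */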
static int udf_table_new_block(struct super_block *sb,
			       struct inode *inode,
			       struct inode *table, uint16_t partition,
			       uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	uint32_t newblock = 0, adsize;
	uint32_t elen, goal_elen = 0;
	kernel_lb_addr eloc, goal_eloc;
	struct extent_position epos, goal_epos;
	int8_t etype;

	*err = -ENOSPC;

	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return newblock;

	mutex_lock(&sbi->s_alloc_mutex);
	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
		goal = 0;

	/* We search for the closest matching block to goal. If we find
	   an exact hit, we stop. Otherwise we keep going till we run out
	   of extents. We store the buffer_head, bloc, and extoffset of the
	   current closest match and use that when we are done. */

	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = UDF_I_LOCATION(table);
	epos.bh = goal_epos.bh = NULL;

	while (spread &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (goal >= eloc.logicalBlockNum) {
			if (goal < eloc.logicalBlockNum +
				(elen >> sb->s_blocksize_bits))
				nspread = 0;
			else
				nspread = goal - eloc.logicalBlockNum -
					(elen >> sb->s_blocksize_bits);
		} else
			nspread = eloc.logicalBlockNum - goal;

		if (nspread < spread) {
			spread = nspread;
			if (goal_epos.bh != epos.bh) {
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);
			}
			goal_epos.block = epos.block;
			goal_epos.offset = epos.offset - adsize;
			goal_eloc = eloc;
			goal_elen = (etype << 30) | elen;
		}
	}

	brelse(epos.bh);

	if (spread == 0xFFFFFFFF) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting */

	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;

	if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		*err = -EDQUOT;
		return 0;
	}

	if (goal_elen)
		udf_write_aext(table, &goal_epos, goal_eloc, goal_elen, 1);
	else
		udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
	brelse(goal_epos.bh);

	if (UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->
				freeSpaceTable[partition]) - 1);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}

	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;
}
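
/*
 * The wrappers below dispatch on how the partition tracks its free
 * space: an unallocated-space bitmap or table, or a freed-space bitmap
 * or table, as indicated by the partition map flags.
 */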
inline void udf_free_blocks(struct super_block *sb,
			    struct inode *inode,
			    kernel_lb_addr bloc, uint32_t offset,
			    uint32_t count)
{
	uint16_t partition = bloc.partitionReferenceNum;

	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
		return udf_bitmap_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
			bloc, offset, count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) &
		   UDF_PART_FLAG_UNALLOC_TABLE) {
		return udf_table_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
			bloc, offset, count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) &
		   UDF_PART_FLAG_FREED_BITMAP) {
		return udf_bitmap_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
			bloc, offset, count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) &
		   UDF_PART_FLAG_FREED_TABLE) {
		return udf_table_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
			bloc, offset, count);
	} else
		return;
}

inline int udf_prealloc_blocks(struct super_block *sb,
			       struct inode *inode,
			       uint16_t partition, uint32_t first_block,
			       uint32_t block_count)
{
	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
		return udf_bitmap_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
			partition, first_block, block_count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) &
		   UDF_PART_FLAG_UNALLOC_TABLE) {
		return udf_table_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
			partition, first_block, block_count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) &
		   UDF_PART_FLAG_FREED_BITMAP) {
		return udf_bitmap_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
			partition, first_block, block_count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) &
		   UDF_PART_FLAG_FREED_TABLE) {
		return udf_table_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
			partition, first_block, block_count);
	} else
		return 0;
}

inline int udf_new_block(struct super_block *sb,
			 struct inode *inode,
			 uint16_t partition, uint32_t goal, int *err)
{
	int ret;

	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
		ret = udf_bitmap_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
			partition, goal, err);
		return ret;
	} else if (UDF_SB_PARTFLAGS(sb, partition) &
		   UDF_PART_FLAG_UNALLOC_TABLE) {
		return udf_table_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
			partition, goal, err);
	} else if (UDF_SB_PARTFLAGS(sb, partition) &
		   UDF_PART_FLAG_FREED_BITMAP) {
		return udf_bitmap_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
			partition, goal, err);
	} else if (UDF_SB_PARTFLAGS(sb, partition) &
		   UDF_PART_FLAG_FREED_TABLE) {
		return udf_table_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
			partition, goal, err);
	} else {
		*err = -EIO;
		return 0;
	}
}