/*
 * linux/fs/hfsplus/bitmap.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of allocation file
 */

#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
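
/* Number of allocation bitmap bits stored in a single page cache page. */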
#define PAGE_CACHE_BITS	(PAGE_CACHE_SIZE * 8)
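
/*
 * hfsplus_block_allocate(): find and allocate free blocks in the
 * allocation bitmap.  The search starts at block 'offset' and marks at
 * most *max blocks as in use; the number actually allocated is written
 * back through *max and the first allocated block is returned.  The
 * search limit 'size' is returned when the bitmap is full or a bitmap
 * page cannot be read.
 */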
int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *max)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, start, len, n;
	__be32 val;
	int i;

	len = *max;
	if (!len)
		return size;

	dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
	mutex_lock(&sbi->alloc_mutex);
	mapping = sbi->alloc_file->i_mapping;
	page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
	if (IS_ERR(page)) {
		start = size;
		goto out;
	}
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	i = offset % 32;
	offset &= ~(PAGE_CACHE_BITS - 1);
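	/*
	 * 'end' points just past the last u32 to examine in this page: the
	 * whole page if the bitmap continues into further pages, otherwise
	 * only the words covering bits below 'size'.
	 */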
	if ((size ^ offset) / PAGE_CACHE_BITS)
		end = pptr + PAGE_CACHE_BITS / 32;
	else
		end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;

	/* scan the first partial u32 for zero bits */
	val = *curr;
	if (~val) {
		n = be32_to_cpu(val);
		mask = (1U << 31) >> i;
		for (; i < 32; mask >>= 1, i++) {
			if (!(n & mask))
				goto found;
		}
	}
	curr++;

	/* scan complete u32s for the first zero bit */
	while (1) {
		while (curr < end) {
			val = *curr;
			if (~val) {
				n = be32_to_cpu(val);
				mask = 1 << 31;
				for (i = 0; i < 32; mask >>= 1, i++) {
					if (!(n & mask))
						goto found;
				}
			}
			curr++;
		}
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		if (offset >= size)
			break;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		curr = pptr = kmap(page);
		if ((size ^ offset) / PAGE_CACHE_BITS)
			end = pptr + PAGE_CACHE_BITS / 32;
		else
			end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
	}
	dprint(DBG_BITMAP, "bitmap full\n");
	start = size;
	goto out;
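
	/*
	 * A zero bit was found: 'curr' points at the u32 that holds it and
	 * 'i'/'mask' identify the bit.  Mark bits as allocated until 'len'
	 * blocks have been taken or an already-used bit is reached.
	 */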
found:
	start = offset + (curr - pptr) * 32 + i;
	if (start >= size) {
		dprint(DBG_BITMAP, "bitmap full\n");
		goto out;
	}
	/* do any partial u32 at the start */
	len = min(size - start, len);
	while (1) {
		n |= mask;
		if (++i >= 32)
			break;
		mask >>= 1;
		if (!--len || n & mask)
			goto done;
	}
	if (!--len)
		goto done;
	*curr++ = cpu_to_be32(n);
	/* do full u32s */
	while (1) {
		while (curr < end) {
			n = be32_to_cpu(*curr);
			if (len < 32)
				goto last;
			if (n) {
				len = 32;
				goto last;
			}
			*curr++ = cpu_to_be32(0xffffffff);
			len -= 32;
		}
		set_page_dirty(page);
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}

last:
	/* do any partial u32 at end */
	mask = 1U << 31;
	for (i = 0; i < len; i++) {
		if (n & mask)
			break;
		n |= mask;
		mask >>= 1;
	}
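
	/*
	 * 'n' holds the last modified u32; write it back, derive the number
	 * of blocks actually allocated from the final bit position, and
	 * update the in-core free block count.
	 */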
done:
	*curr = cpu_to_be32(n);
	set_page_dirty(page);
	kunmap(page);
	*max = offset + (curr - pptr) * 32 + i - start;
	sbi->free_blocks -= *max;
	sb->s_dirt = 1;
	dprint(DBG_BITMAP, "-> %u,%u\n", start, *max);
out:
	mutex_unlock(&sbi->alloc_mutex);
	return start;
}
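
/*
 * hfsplus_block_free(): mark 'count' blocks starting at block 'offset'
 * as free again by clearing the corresponding bits in the allocation
 * bitmap.  Returns 0 on success, or -2 if the range extends past the
 * end of the volume.
 */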
int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, len, pnr;
	int i;

	/* is there any actual work to be done? */
	if (!count)
		return 0;

	dprint(DBG_BITMAP, "block_free: %u,%u\n", offset, count);
	/* are all of the bits in range? */
	if ((offset + count) > sbi->total_blocks)
		return -2;

	mutex_lock(&sbi->alloc_mutex);
	mapping = sbi->alloc_file->i_mapping;
	pnr = offset / PAGE_CACHE_BITS;
	page = read_mapping_page(mapping, pnr, NULL);
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	end = pptr + PAGE_CACHE_BITS / 32;
	len = count;
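	/* 'len' keeps the original count so free_blocks can be adjusted on exit */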
	/* do any partial u32 at the start */
	i = offset % 32;
	if (i) {
		int j = 32 - i;
		mask = 0xffffffffU << j;
		if (j > count) {
			mask |= 0xffffffffU >> (i + count);
			*curr++ &= cpu_to_be32(mask);
			goto out;
		}
		*curr++ &= cpu_to_be32(mask);
		count -= j;
	}
	/* do full u32s */
	while (1) {
		while (curr < end) {
			if (count < 32)
				goto done;
			*curr++ = 0;
			count -= 32;
		}
		if (!count)
			break;
		set_page_dirty(page);
		kunmap(page);
		page = read_mapping_page(mapping, ++pnr, NULL);
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
done:
	/* do any partial u32 at end */
	if (count) {
		mask = 0xffffffffU >> count;
		*curr &= cpu_to_be32(mask);
	}
out:
	set_page_dirty(page);
	kunmap(page);
	sbi->free_blocks += len;
	sb->s_dirt = 1;
	mutex_unlock(&sbi->alloc_mutex);
	return 0;
}