
/*
 * linux/fs/hfsplus/bitmap.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of allocation file
 */

#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

#define PAGE_CACHE_BITS	(PAGE_CACHE_SIZE * 8)
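
/*
 * The allocation file holds one bit per allocation block: a set bit
 * means the block is in use.  Bits are kept in big-endian 32-bit words
 * and accessed through the allocation file's page cache, so each page
 * covers PAGE_CACHE_BITS blocks.
 *
 * hfsplus_block_allocate() scans the range [offset, size) for a run of
 * free blocks, marks up to *max of them as allocated, stores the length
 * actually claimed back in *max and returns the first block of the run
 * (or a value >= size if nothing could be allocated).
 */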
int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *max)
{
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, start, len, n;
	__be32 val;
	int i;

	len = *max;
	if (!len)
		return size;

	dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
	mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
	mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
	page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
	if (IS_ERR(page)) {
		start = size;
		goto out;
	}
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	i = offset % 32;
	offset &= ~(PAGE_CACHE_BITS - 1);
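	/*
	 * "offset" now marks the first bit of this bitmap page, so block
	 * numbers can be rebuilt as offset + (curr - pptr) * 32 + i.  If
	 * bit "size" lies beyond this page, scan the whole page; otherwise
	 * stop at the word that contains it.
	 */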
	if ((size ^ offset) / PAGE_CACHE_BITS)
		end = pptr + PAGE_CACHE_BITS / 32;
	else
		end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
	/* scan the first partial u32 for zero bits */
	val = *curr;
	if (~val) {
		n = be32_to_cpu(val);
		mask = (1U << 31) >> i;
		for (; i < 32; mask >>= 1, i++) {
			if (!(n & mask))
				goto found;
		}
	}
	curr++;

	/* scan complete u32s for the first zero bit */
	while (1) {
		while (curr < end) {
			val = *curr;
			if (~val) {
				n = be32_to_cpu(val);
				mask = 1U << 31;
				for (i = 0; i < 32; mask >>= 1, i++) {
					if (!(n & mask))
						goto found;
				}
			}
			curr++;
		}
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		if (offset >= size)
			break;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		curr = pptr = kmap(page);
		if ((size ^ offset) / PAGE_CACHE_BITS)
			end = pptr + PAGE_CACHE_BITS / 32;
		else
			end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
	}
	dprint(DBG_BITMAP, "bitmap full\n");
	start = size;
	goto out;

found:
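	/*
	 * A zero bit was found: curr and i give its word and bit position
	 * within the current page, so translate that back into a block
	 * number before marking the run as allocated.
	 */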
	start = offset + (curr - pptr) * 32 + i;
	if (start >= size) {
		dprint(DBG_BITMAP, "bitmap full\n");
		goto out;
	}
	/* do any partial u32 at the start */
	len = min(size - start, len);
	while (1) {
		n |= mask;
		if (++i >= 32)
			break;
		mask >>= 1;
		if (!--len || n & mask)
			goto done;
	}
	if (!--len)
		goto done;
	*curr++ = cpu_to_be32(n);
	/* do full u32s */
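	/*
	 * Claim whole 32-bit words while the requested length lasts.  A
	 * word with any bit already set ends the run early; "last" then
	 * takes only the leading free bits of that word.
	 */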
	while (1) {
		while (curr < end) {
			n = be32_to_cpu(*curr);
			if (len < 32)
				goto last;
			if (n) {
				len = 32;
				goto last;
			}
			*curr++ = cpu_to_be32(0xffffffff);
			len -= 32;
		}
		set_page_dirty(page);
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
last:
	/* do any partial u32 at end */
	mask = 1U << 31;
	for (i = 0; i < len; i++) {
		if (n & mask)
			break;
		n |= mask;
		mask >>= 1;
	}
done:
	*curr = cpu_to_be32(n);
	set_page_dirty(page);
	kunmap(page);
	*max = offset + (curr - pptr) * 32 + i - start;
	HFSPLUS_SB(sb).free_blocks -= *max;
	sb->s_dirt = 1;
	dprint(DBG_BITMAP, "-> %u,%u\n", start, *max);
out:
	mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
	return start;
}
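
/*
 * hfsplus_block_free() clears "count" bits starting at block "offset",
 * returning 0 on success or -2 if the range runs past the end of the
 * volume.
 */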
int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
{
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, len, pnr;
	int i;

	/* is there any actual work to be done? */
	if (!count)
		return 0;

	dprint(DBG_BITMAP, "block_free: %u,%u\n", offset, count);
	/* are all of the bits in range? */
	if ((offset + count) > HFSPLUS_SB(sb).total_blocks)
		return -2;

	mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
	mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
	pnr = offset / PAGE_CACHE_BITS;
	page = read_mapping_page(mapping, pnr, NULL);
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	end = pptr + PAGE_CACHE_BITS / 32;
	len = count;

	/* do any partial u32 at the start */
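	/*
	 * "i" is the bit position of "offset" within the word, counted
	 * from the most significant bit; the mask preserves the i bits
	 * before the range (and, when the whole range fits in this word,
	 * the bits after it) while clearing the range itself.
	 */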
	i = offset % 32;
	if (i) {
		int j = 32 - i;
		mask = 0xffffffffU << j;
		if (j > count) {
			mask |= 0xffffffffU >> (i + count);
			*curr++ &= cpu_to_be32(mask);
			goto out;
		}
		*curr++ &= cpu_to_be32(mask);
		count -= j;
	}

	/* do full u32s */
	while (1) {
		while (curr < end) {
			if (count < 32)
				goto done;
			*curr++ = 0;
			count -= 32;
		}
		if (!count)
			break;
		set_page_dirty(page);
		kunmap(page);
		page = read_mapping_page(mapping, ++pnr, NULL);
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
done:
	/* do any partial u32 at end */
	if (count) {
		mask = 0xffffffffU >> count;
		*curr &= cpu_to_be32(mask);
	}
out:
	set_page_dirty(page);
	kunmap(page);
	HFSPLUS_SB(sb).free_blocks += len;
	sb->s_dirt = 1;
	mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
	return 0;
}