bitmap.c

/*
 * linux/fs/hfsplus/bitmap.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of allocation file
 */

#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

#define PAGE_CACHE_BITS	(PAGE_CACHE_SIZE * 8)
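
/*
 * Note: the allocation file is a plain bitmap, one bit per allocation
 * block, with a set bit meaning the block is in use.  PAGE_CACHE_BITS is
 * how many of those bits fit in a single page-cache page, which is the
 * unit this file reads, scans and dirties.
 *
 * hfsplus_block_allocate() scans the bitmap for a free run of up to *max
 * blocks, preferably starting at 'offset' and never at or beyond 'size'
 * (the number of valid bits).  On success it marks the run as allocated,
 * stores the run's actual length back through *max and returns its first
 * block number; if no free block is found it returns 'size'.
 */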
int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *max)
{
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, start, len, n;
	__be32 val;
	int i;

	len = *max;
	if (!len)
		return size;

	dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
	mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
	mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
	page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
			(filler_t *)mapping->a_ops->readpage, NULL);
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	i = offset % 32;
	offset &= ~(PAGE_CACHE_BITS - 1);
	if ((size ^ offset) / PAGE_CACHE_BITS)
		end = pptr + PAGE_CACHE_BITS / 32;
	else
		end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
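
	/*
	 * The bitmap is stored big-endian: after be32_to_cpu() the most
	 * significant bit of each 32-bit word covers the lowest-numbered
	 * block in that word, so the scan masks start at 1U << 31 and
	 * shift right.  'end' stops at the end of the page unless this is
	 * the page containing bit 'size', in which case it is clamped to
	 * the last word that holds valid bits.
	 */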

	/* scan the first partial u32 for zero bits */
	val = *curr;
	if (~val) {
		n = be32_to_cpu(val);
		mask = (1U << 31) >> i;
		for (; i < 32; mask >>= 1, i++) {
			if (!(n & mask))
				goto found;
		}
	}
	curr++;

	/* scan complete u32s for the first zero bit */
	while (1) {
		while (curr < end) {
			val = *curr;
			if (~val) {
				n = be32_to_cpu(val);
				mask = 1U << 31;
				for (i = 0; i < 32; mask >>= 1, i++) {
					if (!(n & mask))
						goto found;
				}
			}
			curr++;
		}
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		if (offset >= size)
			break;
		page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
				(filler_t *)mapping->a_ops->readpage, NULL);
		curr = pptr = kmap(page);
		if ((size ^ offset) / PAGE_CACHE_BITS)
			end = pptr + PAGE_CACHE_BITS / 32;
		else
			end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
	}
	dprint(DBG_BITMAP, "bitmap full\n");
	start = size;
	goto out;

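	/*
	 * A zero bit was found: 'curr' points at the word that holds it,
	 * 'i' is its bit index, 'mask' selects it, and 'n' already holds
	 * the word's CPU-order value.  From here on, bits are set
	 * (allocated) starting at 'start' until 'len' blocks are marked or
	 * an already-allocated bit cuts the run short.
	 */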
found:
	start = offset + (curr - pptr) * 32 + i;
	if (start >= size) {
		dprint(DBG_BITMAP, "bitmap full\n");
		goto out;
	}

	/* do any partial u32 at the start */
	len = min(size - start, len);
	while (1) {
		n |= mask;
		if (++i >= 32)
			break;
		mask >>= 1;
		if (!--len || n & mask)
			goto done;
	}
	if (!--len)
		goto done;
	*curr++ = cpu_to_be32(n);
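
	/*
	 * Whole free words are claimed 32 blocks at a time by writing
	 * 0xffffffff; a word that already has bits set, or a remaining
	 * length below 32, ends the run and is finished off at 'last'.
	 */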
	/* do full u32s */
	while (1) {
		while (curr < end) {
			n = be32_to_cpu(*curr);
			if (len < 32)
				goto last;
			if (n) {
				len = 32;
				goto last;
			}
			*curr++ = cpu_to_be32(0xffffffff);
			len -= 32;
		}
		set_page_dirty(page);
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
				(filler_t *)mapping->a_ops->readpage, NULL);
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
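
	/*
	 * 'last' sets the leading free bits of the final word until 'len'
	 * runs out or an already-allocated bit is hit; 'done' then writes
	 * the word back and recomputes *max as the number of blocks
	 * actually allocated, which may be less than requested.
	 */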
last:
	/* do any partial u32 at end */
	mask = 1U << 31;
	for (i = 0; i < len; i++) {
		if (n & mask)
			break;
		n |= mask;
		mask >>= 1;
	}
done:
	*curr = cpu_to_be32(n);
	set_page_dirty(page);
	kunmap(page);
	*max = offset + (curr - pptr) * 32 + i - start;
	HFSPLUS_SB(sb).free_blocks -= *max;
	sb->s_dirt = 1;
	dprint(DBG_BITMAP, "-> %u,%u\n", start, *max);
out:
	mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
	return start;
}
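
/*
 * hfsplus_block_free() clears 'count' bits starting at block 'offset',
 * marking those blocks as free again.  It returns 0 on success and -2
 * if the range extends past the volume's total_blocks.
 */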
int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
{
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, len, pnr;
	int i;

	/* is there any actual work to be done? */
	if (!count)
		return 0;

	dprint(DBG_BITMAP, "block_free: %u,%u\n", offset, count);
	/* are all of the bits in range? */
	if ((offset + count) > HFSPLUS_SB(sb).total_blocks)
		return -2;

	mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
	mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
	pnr = offset / PAGE_CACHE_BITS;
	page = read_cache_page(mapping, pnr,
			(filler_t *)mapping->a_ops->readpage, NULL);
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	end = pptr + PAGE_CACHE_BITS / 32;
	len = count;
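
	/*
	 * Clearing a bit frees the corresponding block.  The mask built
	 * below keeps the 'i' in-use bits in front of 'offset'; when the
	 * whole range fits inside this one word it also keeps the bits
	 * behind offset + count, so only the freed run is cleared.
	 */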
	/* do any partial u32 at the start */
	i = offset % 32;
	if (i) {
		int j = 32 - i;
		mask = 0xffffffffU << j;
		if (j > count) {
			mask |= 0xffffffffU >> (i + count);
			*curr++ &= cpu_to_be32(mask);
			goto out;
		}
		*curr++ &= cpu_to_be32(mask);
		count -= j;
	}

	/* do full u32s */
	while (1) {
		while (curr < end) {
			if (count < 32)
				goto done;
			*curr++ = 0;
			count -= 32;
		}
		if (!count)
			break;
		set_page_dirty(page);
		kunmap(page);
		page = read_cache_page(mapping, ++pnr,
				(filler_t *)mapping->a_ops->readpage, NULL);
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
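
	/*
	 * Any remaining bits form a partial word at the end of the range:
	 * 0xffffffff >> count keeps everything past the freed run and
	 * clears its first 'count' (most significant) bits.
	 */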
done:
	/* do any partial u32 at end */
	if (count) {
		mask = 0xffffffffU >> count;
		*curr &= cpu_to_be32(mask);
	}
out:
	set_page_dirty(page);
	kunmap(page);
	HFSPLUS_SB(sb).free_blocks += len;
	sb->s_dirt = 1;
	mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
	return 0;
}