/*
 * linux/fs/hfsplus/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */
  10. #include <linux/slab.h>
  11. #include <linux/pagemap.h>
  12. #include <linux/log2.h>
  13. #include "hfsplus_fs.h"
  14. #include "hfsplus_raw.h"
  15. /* Get a reference to a B*Tree and do some initial checks */
  16. struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
  17. {
  18. struct hfs_btree *tree;
  19. struct hfs_btree_header_rec *head;
  20. struct address_space *mapping;
  21. struct inode *inode;
  22. struct page *page;
  23. unsigned int size;
  24. tree = kzalloc(sizeof(*tree), GFP_KERNEL);
  25. if (!tree)
  26. return NULL;
  27. init_MUTEX(&tree->tree_lock);
  28. spin_lock_init(&tree->hash_lock);
  29. tree->sb = sb;
  30. tree->cnid = id;
  31. inode = hfsplus_iget(sb, id);
  32. if (IS_ERR(inode))
  33. goto free_tree;
  34. tree->inode = inode;
  35. mapping = tree->inode->i_mapping;
  36. page = read_mapping_page(mapping, 0, NULL);
  37. if (IS_ERR(page))
  38. goto free_tree;
  39. /* Load the header */
  40. head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
  41. tree->root = be32_to_cpu(head->root);
  42. tree->leaf_count = be32_to_cpu(head->leaf_count);
  43. tree->leaf_head = be32_to_cpu(head->leaf_head);
  44. tree->leaf_tail = be32_to_cpu(head->leaf_tail);
  45. tree->node_count = be32_to_cpu(head->node_count);
  46. tree->free_nodes = be32_to_cpu(head->free_nodes);
  47. tree->attributes = be32_to_cpu(head->attributes);
  48. tree->node_size = be16_to_cpu(head->node_size);
  49. tree->max_key_len = be16_to_cpu(head->max_key_len);
  50. tree->depth = be16_to_cpu(head->depth);
  51. /* Set the correct compare function */
  52. if (id == HFSPLUS_EXT_CNID) {
  53. tree->keycmp = hfsplus_ext_cmp_key;
  54. } else if (id == HFSPLUS_CAT_CNID) {
  55. if ((HFSPLUS_SB(sb).flags & HFSPLUS_SB_HFSX) &&
  56. (head->key_type == HFSPLUS_KEY_BINARY))
  57. tree->keycmp = hfsplus_cat_bin_cmp_key;
  58. else {
  59. tree->keycmp = hfsplus_cat_case_cmp_key;
  60. HFSPLUS_SB(sb).flags |= HFSPLUS_SB_CASEFOLD;
  61. }
  62. } else {
  63. printk(KERN_ERR "hfs: unknown B*Tree requested\n");
  64. goto fail_page;
  65. }
  66. size = tree->node_size;
  67. if (!is_power_of_2(size))
  68. goto fail_page;
  69. if (!tree->node_count)
  70. goto fail_page;
  71. tree->node_size_shift = ffs(size) - 1;
  72. tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
  73. kunmap(page);
  74. page_cache_release(page);
  75. return tree;
  76. fail_page:
  77. tree->inode->i_mapping->a_ops = &hfsplus_aops;
  78. page_cache_release(page);
  79. free_tree:
  80. iput(tree->inode);
  81. kfree(tree);
  82. return NULL;
  83. }
  84. /* Release resources used by a btree */
  85. void hfs_btree_close(struct hfs_btree *tree)
  86. {
  87. struct hfs_bnode *node;
  88. int i;
  89. if (!tree)
  90. return;
  91. for (i = 0; i < NODE_HASH_SIZE; i++) {
  92. while ((node = tree->node_hash[i])) {
  93. tree->node_hash[i] = node->next_hash;
  94. if (atomic_read(&node->refcnt))
  95. printk(KERN_CRIT "hfs: node %d:%d still has %d user(s)!\n",
  96. node->tree->cnid, node->this, atomic_read(&node->refcnt));
  97. hfs_bnode_free(node);
  98. tree->node_hash_cnt--;
  99. }
  100. }
  101. iput(tree->inode);
  102. kfree(tree);
  103. }
  104. void hfs_btree_write(struct hfs_btree *tree)
  105. {
  106. struct hfs_btree_header_rec *head;
  107. struct hfs_bnode *node;
  108. struct page *page;
  109. node = hfs_bnode_find(tree, 0);
  110. if (IS_ERR(node))
  111. /* panic? */
  112. return;
  113. /* Load the header */
  114. page = node->page[0];
  115. head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
  116. head->root = cpu_to_be32(tree->root);
  117. head->leaf_count = cpu_to_be32(tree->leaf_count);
  118. head->leaf_head = cpu_to_be32(tree->leaf_head);
  119. head->leaf_tail = cpu_to_be32(tree->leaf_tail);
  120. head->node_count = cpu_to_be32(tree->node_count);
  121. head->free_nodes = cpu_to_be32(tree->free_nodes);
  122. head->attributes = cpu_to_be32(tree->attributes);
  123. head->depth = cpu_to_be16(tree->depth);
  124. kunmap(page);
  125. set_page_dirty(page);
  126. hfs_bnode_put(node);
  127. }
/*
 * Allocate node @idx as a new map (bitmap) node and chain it after
 * @prev in the tree's map-node list, updating both the in-core link
 * and the on-disk 'next' field of @prev.  Returns the new node or an
 * ERR_PTR from hfs_bnode_create().
 */
static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
        struct hfs_btree *tree = prev->tree;
        struct hfs_bnode *node;
        struct hfs_bnode_desc desc;
        __be32 cnid;

        node = hfs_bnode_create(tree, idx);
        if (IS_ERR(node))
                return node;

        /* The new map node consumes one of the free nodes itself */
        tree->free_nodes--;
        prev->next = idx;
        cnid = cpu_to_be32(idx);
        /* Persist the forward link in prev's on-disk node descriptor */
        hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

        node->type = HFS_NODE_MAP;
        node->num_recs = 1;
        hfs_bnode_clear(node, 0, tree->node_size);

        /* Build a fresh on-disk descriptor for the map node */
        desc.next = 0;
        desc.prev = 0;
        desc.type = HFS_NODE_MAP;
        desc.height = 0;
        desc.num_recs = cpu_to_be16(1);
        desc.reserved = 0;
        hfs_bnode_write(node, &desc, 0, sizeof(desc));

        /* First bitmap word at offset 14 (right after the 14-byte
         * descriptor); 0x8000 presumably marks this map node's own bit
         * as allocated — TODO confirm against TN1150 bitmap layout. */
        hfs_bnode_write_u16(node, 14, 0x8000);
        /* Record-offset table at the end of the node: record 0 starts
         * at 14, and ends at node_size - 6 (leaving room for the two
         * offset entries themselves) — NOTE(review): verify. */
        hfs_bnode_write_u16(node, tree->node_size - 2, 14);
        hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

        return node;
}
/*
 * Allocate a free node in the B*Tree: extend the tree file if no free
 * nodes remain, then scan the allocation bitmap (record 2 of the
 * header node, continued in record 0 of chained map nodes) for the
 * first clear bit, set it, and create the corresponding bnode.
 * Returns the new bnode or an ERR_PTR.
 */
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
        struct hfs_bnode *node, *next_node;
        struct page **pagep;
        u32 nidx, idx;
        u16 off, len;
        u8 *data, byte, m;
        int i;

        /* No free nodes: grow the tree file by one allocation block and
         * account the newly-covered nodes as free. */
        while (!tree->free_nodes) {
                struct inode *inode = tree->inode;
                u32 count;
                int res;

                res = hfsplus_file_extend(inode);
                if (res)
                        return ERR_PTR(res);
                HFSPLUS_I(inode).phys_size = inode->i_size =
                        (loff_t)HFSPLUS_I(inode).alloc_blocks <<
                        HFSPLUS_SB(tree->sb).alloc_blksz_shift;
                HFSPLUS_I(inode).fs_blocks = HFSPLUS_I(inode).alloc_blocks <<
                        HFSPLUS_SB(tree->sb).fs_shift;
                inode_set_bytes(inode, inode->i_size);
                count = inode->i_size >> tree->node_size_shift;
                /* free_nodes is 0 here, so plain assignment is correct */
                tree->free_nodes = count - tree->node_count;
                tree->node_count = count;
        }

        /* Start at the header node; its record 2 is the first bitmap */
        nidx = 0;
        node = hfs_bnode_find(tree, nidx);
        if (IS_ERR(node))
                return node;
        len = hfs_brec_lenoff(node, 2, &off);

        /* Translate record offset to a (page, in-page offset) pair */
        off += node->page_offset;
        pagep = node->page + (off >> PAGE_CACHE_SHIFT);
        data = kmap(*pagep);
        off &= ~PAGE_CACHE_MASK;
        idx = 0;

        for (;;) {
                /* Scan this bitmap record byte by byte */
                while (len) {
                        byte = data[off];
                        if (byte != 0xff) {
                                /* At least one clear bit; find it MSB-first */
                                for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
                                        if (!(byte & m)) {
                                                idx += i;
                                                data[off] |= m;
                                                set_page_dirty(*pagep);
                                                kunmap(*pagep);
                                                tree->free_nodes--;
                                                mark_inode_dirty(tree->inode);
                                                hfs_bnode_put(node);
                                                return hfs_bnode_create(tree, idx);
                                        }
                                }
                        }
                        /* Advance to the next byte, crossing page
                         * boundaries with a fresh kmap as needed */
                        if (++off >= PAGE_CACHE_SIZE) {
                                kunmap(*pagep);
                                data = kmap(*++pagep);
                                off = 0;
                        }
                        idx += 8;
                        len--;
                }
                kunmap(*pagep);

                /* Record exhausted: follow the map-node chain, creating
                 * a new map node at the end if necessary */
                nidx = node->next;
                if (!nidx) {
                        printk(KERN_DEBUG "hfs: create new bmap node...\n");
                        next_node = hfs_bmap_new_bmap(node, idx);
                } else
                        next_node = hfs_bnode_find(tree, nidx);
                hfs_bnode_put(node);
                if (IS_ERR(next_node))
                        return next_node;
                node = next_node;

                /* Map nodes keep their bitmap in record 0 */
                len = hfs_brec_lenoff(node, 0, &off);
                off += node->page_offset;
                pagep = node->page + (off >> PAGE_CACHE_SHIFT);
                data = kmap(*pagep);
                off &= ~PAGE_CACHE_MASK;
        }
}
  234. void hfs_bmap_free(struct hfs_bnode *node)
  235. {
  236. struct hfs_btree *tree;
  237. struct page *page;
  238. u16 off, len;
  239. u32 nidx;
  240. u8 *data, byte, m;
  241. dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this);
  242. BUG_ON(!node->this);
  243. tree = node->tree;
  244. nidx = node->this;
  245. node = hfs_bnode_find(tree, 0);
  246. if (IS_ERR(node))
  247. return;
  248. len = hfs_brec_lenoff(node, 2, &off);
  249. while (nidx >= len * 8) {
  250. u32 i;
  251. nidx -= len * 8;
  252. i = node->next;
  253. hfs_bnode_put(node);
  254. if (!i) {
  255. /* panic */;
  256. printk(KERN_CRIT "hfs: unable to free bnode %u. bmap not found!\n", node->this);
  257. return;
  258. }
  259. node = hfs_bnode_find(tree, i);
  260. if (IS_ERR(node))
  261. return;
  262. if (node->type != HFS_NODE_MAP) {
  263. /* panic */;
  264. printk(KERN_CRIT "hfs: invalid bmap found! (%u,%d)\n", node->this, node->type);
  265. hfs_bnode_put(node);
  266. return;
  267. }
  268. len = hfs_brec_lenoff(node, 0, &off);
  269. }
  270. off += node->page_offset + nidx / 8;
  271. page = node->page[off >> PAGE_CACHE_SHIFT];
  272. data = kmap(page);
  273. off &= ~PAGE_CACHE_MASK;
  274. m = 1 << (~nidx & 7);
  275. byte = data[off];
  276. if (!(byte & m)) {
  277. printk(KERN_CRIT "hfs: trying to free free bnode %u(%d)\n", node->this, node->type);
  278. kunmap(page);
  279. hfs_bnode_put(node);
  280. return;
  281. }
  282. data[off] = byte & ~m;
  283. set_page_dirty(page);
  284. kunmap(page);
  285. hfs_bnode_put(node);
  286. tree->free_nodes++;
  287. mark_inode_dirty(tree->inode);
  288. }