/*
 * linux/fs/hfsplus/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/slab.h>
#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
{
        struct hfs_btree *tree;
        struct hfs_btree_header_rec *head;
        struct address_space *mapping;
        struct page *page;
        unsigned int size;

        tree = kmalloc(sizeof(*tree), GFP_KERNEL);
        if (!tree)
                return NULL;
        memset(tree, 0, sizeof(*tree));

        init_MUTEX(&tree->tree_lock);
        spin_lock_init(&tree->hash_lock);

        /* Set the correct compare function */
        tree->sb = sb;
        tree->cnid = id;
        if (id == HFSPLUS_EXT_CNID) {
                tree->keycmp = hfsplus_ext_cmp_key;
        } else if (id == HFSPLUS_CAT_CNID) {
                tree->keycmp = hfsplus_cat_cmp_key;
        } else {
                printk("HFS+-fs: unknown B*Tree requested\n");
                goto free_tree;
        }
        tree->inode = iget(sb, id);
        if (!tree->inode)
                goto free_tree;

        mapping = tree->inode->i_mapping;
        page = read_cache_page(mapping, 0,
                               (filler_t *)mapping->a_ops->readpage, NULL);
        if (IS_ERR(page))
                goto free_tree;

        /* Load the header */
        head = (struct hfs_btree_header_rec *)(kmap(page) +
                sizeof(struct hfs_bnode_desc));
        tree->root = be32_to_cpu(head->root);
        tree->leaf_count = be32_to_cpu(head->leaf_count);
        tree->leaf_head = be32_to_cpu(head->leaf_head);
        tree->leaf_tail = be32_to_cpu(head->leaf_tail);
        tree->node_count = be32_to_cpu(head->node_count);
        tree->free_nodes = be32_to_cpu(head->free_nodes);
        tree->attributes = be32_to_cpu(head->attributes);
        tree->node_size = be16_to_cpu(head->node_size);
        tree->max_key_len = be16_to_cpu(head->max_key_len);
        tree->depth = be16_to_cpu(head->depth);

        size = tree->node_size;
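        /*
         * Sanity checks: node_size must be a nonzero power of two so that
         * ffs() below yields a usable shift, and a tree with no nodes at
         * all is malformed.
         */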
        if (!size || size & (size - 1))
                goto fail_page;
        if (!tree->node_count)
                goto fail_page;
        tree->node_size_shift = ffs(size) - 1;
        tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >>
                                PAGE_CACHE_SHIFT;

        kunmap(page);
        page_cache_release(page);
        return tree;

 fail_page:
        tree->inode->i_mapping->a_ops = &hfsplus_aops;
        kunmap(page);                   /* the header is still mapped here */
        page_cache_release(page);
 free_tree:
        iput(tree->inode);
        kfree(tree);
        return NULL;
}

/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
        struct hfs_bnode *node;
        int i;

        if (!tree)
                return;

        for (i = 0; i < NODE_HASH_SIZE; i++) {
                while ((node = tree->node_hash[i])) {
                        tree->node_hash[i] = node->next_hash;
                        if (atomic_read(&node->refcnt))
                                printk("HFS+: node %d:%d still has %d user(s)!\n",
                                       node->tree->cnid, node->this,
                                       atomic_read(&node->refcnt));
                        hfs_bnode_free(node);
                        tree->node_hash_cnt--;
                }
        }
        iput(tree->inode);
        kfree(tree);
}
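
/*
 * Flush the in-core header fields back into the header record of node 0.
 * The page is only marked dirty here; writeback happens later.
 */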
void hfs_btree_write(struct hfs_btree *tree)
{
        struct hfs_btree_header_rec *head;
        struct hfs_bnode *node;
        struct page *page;

        node = hfs_bnode_find(tree, 0);
        if (IS_ERR(node))
                /* panic? */
                return;

        /* Load the header */
        page = node->page[0];
        head = (struct hfs_btree_header_rec *)(kmap(page) +
                sizeof(struct hfs_bnode_desc));

        head->root = cpu_to_be32(tree->root);
        head->leaf_count = cpu_to_be32(tree->leaf_count);
        head->leaf_head = cpu_to_be32(tree->leaf_head);
        head->leaf_tail = cpu_to_be32(tree->leaf_tail);
        head->node_count = cpu_to_be32(tree->node_count);
        head->free_nodes = cpu_to_be32(tree->free_nodes);
        head->attributes = cpu_to_be32(tree->attributes);
        head->depth = cpu_to_be16(tree->depth);

        kunmap(page);
        set_page_dirty(page);
        hfs_bnode_put(node);
}
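
/*
 * Create a fresh map node at index idx and chain it behind @prev by
 * updating prev's forward link on disk as well as in core.
 */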
static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
        struct hfs_btree *tree = prev->tree;
        struct hfs_bnode *node;
        struct hfs_bnode_desc desc;
        __be32 cnid;

        node = hfs_bnode_create(tree, idx);
        if (IS_ERR(node))
                return node;

        tree->free_nodes--;
        prev->next = idx;
        cnid = cpu_to_be32(idx);
        hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

        node->type = HFS_NODE_MAP;
        node->num_recs = 1;
        hfs_bnode_clear(node, 0, tree->node_size);
        desc.next = 0;
        desc.prev = 0;
        desc.type = HFS_NODE_MAP;
        desc.height = 0;
        desc.num_recs = cpu_to_be16(1);
        desc.reserved = 0;
        hfs_bnode_write(node, &desc, 0, sizeof(desc));
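        /*
         * Lay out the node's single map record by hand: 0x8000 at offset 14
         * (right after the 14-byte node descriptor) sets the record's first
         * bit, marking this map node itself as allocated.  The two u16s at
         * the end of the node form the record-offset table: record 0 starts
         * at offset 14 and free space starts at node_size - 6.
         */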
        hfs_bnode_write_u16(node, 14, 0x8000);
        hfs_bnode_write_u16(node, tree->node_size - 2, 14);
        hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

        return node;
}
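
/*
 * Allocate a b-tree node: find the first clear bit in the allocation
 * bitmap, set it and return the newly created node.  The tree file is
 * extended first if no free nodes are left.
 */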
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
        struct hfs_bnode *node, *next_node;
        struct page **pagep;
        u32 nidx, idx;
        u16 off, len;
        u8 *data, byte, m;
        int i;

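        /*
         * Out of free nodes: extend the underlying file, then account the
         * newly added blocks as free b-tree nodes.
         */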
        while (!tree->free_nodes) {
                struct inode *inode = tree->inode;
                u32 count;
                int res;

                res = hfsplus_file_extend(inode);
                if (res)
                        return ERR_PTR(res);
                HFSPLUS_I(inode).phys_size = inode->i_size =
                        (loff_t)HFSPLUS_I(inode).alloc_blocks <<
                        HFSPLUS_SB(tree->sb).alloc_blksz_shift;
                HFSPLUS_I(inode).fs_blocks = HFSPLUS_I(inode).alloc_blocks <<
                        HFSPLUS_SB(tree->sb).fs_shift;
                inode_set_bytes(inode, inode->i_size);
                count = inode->i_size >> tree->node_size_shift;
                tree->free_nodes = count - tree->node_count;
                tree->node_count = count;
        }

        nidx = 0;
        node = hfs_bnode_find(tree, nidx);
        if (IS_ERR(node))
                return node;

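        /* Record 2 of the header node is the start of the allocation bitmap. */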
        len = hfs_brec_lenoff(node, 2, &off);
        off += node->page_offset;
        pagep = node->page + (off >> PAGE_CACHE_SHIFT);
        data = kmap(*pagep);
        off &= ~PAGE_CACHE_MASK;
        idx = 0;

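        /*
         * Scan the bitmap one byte at a time for a clear bit, following the
         * chain of map nodes as records run out.  idx counts every bit
         * visited, so on a hit it is exactly the index of the free node.
         */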
        for (;;) {
                while (len) {
                        byte = data[off];
                        if (byte != 0xff) {
                                for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
                                        if (!(byte & m)) {
                                                idx += i;
                                                data[off] |= m;
                                                set_page_dirty(*pagep);
                                                kunmap(*pagep);
                                                tree->free_nodes--;
                                                mark_inode_dirty(tree->inode);
                                                hfs_bnode_put(node);
                                                if (!idx) {
                                                        printk("unexpected idx %u (%u)\n",
                                                               idx, node->this);
                                                        BUG();
                                                }
                                                return hfs_bnode_create(tree, idx);
                                        }
                                }
                        }
                        if (++off >= PAGE_CACHE_SIZE) {
                                kunmap(*pagep);
                                data = kmap(*++pagep);
                                off = 0;
                        }
                        idx += 8;
                        len--;
                }
                kunmap(*pagep);
                nidx = node->next;
                if (!nidx) {
                        printk("create new bmap node...\n");
                        next_node = hfs_bmap_new_bmap(node, idx);
                } else
                        next_node = hfs_bnode_find(tree, nidx);
                hfs_bnode_put(node);
                if (IS_ERR(next_node))
                        return next_node;
                node = next_node;

                len = hfs_brec_lenoff(node, 0, &off);
                off += node->page_offset;
                pagep = node->page + (off >> PAGE_CACHE_SHIFT);
                data = kmap(*pagep);
                off &= ~PAGE_CACHE_MASK;
        }
}
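
/*
 * Return a node to the allocation bitmap by clearing its bit; the
 * inverse of hfs_bmap_alloc().
 */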
void hfs_bmap_free(struct hfs_bnode *node)
{
        struct hfs_btree *tree;
        struct page *page;
        u16 off, len;
        u32 nidx;
        u8 *data, byte, m;

        dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this);
        if (!node->this)
                BUG();
        tree = node->tree;
        nidx = node->this;
        node = hfs_bnode_find(tree, 0);
        if (IS_ERR(node))
                return;
        len = hfs_brec_lenoff(node, 2, &off);
        /* Walk the map chain until we reach the record that covers nidx. */
        while (nidx >= len * 8) {
                u32 i;

                nidx -= len * 8;
                i = node->next;
                if (!i) {
                        /* panic */;
                        printk("HFS: unable to free bnode %u. bmap not found!\n",
                               node->this);
                        hfs_bnode_put(node);
                        return;
                }
                hfs_bnode_put(node);
                node = hfs_bnode_find(tree, i);
                if (IS_ERR(node))
                        return;
                if (node->type != HFS_NODE_MAP) {
                        /* panic */;
                        printk("HFS: invalid bmap found! (%u,%d)\n",
                               node->this, node->type);
                        hfs_bnode_put(node);
                        return;
                }
                len = hfs_brec_lenoff(node, 0, &off);
        }
        off += node->page_offset + nidx / 8;
        page = node->page[off >> PAGE_CACHE_SHIFT];
        data = kmap(page);
        off &= ~PAGE_CACHE_MASK;
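        /* The bitmap is used MSB-first: bit 0 of a byte is mask 0x80. */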
        m = 1 << (~nidx & 7);
        byte = data[off];
        if (!(byte & m)) {
                printk("HFS: trying to free free bnode %u(%d)\n",
                       node->this, node->type);
                kunmap(page);
                hfs_bnode_put(node);
                return;
        }
        data[off] = byte & ~m;
        set_page_dirty(page);
        kunmap(page);
        hfs_bnode_put(node);
        tree->free_nodes++;
        mark_inode_dirty(tree->inode);
}