  1. /*
  2. * linux/fs/hfsplus/btree.c
  3. *
  4. * Copyright (C) 2001
  5. * Brad Boyer (flar@allandria.com)
  6. * (C) 2003 Ardis Technologies <roman@ardistech.com>
  7. *
  8. * Handle opening/closing btree
  9. */
  10. #include <linux/slab.h>
  11. #include <linux/pagemap.h>
  12. #include <linux/log2.h>
  13. #include "hfsplus_fs.h"
  14. #include "hfsplus_raw.h"
  15. /* Get a reference to a B*Tree and do some initial checks */
  16. struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
  17. {
  18. struct hfs_btree *tree;
  19. struct hfs_btree_header_rec *head;
  20. struct address_space *mapping;
  21. struct inode *inode;
  22. struct page *page;
  23. unsigned int size;
  24. tree = kzalloc(sizeof(*tree), GFP_KERNEL);
  25. if (!tree)
  26. return NULL;
  27. mutex_init(&tree->tree_lock);
  28. spin_lock_init(&tree->hash_lock);
  29. tree->sb = sb;
  30. tree->cnid = id;
  31. inode = hfsplus_iget(sb, id);
  32. if (IS_ERR(inode))
  33. goto free_tree;
  34. tree->inode = inode;
  35. if (!HFSPLUS_I(tree->inode)->first_blocks) {
  36. printk(KERN_ERR
  37. "hfs: invalid btree extent records (0 size).\n");
  38. goto free_inode;
  39. }
  40. mapping = tree->inode->i_mapping;
  41. page = read_mapping_page(mapping, 0, NULL);
  42. if (IS_ERR(page))
  43. goto free_inode;
  44. /* Load the header */
  45. head = (struct hfs_btree_header_rec *)(kmap(page) +
  46. sizeof(struct hfs_bnode_desc));
  47. tree->root = be32_to_cpu(head->root);
  48. tree->leaf_count = be32_to_cpu(head->leaf_count);
  49. tree->leaf_head = be32_to_cpu(head->leaf_head);
  50. tree->leaf_tail = be32_to_cpu(head->leaf_tail);
  51. tree->node_count = be32_to_cpu(head->node_count);
  52. tree->free_nodes = be32_to_cpu(head->free_nodes);
  53. tree->attributes = be32_to_cpu(head->attributes);
  54. tree->node_size = be16_to_cpu(head->node_size);
  55. tree->max_key_len = be16_to_cpu(head->max_key_len);
  56. tree->depth = be16_to_cpu(head->depth);
  57. /* Verify the tree and set the correct compare function */
  58. switch (id) {
  59. case HFSPLUS_EXT_CNID:
  60. if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) {
  61. printk(KERN_ERR "hfs: invalid extent max_key_len %d\n",
  62. tree->max_key_len);
  63. goto fail_page;
  64. }
  65. if (tree->attributes & HFS_TREE_VARIDXKEYS) {
  66. printk(KERN_ERR "hfs: invalid extent btree flag\n");
  67. goto fail_page;
  68. }
  69. tree->keycmp = hfsplus_ext_cmp_key;
  70. break;
  71. case HFSPLUS_CAT_CNID:
  72. if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) {
  73. printk(KERN_ERR "hfs: invalid catalog max_key_len %d\n",
  74. tree->max_key_len);
  75. goto fail_page;
  76. }
  77. if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
  78. printk(KERN_ERR "hfs: invalid catalog btree flag\n");
  79. goto fail_page;
  80. }
  81. if (test_bit(HFSPLUS_SB_HFSX, &HFSPLUS_SB(sb)->flags) &&
  82. (head->key_type == HFSPLUS_KEY_BINARY))
  83. tree->keycmp = hfsplus_cat_bin_cmp_key;
  84. else {
  85. tree->keycmp = hfsplus_cat_case_cmp_key;
  86. set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
  87. }
  88. break;
  89. case HFSPLUS_ATTR_CNID:
  90. if (tree->max_key_len != HFSPLUS_ATTR_KEYLEN - sizeof(u16)) {
  91. printk(KERN_ERR "hfs: invalid attributes max_key_len %d\n",
  92. tree->max_key_len);
  93. goto fail_page;
  94. }
  95. tree->keycmp = hfsplus_attr_bin_cmp_key;
  96. break;
  97. default:
  98. printk(KERN_ERR "hfs: unknown B*Tree requested\n");
  99. goto fail_page;
  100. }
  101. if (!(tree->attributes & HFS_TREE_BIGKEYS)) {
  102. printk(KERN_ERR "hfs: invalid btree flag\n");
  103. goto fail_page;
  104. }
  105. size = tree->node_size;
  106. if (!is_power_of_2(size))
  107. goto fail_page;
  108. if (!tree->node_count)
  109. goto fail_page;
  110. tree->node_size_shift = ffs(size) - 1;
  111. tree->pages_per_bnode =
  112. (tree->node_size + PAGE_CACHE_SIZE - 1) >>
  113. PAGE_CACHE_SHIFT;
  114. kunmap(page);
  115. page_cache_release(page);
  116. return tree;
  117. fail_page:
  118. page_cache_release(page);
  119. free_inode:
  120. tree->inode->i_mapping->a_ops = &hfsplus_aops;
  121. iput(tree->inode);
  122. free_tree:
  123. kfree(tree);
  124. return NULL;
  125. }
  126. /* Release resources used by a btree */
  127. void hfs_btree_close(struct hfs_btree *tree)
  128. {
  129. struct hfs_bnode *node;
  130. int i;
  131. if (!tree)
  132. return;
  133. for (i = 0; i < NODE_HASH_SIZE; i++) {
  134. while ((node = tree->node_hash[i])) {
  135. tree->node_hash[i] = node->next_hash;
  136. if (atomic_read(&node->refcnt))
  137. printk(KERN_CRIT "hfs: node %d:%d "
  138. "still has %d user(s)!\n",
  139. node->tree->cnid, node->this,
  140. atomic_read(&node->refcnt));
  141. hfs_bnode_free(node);
  142. tree->node_hash_cnt--;
  143. }
  144. }
  145. iput(tree->inode);
  146. kfree(tree);
  147. }
  148. int hfs_btree_write(struct hfs_btree *tree)
  149. {
  150. struct hfs_btree_header_rec *head;
  151. struct hfs_bnode *node;
  152. struct page *page;
  153. node = hfs_bnode_find(tree, 0);
  154. if (IS_ERR(node))
  155. /* panic? */
  156. return -EIO;
  157. /* Load the header */
  158. page = node->page[0];
  159. head = (struct hfs_btree_header_rec *)(kmap(page) +
  160. sizeof(struct hfs_bnode_desc));
  161. head->root = cpu_to_be32(tree->root);
  162. head->leaf_count = cpu_to_be32(tree->leaf_count);
  163. head->leaf_head = cpu_to_be32(tree->leaf_head);
  164. head->leaf_tail = cpu_to_be32(tree->leaf_tail);
  165. head->node_count = cpu_to_be32(tree->node_count);
  166. head->free_nodes = cpu_to_be32(tree->free_nodes);
  167. head->attributes = cpu_to_be32(tree->attributes);
  168. head->depth = cpu_to_be16(tree->depth);
  169. kunmap(page);
  170. set_page_dirty(page);
  171. hfs_bnode_put(node);
  172. return 0;
  173. }
  174. static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
  175. {
  176. struct hfs_btree *tree = prev->tree;
  177. struct hfs_bnode *node;
  178. struct hfs_bnode_desc desc;
  179. __be32 cnid;
  180. node = hfs_bnode_create(tree, idx);
  181. if (IS_ERR(node))
  182. return node;
  183. tree->free_nodes--;
  184. prev->next = idx;
  185. cnid = cpu_to_be32(idx);
  186. hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
  187. node->type = HFS_NODE_MAP;
  188. node->num_recs = 1;
  189. hfs_bnode_clear(node, 0, tree->node_size);
  190. desc.next = 0;
  191. desc.prev = 0;
  192. desc.type = HFS_NODE_MAP;
  193. desc.height = 0;
  194. desc.num_recs = cpu_to_be16(1);
  195. desc.reserved = 0;
  196. hfs_bnode_write(node, &desc, 0, sizeof(desc));
  197. hfs_bnode_write_u16(node, 14, 0x8000);
  198. hfs_bnode_write_u16(node, tree->node_size - 2, 14);
  199. hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);
  200. return node;
  201. }
/*
 * Allocate a node from the btree's allocation bitmap.
 *
 * Scans the map records (starting with record 2 of the header node,
 * then record 0 of each chained map node) for a clear bit, sets it,
 * and returns the newly created bnode for that index. If the tree has
 * no free nodes, the underlying file is grown first. Returns an
 * ERR_PTR on failure.
 */
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
	struct hfs_bnode *node, *next_node;
	struct page **pagep;
	u32 nidx, idx;
	unsigned off;
	u16 off16;
	u16 len;
	u8 *data, byte, m;
	int i;

	/*
	 * Grow the tree file until at least one free node exists, keeping
	 * the cached inode sizes and the tree's node accounting in sync.
	 */
	while (!tree->free_nodes) {
		struct inode *inode = tree->inode;
		struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
		u32 count;
		int res;

		res = hfsplus_file_extend(inode);
		if (res)
			return ERR_PTR(res);
		hip->phys_size = inode->i_size =
			(loff_t)hip->alloc_blocks <<
				HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
		hip->fs_blocks =
			hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
		inode_set_bytes(inode, inode->i_size);
		count = inode->i_size >> tree->node_size_shift;
		/* Newly added nodes beyond the old node_count are free. */
		tree->free_nodes = count - tree->node_count;
		tree->node_count = count;
	}

	/* Begin with the map record (record 2) of the header node. */
	nidx = 0;
	node = hfs_bnode_find(tree, nidx);
	if (IS_ERR(node))
		return node;
	len = hfs_brec_lenoff(node, 2, &off16);

	/* Translate the record offset into a page + in-page offset. */
	off = off16;
	off += node->page_offset;
	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
	data = kmap(*pagep);
	off &= ~PAGE_CACHE_MASK;
	idx = 0;

	for (;;) {
		/* Scan the current map record a byte at a time. */
		while (len) {
			byte = data[off];
			if (byte != 0xff) {
				/* Find the first clear bit and claim it. */
				for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
					if (!(byte & m)) {
						idx += i;
						data[off] |= m;
						set_page_dirty(*pagep);
						kunmap(*pagep);
						tree->free_nodes--;
						mark_inode_dirty(tree->inode);
						hfs_bnode_put(node);
						return hfs_bnode_create(tree,
							idx);
					}
				}
			}
			/* Step to the next byte, crossing pages as needed. */
			if (++off >= PAGE_CACHE_SIZE) {
				kunmap(*pagep);
				data = kmap(*++pagep);
				off = 0;
			}
			idx += 8;
			len--;
		}
		kunmap(*pagep);

		/* This record is exhausted; follow (or grow) the map chain. */
		nidx = node->next;
		if (!nidx) {
			dprint(DBG_BNODE_MOD, "hfs: create new bmap node.\n");
			next_node = hfs_bmap_new_bmap(node, idx);
		} else
			next_node = hfs_bnode_find(tree, nidx);
		hfs_bnode_put(node);
		if (IS_ERR(next_node))
			return next_node;
		node = next_node;

		/* Dedicated map nodes keep their bitmap in record 0. */
		len = hfs_brec_lenoff(node, 0, &off16);
		off = off16;
		off += node->page_offset;
		pagep = node->page + (off >> PAGE_CACHE_SHIFT);
		data = kmap(*pagep);
		off &= ~PAGE_CACHE_MASK;
	}
}
  286. void hfs_bmap_free(struct hfs_bnode *node)
  287. {
  288. struct hfs_btree *tree;
  289. struct page *page;
  290. u16 off, len;
  291. u32 nidx;
  292. u8 *data, byte, m;
  293. dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this);
  294. BUG_ON(!node->this);
  295. tree = node->tree;
  296. nidx = node->this;
  297. node = hfs_bnode_find(tree, 0);
  298. if (IS_ERR(node))
  299. return;
  300. len = hfs_brec_lenoff(node, 2, &off);
  301. while (nidx >= len * 8) {
  302. u32 i;
  303. nidx -= len * 8;
  304. i = node->next;
  305. hfs_bnode_put(node);
  306. if (!i) {
  307. /* panic */;
  308. printk(KERN_CRIT "hfs: unable to free bnode %u. "
  309. "bmap not found!\n",
  310. node->this);
  311. return;
  312. }
  313. node = hfs_bnode_find(tree, i);
  314. if (IS_ERR(node))
  315. return;
  316. if (node->type != HFS_NODE_MAP) {
  317. /* panic */;
  318. printk(KERN_CRIT "hfs: invalid bmap found! "
  319. "(%u,%d)\n",
  320. node->this, node->type);
  321. hfs_bnode_put(node);
  322. return;
  323. }
  324. len = hfs_brec_lenoff(node, 0, &off);
  325. }
  326. off += node->page_offset + nidx / 8;
  327. page = node->page[off >> PAGE_CACHE_SHIFT];
  328. data = kmap(page);
  329. off &= ~PAGE_CACHE_MASK;
  330. m = 1 << (~nidx & 7);
  331. byte = data[off];
  332. if (!(byte & m)) {
  333. printk(KERN_CRIT "hfs: trying to free free bnode "
  334. "%u(%d)\n",
  335. node->this, node->type);
  336. kunmap(page);
  337. hfs_bnode_put(node);
  338. return;
  339. }
  340. data[off] = byte & ~m;
  341. set_page_dirty(page);
  342. kunmap(page);
  343. hfs_bnode_put(node);
  344. tree->free_nodes++;
  345. mark_inode_dirty(tree->inode);
  346. }