dm-btree-internal.h

/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#ifndef DM_BTREE_INTERNAL_H
#define DM_BTREE_INTERNAL_H

#include "dm-btree.h"

/*----------------------------------------------------------------*/

/*
 * We'll need 2 accessor functions for n->csum and n->blocknr
 * to support dm-btree-spine.c in that case.
 */

enum node_flags {
	INTERNAL_NODE = 1,
	LEAF_NODE = 1 << 1
};

/*
 * Every btree node begins with this structure. Make sure it's a multiple
 * of 8-bytes in size, otherwise the 64bit keys will be mis-aligned.
 */
struct node_header {
	__le32 csum;
	__le32 flags;
	__le64 blocknr; /* Block this node is supposed to live in. */
	__le32 nr_entries;
	__le32 max_entries;
	__le32 value_size;
	__le32 padding;
} __packed;

struct node {
	struct node_header header;
	__le64 keys[0];
} __packed;
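
/*
 * Illustrative sketch, not part of the original header: the 8-byte
 * alignment requirement noted above could be asserted at build time.
 * BUILD_BUG_ON() (from <linux/bug.h>) has to sit inside a function, so
 * a hypothetical helper is shown; with the fields above the header is
 * 32 bytes, so the check passes.
 *
 *	static inline void check_node_header_size(void)
 *	{
 *		BUILD_BUG_ON(sizeof(struct node_header) % 8);
 *	}
 */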

void inc_children(struct dm_transaction_manager *tm, struct node *n,
		  struct dm_btree_value_type *vt);

int new_block(struct dm_btree_info *info, struct dm_block **result);
int unlock_block(struct dm_btree_info *info, struct dm_block *b);

/*
 * Spines keep track of the rolling locks. There are 2 variants, read-only
 * and one that uses shadowing. These are separate structs to allow the
 * type checker to spot misuse, for example accidentally calling read_lock
 * on a shadow spine.
 */
struct ro_spine {
	struct dm_btree_info *info;

	int count;
	struct dm_block *nodes[2];
};

void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info);
int exit_ro_spine(struct ro_spine *s);
int ro_step(struct ro_spine *s, dm_block_t new_child);
struct node *ro_node(struct ro_spine *s);
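
/*
 * Usage sketch, illustrative rather than part of the original header:
 * a read-only walk from a root block down to a leaf. It leans on
 * lower_bound() and value64() from further down this file, assumes
 * info, block and key come from the caller, and trims error handling.
 *
 *	struct ro_spine spine;
 *	struct node *node;
 *	uint32_t flags;
 *	int i, r;
 *
 *	init_ro_spine(&spine, info);
 *	do {
 *		r = ro_step(&spine, block);
 *		if (r < 0)
 *			break;
 *
 *		node = ro_node(&spine);
 *		i = lower_bound(node, key);
 *		if (i < 0)
 *			break;
 *
 *		flags = le32_to_cpu(node->header.flags);
 *		if (flags & INTERNAL_NODE)
 *			block = value64(node, i);
 *	} while (!(flags & LEAF_NODE));
 *	exit_ro_spine(&spine);
 */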

struct shadow_spine {
	struct dm_btree_info *info;

	int count;
	struct dm_block *nodes[2];

	dm_block_t root;
};

void init_shadow_spine(struct shadow_spine *s, struct dm_btree_info *info);
int exit_shadow_spine(struct shadow_spine *s);

int shadow_step(struct shadow_spine *s, dm_block_t b,
		struct dm_btree_value_type *vt);

/*
 * The spine must have at least one entry before calling this.
 */
struct dm_block *shadow_current(struct shadow_spine *s);

/*
 * The spine must have at least two entries before calling this.
 */
struct dm_block *shadow_parent(struct shadow_spine *s);

int shadow_has_parent(struct shadow_spine *s);

int shadow_root(struct shadow_spine *s);
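
/*
 * Usage sketch, illustrative rather than part of the original header:
 * the shadowing variant used on the write path. Each shadow_step()
 * shadows the next node on the way down so it can safely be modified;
 * dm_block_data() is assumed to be visible via dm-btree.h's includes.
 * info, root, key and vt come from the caller, and error handling is
 * trimmed.
 *
 *	struct shadow_spine spine;
 *	struct node *node;
 *	int i;
 *
 *	init_shadow_spine(&spine, info);
 *	for (;;) {
 *		if (shadow_step(&spine, root, vt) < 0)
 *			break;
 *
 *		node = dm_block_data(shadow_current(&spine));
 *		i = lower_bound(node, key);
 *		if (le32_to_cpu(node->header.flags) & LEAF_NODE)
 *			break;
 *
 *		if (i < 0)
 *			i = 0;
 *		root = value64(node, i);
 *	}
 *	exit_shadow_spine(&spine);
 */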

/*
 * Some inlines.
 */
static inline __le64 *key_ptr(struct node *n, uint32_t index)
{
	return n->keys + index;
}

static inline void *value_base(struct node *n)
{
	return &n->keys[le32_to_cpu(n->header.max_entries)];
}

/*
 * FIXME: Now that value size is stored in node we don't need the third parm.
 */
static inline void *value_ptr(struct node *n, uint32_t index, size_t value_size)
{
	BUG_ON(value_size != le32_to_cpu(n->header.value_size));
	return value_base(n) + (value_size * index);
}

/*
 * Assumes the values are suitably-aligned and converts to core format.
 */
static inline uint64_t value64(struct node *n, uint32_t index)
{
	__le64 *values_le = value_base(n);

	return le64_to_cpu(values_le[index]);
}

/*
 * Searching for a key within a single node.
 */
int lower_bound(struct node *n, uint64_t key);
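
/*
 * Sketch, illustrative rather than part of the original header:
 * lower_bound() returns the index of the largest key that is less than
 * or equal to the one searched for, or a negative value when every key
 * in the node is greater. Reading an entry out of a leaf holding 8-byte
 * values might then look like this, with n and key supplied by the
 * caller:
 *
 *	int i = lower_bound(n, key);
 *
 *	if (i >= 0 && le64_to_cpu(*key_ptr(n, i)) == key) {
 *		uint64_t v = value64(n, i);
 *		...
 *	}
 */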

extern struct dm_block_validator btree_node_validator;

#endif	/* DM_BTREE_INTERNAL_H */