dm-array.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808
  1. /*
  2. * Copyright (C) 2012 Red Hat, Inc.
  3. *
  4. * This file is released under the GPL.
  5. */
  6. #include "dm-array.h"
  7. #include "dm-space-map.h"
  8. #include "dm-transaction-manager.h"
  9. #include <linux/export.h>
  10. #include <linux/device-mapper.h>
  11. #define DM_MSG_PREFIX "array"
  12. /*----------------------------------------------------------------*/
  13. /*
  14. * The array is implemented as a fully populated btree, which points to
  15. * blocks that contain the packed values. This is more space efficient
  16. * than just using a btree since we don't store 1 key per value.
  17. */
/*
 * On-disk header of an array block.  All fields are little-endian; the
 * packed values follow immediately after this header.  The layout is
 * persistent format — do not reorder or resize fields.
 */
struct array_block {
	__le32 csum;		/* Checksum of everything after this field. */
	__le32 max_entries;	/* Capacity of this block. */
	__le32 nr_entries;	/* Values currently held. */
	__le32 value_size;	/* Size in bytes of each value. */
	__le64 blocknr; /* Block this node is supposed to live in. */
} __packed;
  25. /*----------------------------------------------------------------*/
  26. /*
  27. * Validator methods. As usual we calculate a checksum, and also write the
  28. * block location into the header (paranoia about ssds remapping areas by
  29. * mistake).
  30. */
  31. #define CSUM_XOR 595846735
  32. static void array_block_prepare_for_write(struct dm_block_validator *v,
  33. struct dm_block *b,
  34. size_t size_of_block)
  35. {
  36. struct array_block *bh_le = dm_block_data(b);
  37. bh_le->blocknr = cpu_to_le64(dm_block_location(b));
  38. bh_le->csum = cpu_to_le32(dm_bm_checksum(&bh_le->max_entries,
  39. size_of_block - sizeof(__le32),
  40. CSUM_XOR));
  41. }
  42. static int array_block_check(struct dm_block_validator *v,
  43. struct dm_block *b,
  44. size_t size_of_block)
  45. {
  46. struct array_block *bh_le = dm_block_data(b);
  47. __le32 csum_disk;
  48. if (dm_block_location(b) != le64_to_cpu(bh_le->blocknr)) {
  49. DMERR_LIMIT("array_block_check failed: blocknr %llu != wanted %llu",
  50. (unsigned long long) le64_to_cpu(bh_le->blocknr),
  51. (unsigned long long) dm_block_location(b));
  52. return -ENOTBLK;
  53. }
  54. csum_disk = cpu_to_le32(dm_bm_checksum(&bh_le->max_entries,
  55. size_of_block - sizeof(__le32),
  56. CSUM_XOR));
  57. if (csum_disk != bh_le->csum) {
  58. DMERR_LIMIT("array_block_check failed: csum %u != wanted %u",
  59. (unsigned) le32_to_cpu(csum_disk),
  60. (unsigned) le32_to_cpu(bh_le->csum));
  61. return -EILSEQ;
  62. }
  63. return 0;
  64. }
/* Validator wired into every read/write of an array block. */
static struct dm_block_validator array_validator = {
	.name = "array",
	.prepare_for_write = array_block_prepare_for_write,
	.check = array_block_check
};
  70. /*----------------------------------------------------------------*/
  71. /*
  72. * Functions for manipulating the array blocks.
  73. */
  74. /*
  75. * Returns a pointer to a value within an array block.
  76. *
  77. * index - The index into _this_ specific block.
  78. */
  79. static void *element_at(struct dm_array_info *info, struct array_block *ab,
  80. unsigned index)
  81. {
  82. unsigned char *entry = (unsigned char *) (ab + 1);
  83. entry += index * info->value_type.size;
  84. return entry;
  85. }
  86. /*
  87. * Utility function that calls one of the value_type methods on every value
  88. * in an array block.
  89. */
  90. static void on_entries(struct dm_array_info *info, struct array_block *ab,
  91. void (*fn)(void *, const void *))
  92. {
  93. unsigned i, nr_entries = le32_to_cpu(ab->nr_entries);
  94. for (i = 0; i < nr_entries; i++)
  95. fn(info->value_type.context, element_at(info, ab, i));
  96. }
  97. /*
  98. * Increment every value in an array block.
  99. */
  100. static void inc_ablock_entries(struct dm_array_info *info, struct array_block *ab)
  101. {
  102. struct dm_btree_value_type *vt = &info->value_type;
  103. if (vt->inc)
  104. on_entries(info, ab, vt->inc);
  105. }
  106. /*
  107. * Decrement every value in an array block.
  108. */
  109. static void dec_ablock_entries(struct dm_array_info *info, struct array_block *ab)
  110. {
  111. struct dm_btree_value_type *vt = &info->value_type;
  112. if (vt->dec)
  113. on_entries(info, ab, vt->dec);
  114. }
  115. /*
  116. * Each array block can hold this many values.
  117. */
  118. static uint32_t calc_max_entries(size_t value_size, size_t size_of_block)
  119. {
  120. return (size_of_block - sizeof(struct array_block)) / value_size;
  121. }
  122. /*
  123. * Allocate a new array block. The caller will need to unlock block.
  124. */
  125. static int alloc_ablock(struct dm_array_info *info, size_t size_of_block,
  126. uint32_t max_entries,
  127. struct dm_block **block, struct array_block **ab)
  128. {
  129. int r;
  130. r = dm_tm_new_block(info->btree_info.tm, &array_validator, block);
  131. if (r)
  132. return r;
  133. (*ab) = dm_block_data(*block);
  134. (*ab)->max_entries = cpu_to_le32(max_entries);
  135. (*ab)->nr_entries = cpu_to_le32(0);
  136. (*ab)->value_size = cpu_to_le32(info->value_type.size);
  137. return 0;
  138. }
  139. /*
  140. * Pad an array block out with a particular value. Every instance will
  141. * cause an increment of the value_type. new_nr must always be more than
  142. * the current number of entries.
  143. */
  144. static void fill_ablock(struct dm_array_info *info, struct array_block *ab,
  145. const void *value, unsigned new_nr)
  146. {
  147. unsigned i;
  148. uint32_t nr_entries;
  149. struct dm_btree_value_type *vt = &info->value_type;
  150. BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
  151. BUG_ON(new_nr < le32_to_cpu(ab->nr_entries));
  152. nr_entries = le32_to_cpu(ab->nr_entries);
  153. for (i = nr_entries; i < new_nr; i++) {
  154. if (vt->inc)
  155. vt->inc(vt->context, value);
  156. memcpy(element_at(info, ab, i), value, vt->size);
  157. }
  158. ab->nr_entries = cpu_to_le32(new_nr);
  159. }
  160. /*
  161. * Remove some entries from the back of an array block. Every value
  162. * removed will be decremented. new_nr must be <= the current number of
  163. * entries.
  164. */
  165. static void trim_ablock(struct dm_array_info *info, struct array_block *ab,
  166. unsigned new_nr)
  167. {
  168. unsigned i;
  169. uint32_t nr_entries;
  170. struct dm_btree_value_type *vt = &info->value_type;
  171. BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
  172. BUG_ON(new_nr > le32_to_cpu(ab->nr_entries));
  173. nr_entries = le32_to_cpu(ab->nr_entries);
  174. for (i = nr_entries; i > new_nr; i--)
  175. if (vt->dec)
  176. vt->dec(vt->context, element_at(info, ab, i - 1));
  177. ab->nr_entries = cpu_to_le32(new_nr);
  178. }
  179. /*
  180. * Read locks a block, and coerces it to an array block. The caller must
  181. * unlock 'block' when finished.
  182. */
  183. static int get_ablock(struct dm_array_info *info, dm_block_t b,
  184. struct dm_block **block, struct array_block **ab)
  185. {
  186. int r;
  187. r = dm_tm_read_lock(info->btree_info.tm, b, &array_validator, block);
  188. if (r)
  189. return r;
  190. *ab = dm_block_data(*block);
  191. return 0;
  192. }
  193. /*
  194. * Unlocks an array block.
  195. */
/*
 * Unlocks an array block previously locked by get_ablock(),
 * alloc_ablock() or shadow_ablock().
 */
static int unlock_ablock(struct dm_array_info *info, struct dm_block *block)
{
	return dm_tm_unlock(info->btree_info.tm, block);
}
  200. /*----------------------------------------------------------------*/
  201. /*
  202. * Btree manipulation.
  203. */
  204. /*
  205. * Looks up an array block in the btree, and then read locks it.
  206. *
  207. * index is the index of the index of the array_block, (ie. the array index
  208. * / max_entries).
  209. */
  210. static int lookup_ablock(struct dm_array_info *info, dm_block_t root,
  211. unsigned index, struct dm_block **block,
  212. struct array_block **ab)
  213. {
  214. int r;
  215. uint64_t key = index;
  216. __le64 block_le;
  217. r = dm_btree_lookup(&info->btree_info, root, &key, &block_le);
  218. if (r)
  219. return r;
  220. return get_ablock(info, le64_to_cpu(block_le), block, ab);
  221. }
  222. /*
  223. * Insert an array block into the btree. The block is _not_ unlocked.
  224. */
  225. static int insert_ablock(struct dm_array_info *info, uint64_t index,
  226. struct dm_block *block, dm_block_t *root)
  227. {
  228. __le64 block_le = cpu_to_le64(dm_block_location(block));
  229. __dm_bless_for_disk(block_le);
  230. return dm_btree_insert(&info->btree_info, *root, &index, &block_le, root);
  231. }
  232. /*
  233. * Looks up an array block in the btree. Then shadows it, and updates the
  234. * btree to point to this new shadow. 'root' is an input/output parameter
  235. * for both the current root block, and the new one.
  236. */
  237. static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
  238. unsigned index, struct dm_block **block,
  239. struct array_block **ab)
  240. {
  241. int r, inc;
  242. uint64_t key = index;
  243. dm_block_t b;
  244. __le64 block_le;
  245. /*
  246. * lookup
  247. */
  248. r = dm_btree_lookup(&info->btree_info, *root, &key, &block_le);
  249. if (r)
  250. return r;
  251. b = le64_to_cpu(block_le);
  252. /*
  253. * shadow
  254. */
  255. r = dm_tm_shadow_block(info->btree_info.tm, b,
  256. &array_validator, block, &inc);
  257. if (r)
  258. return r;
  259. *ab = dm_block_data(*block);
  260. if (inc)
  261. inc_ablock_entries(info, *ab);
  262. /*
  263. * Reinsert.
  264. *
  265. * The shadow op will often be a noop. Only insert if it really
  266. * copied data.
  267. */
  268. if (dm_block_location(*block) != b)
  269. r = insert_ablock(info, index, *block, root);
  270. return r;
  271. }
  272. /*
  273. * Allocate an new array block, and fill it with some values.
  274. */
  275. static int insert_new_ablock(struct dm_array_info *info, size_t size_of_block,
  276. uint32_t max_entries,
  277. unsigned block_index, uint32_t nr,
  278. const void *value, dm_block_t *root)
  279. {
  280. int r;
  281. struct dm_block *block;
  282. struct array_block *ab;
  283. r = alloc_ablock(info, size_of_block, max_entries, &block, &ab);
  284. if (r)
  285. return r;
  286. fill_ablock(info, ab, value, nr);
  287. r = insert_ablock(info, block_index, block, root);
  288. unlock_ablock(info, block);
  289. return r;
  290. }
  291. static int insert_full_ablocks(struct dm_array_info *info, size_t size_of_block,
  292. unsigned begin_block, unsigned end_block,
  293. unsigned max_entries, const void *value,
  294. dm_block_t *root)
  295. {
  296. int r = 0;
  297. for (; !r && begin_block != end_block; begin_block++)
  298. r = insert_new_ablock(info, size_of_block, max_entries, begin_block, max_entries, value, root);
  299. return r;
  300. }
  301. /*
  302. * There are a bunch of functions involved with resizing an array. This
  303. * structure holds information that commonly needed by them. Purely here
  304. * to reduce parameter count.
  305. */
/*
 * There are a bunch of functions involved with resizing an array.  This
 * structure holds information that is commonly needed by them.  Purely
 * here to reduce parameter count.
 */
struct resize {
	/*
	 * Describes the array.
	 */
	struct dm_array_info *info;

	/*
	 * The current root of the array.  This gets updated.
	 */
	dm_block_t root;

	/*
	 * Metadata block size.  Used to calculate the nr entries in an
	 * array block.
	 */
	size_t size_of_block;

	/*
	 * Maximum nr entries in an array block.
	 */
	unsigned max_entries;

	/*
	 * nr of completely full blocks in the array.
	 *
	 * 'old' refers to before the resize, 'new' after.
	 */
	unsigned old_nr_full_blocks, new_nr_full_blocks;

	/*
	 * Number of entries in the final block.  0 iff only full blocks in
	 * the array.
	 */
	unsigned old_nr_entries_in_last_block, new_nr_entries_in_last_block;

	/*
	 * The default value used when growing the array.
	 */
	const void *value;
};
  340. /*
  341. * Removes a consecutive set of array blocks from the btree. The values
  342. * in block are decremented as a side effect of the btree remove.
  343. *
  344. * begin_index - the index of the first array block to remove.
  345. * end_index - the one-past-the-end value. ie. this block is not removed.
  346. */
  347. static int drop_blocks(struct resize *resize, unsigned begin_index,
  348. unsigned end_index)
  349. {
  350. int r;
  351. while (begin_index != end_index) {
  352. uint64_t key = begin_index++;
  353. r = dm_btree_remove(&resize->info->btree_info, resize->root,
  354. &key, &resize->root);
  355. if (r)
  356. return r;
  357. }
  358. return 0;
  359. }
  360. /*
  361. * Calculates how many blocks are needed for the array.
  362. */
  363. static unsigned total_nr_blocks_needed(unsigned nr_full_blocks,
  364. unsigned nr_entries_in_last_block)
  365. {
  366. return nr_full_blocks + (nr_entries_in_last_block ? 1 : 0);
  367. }
  368. /*
  369. * Shrink an array.
  370. */
  371. static int shrink(struct resize *resize)
  372. {
  373. int r;
  374. unsigned begin, end;
  375. struct dm_block *block;
  376. struct array_block *ab;
  377. /*
  378. * Lose some blocks from the back?
  379. */
  380. if (resize->new_nr_full_blocks < resize->old_nr_full_blocks) {
  381. begin = total_nr_blocks_needed(resize->new_nr_full_blocks,
  382. resize->new_nr_entries_in_last_block);
  383. end = total_nr_blocks_needed(resize->old_nr_full_blocks,
  384. resize->old_nr_entries_in_last_block);
  385. r = drop_blocks(resize, begin, end);
  386. if (r)
  387. return r;
  388. }
  389. /*
  390. * Trim the new tail block
  391. */
  392. if (resize->new_nr_entries_in_last_block) {
  393. r = shadow_ablock(resize->info, &resize->root,
  394. resize->new_nr_full_blocks, &block, &ab);
  395. if (r)
  396. return r;
  397. trim_ablock(resize->info, ab, resize->new_nr_entries_in_last_block);
  398. unlock_ablock(resize->info, block);
  399. }
  400. return 0;
  401. }
  402. /*
  403. * Grow an array.
  404. */
  405. static int grow_extend_tail_block(struct resize *resize, uint32_t new_nr_entries)
  406. {
  407. int r;
  408. struct dm_block *block;
  409. struct array_block *ab;
  410. r = shadow_ablock(resize->info, &resize->root,
  411. resize->old_nr_full_blocks, &block, &ab);
  412. if (r)
  413. return r;
  414. fill_ablock(resize->info, ab, resize->value, new_nr_entries);
  415. unlock_ablock(resize->info, block);
  416. return r;
  417. }
/*
 * Grow helper: append a brand new, partially filled tail block holding
 * new_nr_entries_in_last_block copies of the default value.
 */
static int grow_add_tail_block(struct resize *resize)
{
	return insert_new_ablock(resize->info, resize->size_of_block,
				 resize->max_entries,
				 resize->new_nr_full_blocks,
				 resize->new_nr_entries_in_last_block,
				 resize->value, &resize->root);
}
  426. static int grow_needs_more_blocks(struct resize *resize)
  427. {
  428. int r;
  429. if (resize->old_nr_entries_in_last_block > 0) {
  430. r = grow_extend_tail_block(resize, resize->max_entries);
  431. if (r)
  432. return r;
  433. }
  434. r = insert_full_ablocks(resize->info, resize->size_of_block,
  435. resize->old_nr_full_blocks,
  436. resize->new_nr_full_blocks,
  437. resize->max_entries, resize->value,
  438. &resize->root);
  439. if (r)
  440. return r;
  441. if (resize->new_nr_entries_in_last_block)
  442. r = grow_add_tail_block(resize);
  443. return r;
  444. }
  445. static int grow(struct resize *resize)
  446. {
  447. if (resize->new_nr_full_blocks > resize->old_nr_full_blocks)
  448. return grow_needs_more_blocks(resize);
  449. else if (resize->old_nr_entries_in_last_block)
  450. return grow_extend_tail_block(resize, resize->new_nr_entries_in_last_block);
  451. else
  452. return grow_add_tail_block(resize);
  453. }
  454. /*----------------------------------------------------------------*/
  455. /*
  456. * These are the value_type functions for the btree elements, which point
  457. * to array blocks.
  458. */
  459. static void block_inc(void *context, const void *value)
  460. {
  461. __le64 block_le;
  462. struct dm_array_info *info = context;
  463. memcpy(&block_le, value, sizeof(block_le));
  464. dm_tm_inc(info->btree_info.tm, le64_to_cpu(block_le));
  465. }
/*
 * btree value_type dec: drop a transaction-manager reference on the
 * ablock a btree leaf points at.  If this is the last reference, the
 * values held inside the ablock are decremented first.
 */
static void block_dec(void *context, const void *value)
{
	int r;
	uint64_t b;
	__le64 block_le;
	uint32_t ref_count;
	struct dm_block *block;
	struct array_block *ab;
	struct dm_array_info *info = context;

	/* Copy the leaf value out before use. */
	memcpy(&block_le, value, sizeof(block_le));
	b = le64_to_cpu(block_le);

	r = dm_tm_ref(info->btree_info.tm, b, &ref_count);
	if (r) {
		DMERR_LIMIT("couldn't get reference count for block %llu",
			    (unsigned long long) b);
		return;
	}

	if (ref_count == 1) {
		/*
		 * We're about to drop the last reference to this ablock.
		 * So we need to decrement the ref count of the contents.
		 */
		r = get_ablock(info, b, &block, &ab);
		if (r) {
			DMERR_LIMIT("couldn't get array block %llu",
				    (unsigned long long) b);
			return;
		}

		dec_ablock_entries(info, ab);
		unlock_ablock(info, block);
	}

	dm_tm_dec(info->btree_info.tm, b);
}
  499. static int block_equal(void *context, const void *value1, const void *value2)
  500. {
  501. return !memcmp(value1, value2, sizeof(__le64));
  502. }
  503. /*----------------------------------------------------------------*/
  504. void dm_array_info_init(struct dm_array_info *info,
  505. struct dm_transaction_manager *tm,
  506. struct dm_btree_value_type *vt)
  507. {
  508. struct dm_btree_value_type *bvt = &info->btree_info.value_type;
  509. memcpy(&info->value_type, vt, sizeof(info->value_type));
  510. info->btree_info.tm = tm;
  511. info->btree_info.levels = 1;
  512. bvt->context = info;
  513. bvt->size = sizeof(__le64);
  514. bvt->inc = block_inc;
  515. bvt->dec = block_dec;
  516. bvt->equal = block_equal;
  517. }
  518. EXPORT_SYMBOL_GPL(dm_array_info_init);
/* Create an empty array: just an empty btree. */
int dm_array_empty(struct dm_array_info *info, dm_block_t *root)
{
	return dm_btree_empty(&info->btree_info, root);
}
EXPORT_SYMBOL_GPL(dm_array_empty);
  524. static int array_resize(struct dm_array_info *info, dm_block_t root,
  525. uint32_t old_size, uint32_t new_size,
  526. const void *value, dm_block_t *new_root)
  527. {
  528. int r;
  529. struct resize resize;
  530. if (old_size == new_size)
  531. return 0;
  532. resize.info = info;
  533. resize.root = root;
  534. resize.size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
  535. resize.max_entries = calc_max_entries(info->value_type.size,
  536. resize.size_of_block);
  537. resize.old_nr_full_blocks = old_size / resize.max_entries;
  538. resize.old_nr_entries_in_last_block = old_size % resize.max_entries;
  539. resize.new_nr_full_blocks = new_size / resize.max_entries;
  540. resize.new_nr_entries_in_last_block = new_size % resize.max_entries;
  541. resize.value = value;
  542. r = ((new_size > old_size) ? grow : shrink)(&resize);
  543. if (r)
  544. return r;
  545. *new_root = resize.root;
  546. return 0;
  547. }
  548. int dm_array_resize(struct dm_array_info *info, dm_block_t root,
  549. uint32_t old_size, uint32_t new_size,
  550. const void *value, dm_block_t *new_root)
  551. __dm_written_to_disk(value)
  552. {
  553. int r = array_resize(info, root, old_size, new_size, value, new_root);
  554. __dm_unbless_for_disk(value);
  555. return r;
  556. }
  557. EXPORT_SYMBOL_GPL(dm_array_resize);
/*
 * Delete the whole array.  The btree delete decrements each ablock
 * via the value_type dec callback (block_dec).
 */
int dm_array_del(struct dm_array_info *info, dm_block_t root)
{
	return dm_btree_del(&info->btree_info, root);
}
EXPORT_SYMBOL_GPL(dm_array_del);
  563. int dm_array_get_value(struct dm_array_info *info, dm_block_t root,
  564. uint32_t index, void *value_le)
  565. {
  566. int r;
  567. struct dm_block *block;
  568. struct array_block *ab;
  569. size_t size_of_block;
  570. unsigned entry, max_entries;
  571. size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
  572. max_entries = calc_max_entries(info->value_type.size, size_of_block);
  573. r = lookup_ablock(info, root, index / max_entries, &block, &ab);
  574. if (r)
  575. return r;
  576. entry = index % max_entries;
  577. if (entry >= le32_to_cpu(ab->nr_entries))
  578. r = -ENODATA;
  579. else
  580. memcpy(value_le, element_at(info, ab, entry),
  581. info->value_type.size);
  582. unlock_ablock(info, block);
  583. return r;
  584. }
  585. EXPORT_SYMBOL_GPL(dm_array_get_value);
/*
 * Overwrite one entry of the array with 'value'.  The containing ablock
 * is shadowed first, so *new_root receives the (possibly new) root.
 * Returns -ENODATA if the index lies beyond the populated entries.
 */
static int array_set_value(struct dm_array_info *info, dm_block_t root,
			   uint32_t index, const void *value, dm_block_t *new_root)
{
	int r;
	struct dm_block *block;
	struct array_block *ab;
	size_t size_of_block;
	unsigned max_entries;
	unsigned entry;
	void *old_value;
	struct dm_btree_value_type *vt = &info->value_type;

	size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
	max_entries = calc_max_entries(info->value_type.size, size_of_block);

	r = shadow_ablock(info, &root, index / max_entries, &block, &ab);
	if (r)
		return r;
	/* shadow_ablock may have changed the root; report it either way. */
	*new_root = root;

	entry = index % max_entries;
	if (entry >= le32_to_cpu(ab->nr_entries)) {
		r = -ENODATA;
		goto out;
	}

	old_value = element_at(info, ab, entry);
	/*
	 * Only adjust reference counts when the value actually changes
	 * (or when we can't tell because there's no equal method).
	 */
	if (vt->dec &&
	    (!vt->equal || !vt->equal(vt->context, old_value, value))) {
		vt->dec(vt->context, old_value);
		if (vt->inc)
			vt->inc(vt->context, value);
	}

	memcpy(old_value, value, info->value_type.size);

out:
	unlock_ablock(info, block);
	return r;
}
  620. int dm_array_set_value(struct dm_array_info *info, dm_block_t root,
  621. uint32_t index, const void *value, dm_block_t *new_root)
  622. __dm_written_to_disk(value)
  623. {
  624. int r;
  625. r = array_set_value(info, root, index, value, new_root);
  626. __dm_unbless_for_disk(value);
  627. return r;
  628. }
  629. EXPORT_SYMBOL_GPL(dm_array_set_value);
/* Context threaded through dm_btree_walk() to walk_ablock(). */
struct walk_info {
	struct dm_array_info *info;	/* The array being walked. */
	int (*fn)(void *context, uint64_t key, void *leaf);	/* Per-value callback. */
	void *context;			/* Caller's opaque context for fn. */
};
  635. static int walk_ablock(void *context, uint64_t *keys, void *leaf)
  636. {
  637. struct walk_info *wi = context;
  638. int r;
  639. unsigned i;
  640. __le64 block_le;
  641. unsigned nr_entries, max_entries;
  642. struct dm_block *block;
  643. struct array_block *ab;
  644. memcpy(&block_le, leaf, sizeof(block_le));
  645. r = get_ablock(wi->info, le64_to_cpu(block_le), &block, &ab);
  646. if (r)
  647. return r;
  648. max_entries = le32_to_cpu(ab->max_entries);
  649. nr_entries = le32_to_cpu(ab->nr_entries);
  650. for (i = 0; i < nr_entries; i++) {
  651. r = wi->fn(wi->context, keys[0] * max_entries + i,
  652. element_at(wi->info, ab, i));
  653. if (r)
  654. break;
  655. }
  656. unlock_ablock(wi->info, block);
  657. return r;
  658. }
  659. int dm_array_walk(struct dm_array_info *info, dm_block_t root,
  660. int (*fn)(void *, uint64_t key, void *leaf),
  661. void *context)
  662. {
  663. struct walk_info wi;
  664. wi.info = info;
  665. wi.fn = fn;
  666. wi.context = context;
  667. return dm_btree_walk(&info->btree_info, root, walk_ablock, &wi);
  668. }
  669. EXPORT_SYMBOL_GPL(dm_array_walk);
  670. /*----------------------------------------------------------------*/