assoc_array.c 53 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745
  1. /* Generic associative array implementation.
  2. *
  3. * See Documentation/assoc_array.txt for information.
  4. *
  5. * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
  6. * Written by David Howells (dhowells@redhat.com)
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU General Public Licence
  10. * as published by the Free Software Foundation; either version
  11. * 2 of the Licence, or (at your option) any later version.
  12. */
  13. //#define DEBUG
  14. #include <linux/slab.h>
  15. #include <linux/assoc_array_priv.h>
  16. /*
  17. * Iterate over an associative array. The caller must hold the RCU read lock
  18. * or better.
  19. */
  20. static int assoc_array_subtree_iterate(const struct assoc_array_ptr *root,
  21. const struct assoc_array_ptr *stop,
  22. int (*iterator)(const void *leaf,
  23. void *iterator_data),
  24. void *iterator_data)
  25. {
  26. const struct assoc_array_shortcut *shortcut;
  27. const struct assoc_array_node *node;
  28. const struct assoc_array_ptr *cursor, *ptr, *parent;
  29. unsigned long has_meta;
  30. int slot, ret;
  31. cursor = root;
  32. begin_node:
  33. if (assoc_array_ptr_is_shortcut(cursor)) {
  34. /* Descend through a shortcut */
  35. shortcut = assoc_array_ptr_to_shortcut(cursor);
  36. smp_read_barrier_depends();
  37. cursor = ACCESS_ONCE(shortcut->next_node);
  38. }
  39. node = assoc_array_ptr_to_node(cursor);
  40. smp_read_barrier_depends();
  41. slot = 0;
  42. /* We perform two passes of each node.
  43. *
  44. * The first pass does all the leaves in this node. This means we
  45. * don't miss any leaves if the node is split up by insertion whilst
  46. * we're iterating over the branches rooted here (we may, however, see
  47. * some leaves twice).
  48. */
  49. has_meta = 0;
  50. for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
  51. ptr = ACCESS_ONCE(node->slots[slot]);
  52. has_meta |= (unsigned long)ptr;
  53. if (ptr && assoc_array_ptr_is_leaf(ptr)) {
  54. /* We need a barrier between the read of the pointer
  55. * and dereferencing the pointer - but only if we are
  56. * actually going to dereference it.
  57. */
  58. smp_read_barrier_depends();
  59. /* Invoke the callback */
  60. ret = iterator(assoc_array_ptr_to_leaf(ptr),
  61. iterator_data);
  62. if (ret)
  63. return ret;
  64. }
  65. }
  66. /* The second pass attends to all the metadata pointers. If we follow
  67. * one of these we may find that we don't come back here, but rather go
  68. * back to a replacement node with the leaves in a different layout.
  69. *
  70. * We are guaranteed to make progress, however, as the slot number for
  71. * a particular portion of the key space cannot change - and we
  72. * continue at the back pointer + 1.
  73. */
  74. if (!(has_meta & ASSOC_ARRAY_PTR_META_TYPE))
  75. goto finished_node;
  76. slot = 0;
  77. continue_node:
  78. node = assoc_array_ptr_to_node(cursor);
  79. smp_read_barrier_depends();
  80. for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
  81. ptr = ACCESS_ONCE(node->slots[slot]);
  82. if (assoc_array_ptr_is_meta(ptr)) {
  83. cursor = ptr;
  84. goto begin_node;
  85. }
  86. }
  87. finished_node:
  88. /* Move up to the parent (may need to skip back over a shortcut) */
  89. parent = ACCESS_ONCE(node->back_pointer);
  90. slot = node->parent_slot;
  91. if (parent == stop)
  92. return 0;
  93. if (assoc_array_ptr_is_shortcut(parent)) {
  94. shortcut = assoc_array_ptr_to_shortcut(parent);
  95. smp_read_barrier_depends();
  96. cursor = parent;
  97. parent = ACCESS_ONCE(shortcut->back_pointer);
  98. slot = shortcut->parent_slot;
  99. if (parent == stop)
  100. return 0;
  101. }
  102. /* Ascend to next slot in parent node */
  103. cursor = parent;
  104. slot++;
  105. goto continue_node;
  106. }
  107. /**
  108. * assoc_array_iterate - Pass all objects in the array to a callback
  109. * @array: The array to iterate over.
  110. * @iterator: The callback function.
  111. * @iterator_data: Private data for the callback function.
  112. *
  113. * Iterate over all the objects in an associative array. Each one will be
  114. * presented to the iterator function.
  115. *
  116. * If the array is being modified concurrently with the iteration then it is
  117. * possible that some objects in the array will be passed to the iterator
  118. * callback more than once - though every object should be passed at least
  119. * once. If this is undesirable then the caller must lock against modification
  120. * for the duration of this function.
  121. *
  122. * The function will return 0 if no objects were in the array or else it will
  123. * return the result of the last iterator function called. Iteration stops
  124. * immediately if any call to the iteration function results in a non-zero
  125. * return.
  126. *
  127. * The caller should hold the RCU read lock or better if concurrent
  128. * modification is possible.
  129. */
  130. int assoc_array_iterate(const struct assoc_array *array,
  131. int (*iterator)(const void *object,
  132. void *iterator_data),
  133. void *iterator_data)
  134. {
  135. struct assoc_array_ptr *root = ACCESS_ONCE(array->root);
  136. if (!root)
  137. return 0;
  138. return assoc_array_subtree_iterate(root, NULL, iterator, iterator_data);
  139. }
  140. enum assoc_array_walk_status {
  141. assoc_array_walk_tree_empty,
  142. assoc_array_walk_found_terminal_node,
  143. assoc_array_walk_found_wrong_shortcut,
  144. } status;
  145. struct assoc_array_walk_result {
  146. struct {
  147. struct assoc_array_node *node; /* Node in which leaf might be found */
  148. int level;
  149. int slot;
  150. } terminal_node;
  151. struct {
  152. struct assoc_array_shortcut *shortcut;
  153. int level;
  154. int sc_level;
  155. unsigned long sc_segments;
  156. unsigned long dissimilarity;
  157. } wrong_shortcut;
  158. };
  159. /*
  160. * Navigate through the internal tree looking for the closest node to the key.
  161. */
  162. static enum assoc_array_walk_status
  163. assoc_array_walk(const struct assoc_array *array,
  164. const struct assoc_array_ops *ops,
  165. const void *index_key,
  166. struct assoc_array_walk_result *result)
  167. {
  168. struct assoc_array_shortcut *shortcut;
  169. struct assoc_array_node *node;
  170. struct assoc_array_ptr *cursor, *ptr;
  171. unsigned long sc_segments, dissimilarity;
  172. unsigned long segments;
  173. int level, sc_level, next_sc_level;
  174. int slot;
  175. pr_devel("-->%s()\n", __func__);
  176. cursor = ACCESS_ONCE(array->root);
  177. if (!cursor)
  178. return assoc_array_walk_tree_empty;
  179. level = 0;
  180. /* Use segments from the key for the new leaf to navigate through the
  181. * internal tree, skipping through nodes and shortcuts that are on
  182. * route to the destination. Eventually we'll come to a slot that is
  183. * either empty or contains a leaf at which point we've found a node in
  184. * which the leaf we're looking for might be found or into which it
  185. * should be inserted.
  186. */
  187. jumped:
  188. segments = ops->get_key_chunk(index_key, level);
  189. pr_devel("segments[%d]: %lx\n", level, segments);
  190. if (assoc_array_ptr_is_shortcut(cursor))
  191. goto follow_shortcut;
  192. consider_node:
  193. node = assoc_array_ptr_to_node(cursor);
  194. smp_read_barrier_depends();
  195. slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
  196. slot &= ASSOC_ARRAY_FAN_MASK;
  197. ptr = ACCESS_ONCE(node->slots[slot]);
  198. pr_devel("consider slot %x [ix=%d type=%lu]\n",
  199. slot, level, (unsigned long)ptr & 3);
  200. if (!assoc_array_ptr_is_meta(ptr)) {
  201. /* The node doesn't have a node/shortcut pointer in the slot
  202. * corresponding to the index key that we have to follow.
  203. */
  204. result->terminal_node.node = node;
  205. result->terminal_node.level = level;
  206. result->terminal_node.slot = slot;
  207. pr_devel("<--%s() = terminal_node\n", __func__);
  208. return assoc_array_walk_found_terminal_node;
  209. }
  210. if (assoc_array_ptr_is_node(ptr)) {
  211. /* There is a pointer to a node in the slot corresponding to
  212. * this index key segment, so we need to follow it.
  213. */
  214. cursor = ptr;
  215. level += ASSOC_ARRAY_LEVEL_STEP;
  216. if ((level & ASSOC_ARRAY_KEY_CHUNK_MASK) != 0)
  217. goto consider_node;
  218. goto jumped;
  219. }
  220. /* There is a shortcut in the slot corresponding to the index key
  221. * segment. We follow the shortcut if its partial index key matches
  222. * this leaf's. Otherwise we need to split the shortcut.
  223. */
  224. cursor = ptr;
  225. follow_shortcut:
  226. shortcut = assoc_array_ptr_to_shortcut(cursor);
  227. smp_read_barrier_depends();
  228. pr_devel("shortcut to %d\n", shortcut->skip_to_level);
  229. sc_level = level + ASSOC_ARRAY_LEVEL_STEP;
  230. BUG_ON(sc_level > shortcut->skip_to_level);
  231. do {
  232. /* Check the leaf against the shortcut's index key a word at a
  233. * time, trimming the final word (the shortcut stores the index
  234. * key completely from the root to the shortcut's target).
  235. */
  236. if ((sc_level & ASSOC_ARRAY_KEY_CHUNK_MASK) == 0)
  237. segments = ops->get_key_chunk(index_key, sc_level);
  238. sc_segments = shortcut->index_key[sc_level >> ASSOC_ARRAY_KEY_CHUNK_SHIFT];
  239. dissimilarity = segments ^ sc_segments;
  240. if (round_up(sc_level, ASSOC_ARRAY_KEY_CHUNK_SIZE) > shortcut->skip_to_level) {
  241. /* Trim segments that are beyond the shortcut */
  242. int shift = shortcut->skip_to_level & ASSOC_ARRAY_KEY_CHUNK_MASK;
  243. dissimilarity &= ~(ULONG_MAX << shift);
  244. next_sc_level = shortcut->skip_to_level;
  245. } else {
  246. next_sc_level = sc_level + ASSOC_ARRAY_KEY_CHUNK_SIZE;
  247. next_sc_level = round_down(next_sc_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
  248. }
  249. if (dissimilarity != 0) {
  250. /* This shortcut points elsewhere */
  251. result->wrong_shortcut.shortcut = shortcut;
  252. result->wrong_shortcut.level = level;
  253. result->wrong_shortcut.sc_level = sc_level;
  254. result->wrong_shortcut.sc_segments = sc_segments;
  255. result->wrong_shortcut.dissimilarity = dissimilarity;
  256. return assoc_array_walk_found_wrong_shortcut;
  257. }
  258. sc_level = next_sc_level;
  259. } while (sc_level < shortcut->skip_to_level);
  260. /* The shortcut matches the leaf's index to this point. */
  261. cursor = ACCESS_ONCE(shortcut->next_node);
  262. if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) {
  263. level = sc_level;
  264. goto jumped;
  265. } else {
  266. level = sc_level;
  267. goto consider_node;
  268. }
  269. }
  270. /**
  271. * assoc_array_find - Find an object by index key
  272. * @array: The associative array to search.
  273. * @ops: The operations to use.
  274. * @index_key: The key to the object.
  275. *
  276. * Find an object in an associative array by walking through the internal tree
  277. * to the node that should contain the object and then searching the leaves
  278. * there. NULL is returned if the requested object was not found in the array.
  279. *
  280. * The caller must hold the RCU read lock or better.
  281. */
  282. void *assoc_array_find(const struct assoc_array *array,
  283. const struct assoc_array_ops *ops,
  284. const void *index_key)
  285. {
  286. struct assoc_array_walk_result result;
  287. const struct assoc_array_node *node;
  288. const struct assoc_array_ptr *ptr;
  289. const void *leaf;
  290. int slot;
  291. if (assoc_array_walk(array, ops, index_key, &result) !=
  292. assoc_array_walk_found_terminal_node)
  293. return NULL;
  294. node = result.terminal_node.node;
  295. smp_read_barrier_depends();
  296. /* If the target key is available to us, it's has to be pointed to by
  297. * the terminal node.
  298. */
  299. for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
  300. ptr = ACCESS_ONCE(node->slots[slot]);
  301. if (ptr && assoc_array_ptr_is_leaf(ptr)) {
  302. /* We need a barrier between the read of the pointer
  303. * and dereferencing the pointer - but only if we are
  304. * actually going to dereference it.
  305. */
  306. leaf = assoc_array_ptr_to_leaf(ptr);
  307. smp_read_barrier_depends();
  308. if (ops->compare_object(leaf, index_key))
  309. return (void *)leaf;
  310. }
  311. }
  312. return NULL;
  313. }
  314. /*
  315. * Destructively iterate over an associative array. The caller must prevent
  316. * other simultaneous accesses.
  317. */
  318. static void assoc_array_destroy_subtree(struct assoc_array_ptr *root,
  319. const struct assoc_array_ops *ops)
  320. {
  321. struct assoc_array_shortcut *shortcut;
  322. struct assoc_array_node *node;
  323. struct assoc_array_ptr *cursor, *parent = NULL;
  324. int slot = -1;
  325. pr_devel("-->%s()\n", __func__);
  326. cursor = root;
  327. if (!cursor) {
  328. pr_devel("empty\n");
  329. return;
  330. }
  331. move_to_meta:
  332. if (assoc_array_ptr_is_shortcut(cursor)) {
  333. /* Descend through a shortcut */
  334. pr_devel("[%d] shortcut\n", slot);
  335. BUG_ON(!assoc_array_ptr_is_shortcut(cursor));
  336. shortcut = assoc_array_ptr_to_shortcut(cursor);
  337. BUG_ON(shortcut->back_pointer != parent);
  338. BUG_ON(slot != -1 && shortcut->parent_slot != slot);
  339. parent = cursor;
  340. cursor = shortcut->next_node;
  341. slot = -1;
  342. BUG_ON(!assoc_array_ptr_is_node(cursor));
  343. }
  344. pr_devel("[%d] node\n", slot);
  345. node = assoc_array_ptr_to_node(cursor);
  346. BUG_ON(node->back_pointer != parent);
  347. BUG_ON(slot != -1 && node->parent_slot != slot);
  348. slot = 0;
  349. continue_node:
  350. pr_devel("Node %p [back=%p]\n", node, node->back_pointer);
  351. for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
  352. struct assoc_array_ptr *ptr = node->slots[slot];
  353. if (!ptr)
  354. continue;
  355. if (assoc_array_ptr_is_meta(ptr)) {
  356. parent = cursor;
  357. cursor = ptr;
  358. goto move_to_meta;
  359. }
  360. if (ops) {
  361. pr_devel("[%d] free leaf\n", slot);
  362. ops->free_object(assoc_array_ptr_to_leaf(ptr));
  363. }
  364. }
  365. parent = node->back_pointer;
  366. slot = node->parent_slot;
  367. pr_devel("free node\n");
  368. kfree(node);
  369. if (!parent)
  370. return; /* Done */
  371. /* Move back up to the parent (may need to free a shortcut on
  372. * the way up) */
  373. if (assoc_array_ptr_is_shortcut(parent)) {
  374. shortcut = assoc_array_ptr_to_shortcut(parent);
  375. BUG_ON(shortcut->next_node != cursor);
  376. cursor = parent;
  377. parent = shortcut->back_pointer;
  378. slot = shortcut->parent_slot;
  379. pr_devel("free shortcut\n");
  380. kfree(shortcut);
  381. if (!parent)
  382. return;
  383. BUG_ON(!assoc_array_ptr_is_node(parent));
  384. }
  385. /* Ascend to next slot in parent node */
  386. pr_devel("ascend to %p[%d]\n", parent, slot);
  387. cursor = parent;
  388. node = assoc_array_ptr_to_node(cursor);
  389. slot++;
  390. goto continue_node;
  391. }
  392. /**
  393. * assoc_array_destroy - Destroy an associative array
  394. * @array: The array to destroy.
  395. * @ops: The operations to use.
  396. *
  397. * Discard all metadata and free all objects in an associative array. The
  398. * array will be empty and ready to use again upon completion. This function
  399. * cannot fail.
  400. *
  401. * The caller must prevent all other accesses whilst this takes place as no
  402. * attempt is made to adjust pointers gracefully to permit RCU readlock-holding
  403. * accesses to continue. On the other hand, no memory allocation is required.
  404. */
  405. void assoc_array_destroy(struct assoc_array *array,
  406. const struct assoc_array_ops *ops)
  407. {
  408. assoc_array_destroy_subtree(array->root, ops);
  409. array->root = NULL;
  410. }
  411. /*
  412. * Handle insertion into an empty tree.
  413. */
  414. static bool assoc_array_insert_in_empty_tree(struct assoc_array_edit *edit)
  415. {
  416. struct assoc_array_node *new_n0;
  417. pr_devel("-->%s()\n", __func__);
  418. new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
  419. if (!new_n0)
  420. return false;
  421. edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
  422. edit->leaf_p = &new_n0->slots[0];
  423. edit->adjust_count_on = new_n0;
  424. edit->set[0].ptr = &edit->array->root;
  425. edit->set[0].to = assoc_array_node_to_ptr(new_n0);
  426. pr_devel("<--%s() = ok [no root]\n", __func__);
  427. return true;
  428. }
  429. /*
  430. * Handle insertion into a terminal node.
  431. */
  432. static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
  433. const struct assoc_array_ops *ops,
  434. const void *index_key,
  435. struct assoc_array_walk_result *result)
  436. {
  437. struct assoc_array_shortcut *shortcut, *new_s0;
  438. struct assoc_array_node *node, *new_n0, *new_n1, *side;
  439. struct assoc_array_ptr *ptr;
  440. unsigned long dissimilarity, base_seg, blank;
  441. size_t keylen;
  442. bool have_meta;
  443. int level, diff;
  444. int slot, next_slot, free_slot, i, j;
  445. node = result->terminal_node.node;
  446. level = result->terminal_node.level;
  447. edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = result->terminal_node.slot;
  448. pr_devel("-->%s()\n", __func__);
  449. /* We arrived at a node which doesn't have an onward node or shortcut
  450. * pointer that we have to follow. This means that (a) the leaf we
  451. * want must go here (either by insertion or replacement) or (b) we
  452. * need to split this node and insert in one of the fragments.
  453. */
  454. free_slot = -1;
  455. /* Firstly, we have to check the leaves in this node to see if there's
  456. * a matching one we should replace in place.
  457. */
  458. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
  459. ptr = node->slots[i];
  460. if (!ptr) {
  461. free_slot = i;
  462. continue;
  463. }
  464. if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
  465. pr_devel("replace in slot %d\n", i);
  466. edit->leaf_p = &node->slots[i];
  467. edit->dead_leaf = node->slots[i];
  468. pr_devel("<--%s() = ok [replace]\n", __func__);
  469. return true;
  470. }
  471. }
  472. /* If there is a free slot in this node then we can just insert the
  473. * leaf here.
  474. */
  475. if (free_slot >= 0) {
  476. pr_devel("insert in free slot %d\n", free_slot);
  477. edit->leaf_p = &node->slots[free_slot];
  478. edit->adjust_count_on = node;
  479. pr_devel("<--%s() = ok [insert]\n", __func__);
  480. return true;
  481. }
  482. /* The node has no spare slots - so we're either going to have to split
  483. * it or insert another node before it.
  484. *
  485. * Whatever, we're going to need at least two new nodes - so allocate
  486. * those now. We may also need a new shortcut, but we deal with that
  487. * when we need it.
  488. */
  489. new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
  490. if (!new_n0)
  491. return false;
  492. edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
  493. new_n1 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
  494. if (!new_n1)
  495. return false;
  496. edit->new_meta[1] = assoc_array_node_to_ptr(new_n1);
  497. /* We need to find out how similar the leaves are. */
  498. pr_devel("no spare slots\n");
  499. have_meta = false;
  500. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
  501. ptr = node->slots[i];
  502. if (assoc_array_ptr_is_meta(ptr)) {
  503. edit->segment_cache[i] = 0xff;
  504. have_meta = true;
  505. continue;
  506. }
  507. base_seg = ops->get_object_key_chunk(
  508. assoc_array_ptr_to_leaf(ptr), level);
  509. base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
  510. edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
  511. }
  512. if (have_meta) {
  513. pr_devel("have meta\n");
  514. goto split_node;
  515. }
  516. /* The node contains only leaves */
  517. dissimilarity = 0;
  518. base_seg = edit->segment_cache[0];
  519. for (i = 1; i < ASSOC_ARRAY_FAN_OUT; i++)
  520. dissimilarity |= edit->segment_cache[i] ^ base_seg;
  521. pr_devel("only leaves; dissimilarity=%lx\n", dissimilarity);
  522. if ((dissimilarity & ASSOC_ARRAY_FAN_MASK) == 0) {
  523. /* The old leaves all cluster in the same slot. We will need
  524. * to insert a shortcut if the new node wants to cluster with them.
  525. */
  526. if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
  527. goto all_leaves_cluster_together;
  528. /* Otherwise we can just insert a new node ahead of the old
  529. * one.
  530. */
  531. goto present_leaves_cluster_but_not_new_leaf;
  532. }
  533. split_node:
  534. pr_devel("split node\n");
  535. /* We need to split the current node; we know that the node doesn't
  536. * simply contain a full set of leaves that cluster together (it
  537. * contains meta pointers and/or non-clustering leaves).
  538. *
  539. * We need to expel at least two leaves out of a set consisting of the
  540. * leaves in the node and the new leaf.
  541. *
  542. * We need a new node (n0) to replace the current one and a new node to
  543. * take the expelled nodes (n1).
  544. */
  545. edit->set[0].to = assoc_array_node_to_ptr(new_n0);
  546. new_n0->back_pointer = node->back_pointer;
  547. new_n0->parent_slot = node->parent_slot;
  548. new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
  549. new_n1->parent_slot = -1; /* Need to calculate this */
  550. do_split_node:
  551. pr_devel("do_split_node\n");
  552. new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
  553. new_n1->nr_leaves_on_branch = 0;
  554. /* Begin by finding two matching leaves. There have to be at least two
  555. * that match - even if there are meta pointers - because any leaf that
  556. * would match a slot with a meta pointer in it must be somewhere
  557. * behind that meta pointer and cannot be here. Further, given N
  558. * remaining leaf slots, we now have N+1 leaves to go in them.
  559. */
  560. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
  561. slot = edit->segment_cache[i];
  562. if (slot != 0xff)
  563. for (j = i + 1; j < ASSOC_ARRAY_FAN_OUT + 1; j++)
  564. if (edit->segment_cache[j] == slot)
  565. goto found_slot_for_multiple_occupancy;
  566. }
  567. found_slot_for_multiple_occupancy:
  568. pr_devel("same slot: %x %x [%02x]\n", i, j, slot);
  569. BUG_ON(i >= ASSOC_ARRAY_FAN_OUT);
  570. BUG_ON(j >= ASSOC_ARRAY_FAN_OUT + 1);
  571. BUG_ON(slot >= ASSOC_ARRAY_FAN_OUT);
  572. new_n1->parent_slot = slot;
  573. /* Metadata pointers cannot change slot */
  574. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
  575. if (assoc_array_ptr_is_meta(node->slots[i]))
  576. new_n0->slots[i] = node->slots[i];
  577. else
  578. new_n0->slots[i] = NULL;
  579. BUG_ON(new_n0->slots[slot] != NULL);
  580. new_n0->slots[slot] = assoc_array_node_to_ptr(new_n1);
  581. /* Filter the leaf pointers between the new nodes */
  582. free_slot = -1;
  583. next_slot = 0;
  584. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
  585. if (assoc_array_ptr_is_meta(node->slots[i]))
  586. continue;
  587. if (edit->segment_cache[i] == slot) {
  588. new_n1->slots[next_slot++] = node->slots[i];
  589. new_n1->nr_leaves_on_branch++;
  590. } else {
  591. do {
  592. free_slot++;
  593. } while (new_n0->slots[free_slot] != NULL);
  594. new_n0->slots[free_slot] = node->slots[i];
  595. }
  596. }
  597. pr_devel("filtered: f=%x n=%x\n", free_slot, next_slot);
  598. if (edit->segment_cache[ASSOC_ARRAY_FAN_OUT] != slot) {
  599. do {
  600. free_slot++;
  601. } while (new_n0->slots[free_slot] != NULL);
  602. edit->leaf_p = &new_n0->slots[free_slot];
  603. edit->adjust_count_on = new_n0;
  604. } else {
  605. edit->leaf_p = &new_n1->slots[next_slot++];
  606. edit->adjust_count_on = new_n1;
  607. }
  608. BUG_ON(next_slot <= 1);
  609. edit->set_backpointers_to = assoc_array_node_to_ptr(new_n0);
  610. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
  611. if (edit->segment_cache[i] == 0xff) {
  612. ptr = node->slots[i];
  613. BUG_ON(assoc_array_ptr_is_leaf(ptr));
  614. if (assoc_array_ptr_is_node(ptr)) {
  615. side = assoc_array_ptr_to_node(ptr);
  616. edit->set_backpointers[i] = &side->back_pointer;
  617. } else {
  618. shortcut = assoc_array_ptr_to_shortcut(ptr);
  619. edit->set_backpointers[i] = &shortcut->back_pointer;
  620. }
  621. }
  622. }
  623. ptr = node->back_pointer;
  624. if (!ptr)
  625. edit->set[0].ptr = &edit->array->root;
  626. else if (assoc_array_ptr_is_node(ptr))
  627. edit->set[0].ptr = &assoc_array_ptr_to_node(ptr)->slots[node->parent_slot];
  628. else
  629. edit->set[0].ptr = &assoc_array_ptr_to_shortcut(ptr)->next_node;
  630. edit->excised_meta[0] = assoc_array_node_to_ptr(node);
  631. pr_devel("<--%s() = ok [split node]\n", __func__);
  632. return true;
  633. present_leaves_cluster_but_not_new_leaf:
  634. /* All the old leaves cluster in the same slot, but the new leaf wants
  635. * to go into a different slot, so we create a new node to hold the new
  636. * leaf and a pointer to a new node holding all the old leaves.
  637. */
  638. pr_devel("present leaves cluster but not new leaf\n");
  639. new_n0->back_pointer = node->back_pointer;
  640. new_n0->parent_slot = node->parent_slot;
  641. new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
  642. new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
  643. new_n1->parent_slot = edit->segment_cache[0];
  644. new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch;
  645. edit->adjust_count_on = new_n0;
  646. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
  647. new_n1->slots[i] = node->slots[i];
  648. new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n0);
  649. edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]];
  650. edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot];
  651. edit->set[0].to = assoc_array_node_to_ptr(new_n0);
  652. edit->excised_meta[0] = assoc_array_node_to_ptr(node);
  653. pr_devel("<--%s() = ok [insert node before]\n", __func__);
  654. return true;
  655. all_leaves_cluster_together:
  656. /* All the leaves, new and old, want to cluster together in this node
  657. * in the same slot, so we have to replace this node with a shortcut to
  658. * skip over the identical parts of the key and then place a pair of
  659. * nodes, one inside the other, at the end of the shortcut and
  660. * distribute the keys between them.
  661. *
  662. * Firstly we need to work out where the leaves start diverging as a
  663. * bit position into their keys so that we know how big the shortcut
  664. * needs to be.
  665. *
  666. * We only need to make a single pass of N of the N+1 leaves because if
  667. * any keys differ between themselves at bit X then at least one of
  668. * them must also differ with the base key at bit X or before.
  669. */
  670. pr_devel("all leaves cluster together\n");
  671. diff = INT_MAX;
  672. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
  673. int x = ops->diff_objects(assoc_array_ptr_to_leaf(edit->leaf),
  674. assoc_array_ptr_to_leaf(node->slots[i]));
  675. if (x < diff) {
  676. BUG_ON(x < 0);
  677. diff = x;
  678. }
  679. }
  680. BUG_ON(diff == INT_MAX);
  681. BUG_ON(diff < level + ASSOC_ARRAY_LEVEL_STEP);
  682. keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
  683. keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
  684. new_s0 = kzalloc(sizeof(struct assoc_array_shortcut) +
  685. keylen * sizeof(unsigned long), GFP_KERNEL);
  686. if (!new_s0)
  687. return false;
  688. edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s0);
  689. edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0);
  690. new_s0->back_pointer = node->back_pointer;
  691. new_s0->parent_slot = node->parent_slot;
  692. new_s0->next_node = assoc_array_node_to_ptr(new_n0);
  693. new_n0->back_pointer = assoc_array_shortcut_to_ptr(new_s0);
  694. new_n0->parent_slot = 0;
  695. new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
  696. new_n1->parent_slot = -1; /* Need to calculate this */
  697. new_s0->skip_to_level = level = diff & ~ASSOC_ARRAY_LEVEL_STEP_MASK;
  698. pr_devel("skip_to_level = %d [diff %d]\n", level, diff);
  699. BUG_ON(level <= 0);
  700. for (i = 0; i < keylen; i++)
  701. new_s0->index_key[i] =
  702. ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE);
  703. blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
  704. pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
  705. new_s0->index_key[keylen - 1] &= ~blank;
  706. /* This now reduces to a node splitting exercise for which we'll need
  707. * to regenerate the disparity table.
  708. */
  709. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
  710. ptr = node->slots[i];
  711. base_seg = ops->get_object_key_chunk(assoc_array_ptr_to_leaf(ptr),
  712. level);
  713. base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
  714. edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
  715. }
  716. base_seg = ops->get_key_chunk(index_key, level);
  717. base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
  718. edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = base_seg & ASSOC_ARRAY_FAN_MASK;
  719. goto do_split_node;
  720. }
  721. /*
  722. * Handle insertion into the middle of a shortcut.
  723. */
  724. static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
  725. const struct assoc_array_ops *ops,
  726. struct assoc_array_walk_result *result)
  727. {
  728. struct assoc_array_shortcut *shortcut, *new_s0, *new_s1;
  729. struct assoc_array_node *node, *new_n0, *side;
  730. unsigned long sc_segments, dissimilarity, blank;
  731. size_t keylen;
  732. int level, sc_level, diff;
  733. int sc_slot;
  734. shortcut = result->wrong_shortcut.shortcut;
  735. level = result->wrong_shortcut.level;
  736. sc_level = result->wrong_shortcut.sc_level;
  737. sc_segments = result->wrong_shortcut.sc_segments;
  738. dissimilarity = result->wrong_shortcut.dissimilarity;
  739. pr_devel("-->%s(ix=%d dis=%lx scix=%d)\n",
  740. __func__, level, dissimilarity, sc_level);
  741. /* We need to split a shortcut and insert a node between the two
  742. * pieces. Zero-length pieces will be dispensed with entirely.
  743. *
  744. * First of all, we need to find out in which level the first
  745. * difference was.
  746. */
  747. diff = __ffs(dissimilarity);
  748. diff &= ~ASSOC_ARRAY_LEVEL_STEP_MASK;
  749. diff += sc_level & ~ASSOC_ARRAY_KEY_CHUNK_MASK;
  750. pr_devel("diff=%d\n", diff);
  751. if (!shortcut->back_pointer) {
  752. edit->set[0].ptr = &edit->array->root;
  753. } else if (assoc_array_ptr_is_node(shortcut->back_pointer)) {
  754. node = assoc_array_ptr_to_node(shortcut->back_pointer);
  755. edit->set[0].ptr = &node->slots[shortcut->parent_slot];
  756. } else {
  757. BUG();
  758. }
  759. edit->excised_meta[0] = assoc_array_shortcut_to_ptr(shortcut);
  760. /* Create a new node now since we're going to need it anyway */
  761. new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
  762. if (!new_n0)
  763. return false;
  764. edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
  765. edit->adjust_count_on = new_n0;
  766. /* Insert a new shortcut before the new node if this segment isn't of
  767. * zero length - otherwise we just connect the new node directly to the
  768. * parent.
  769. */
  770. level += ASSOC_ARRAY_LEVEL_STEP;
  771. if (diff > level) {
  772. pr_devel("pre-shortcut %d...%d\n", level, diff);
  773. keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
  774. keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
  775. new_s0 = kzalloc(sizeof(struct assoc_array_shortcut) +
  776. keylen * sizeof(unsigned long), GFP_KERNEL);
  777. if (!new_s0)
  778. return false;
  779. edit->new_meta[1] = assoc_array_shortcut_to_ptr(new_s0);
  780. edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0);
  781. new_s0->back_pointer = shortcut->back_pointer;
  782. new_s0->parent_slot = shortcut->parent_slot;
  783. new_s0->next_node = assoc_array_node_to_ptr(new_n0);
  784. new_s0->skip_to_level = diff;
  785. new_n0->back_pointer = assoc_array_shortcut_to_ptr(new_s0);
  786. new_n0->parent_slot = 0;
  787. memcpy(new_s0->index_key, shortcut->index_key,
  788. keylen * sizeof(unsigned long));
  789. blank = ULONG_MAX << (diff & ASSOC_ARRAY_KEY_CHUNK_MASK);
  790. pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, diff, blank);
  791. new_s0->index_key[keylen - 1] &= ~blank;
  792. } else {
  793. pr_devel("no pre-shortcut\n");
  794. edit->set[0].to = assoc_array_node_to_ptr(new_n0);
  795. new_n0->back_pointer = shortcut->back_pointer;
  796. new_n0->parent_slot = shortcut->parent_slot;
  797. }
  798. side = assoc_array_ptr_to_node(shortcut->next_node);
  799. new_n0->nr_leaves_on_branch = side->nr_leaves_on_branch;
  800. /* We need to know which slot in the new node is going to take a
  801. * metadata pointer.
  802. */
  803. sc_slot = sc_segments >> (diff & ASSOC_ARRAY_KEY_CHUNK_MASK);
  804. sc_slot &= ASSOC_ARRAY_FAN_MASK;
  805. pr_devel("new slot %lx >> %d -> %d\n",
  806. sc_segments, diff & ASSOC_ARRAY_KEY_CHUNK_MASK, sc_slot);
  807. /* Determine whether we need to follow the new node with a replacement
  808. * for the current shortcut. We could in theory reuse the current
  809. * shortcut if its parent slot number doesn't change - but that's a
  810. * 1-in-16 chance so not worth expending the code upon.
  811. */
  812. level = diff + ASSOC_ARRAY_LEVEL_STEP;
  813. if (level < shortcut->skip_to_level) {
  814. pr_devel("post-shortcut %d...%d\n", level, shortcut->skip_to_level);
  815. keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
  816. keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
  817. new_s1 = kzalloc(sizeof(struct assoc_array_shortcut) +
  818. keylen * sizeof(unsigned long), GFP_KERNEL);
  819. if (!new_s1)
  820. return false;
  821. edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s1);
  822. new_s1->back_pointer = assoc_array_node_to_ptr(new_n0);
  823. new_s1->parent_slot = sc_slot;
  824. new_s1->next_node = shortcut->next_node;
  825. new_s1->skip_to_level = shortcut->skip_to_level;
  826. new_n0->slots[sc_slot] = assoc_array_shortcut_to_ptr(new_s1);
  827. memcpy(new_s1->index_key, shortcut->index_key,
  828. keylen * sizeof(unsigned long));
  829. edit->set[1].ptr = &side->back_pointer;
  830. edit->set[1].to = assoc_array_shortcut_to_ptr(new_s1);
  831. } else {
  832. pr_devel("no post-shortcut\n");
  833. /* We don't have to replace the pointed-to node as long as we
  834. * use memory barriers to make sure the parent slot number is
  835. * changed before the back pointer (the parent slot number is
  836. * irrelevant to the old parent shortcut).
  837. */
  838. new_n0->slots[sc_slot] = shortcut->next_node;
  839. edit->set_parent_slot[0].p = &side->parent_slot;
  840. edit->set_parent_slot[0].to = sc_slot;
  841. edit->set[1].ptr = &side->back_pointer;
  842. edit->set[1].to = assoc_array_node_to_ptr(new_n0);
  843. }
  844. /* Install the new leaf in a spare slot in the new node. */
  845. if (sc_slot == 0)
  846. edit->leaf_p = &new_n0->slots[1];
  847. else
  848. edit->leaf_p = &new_n0->slots[0];
  849. pr_devel("<--%s() = ok [split shortcut]\n", __func__);
  850. return edit;
  851. }
  852. /**
  853. * assoc_array_insert - Script insertion of an object into an associative array
  854. * @array: The array to insert into.
  855. * @ops: The operations to use.
  856. * @index_key: The key to insert at.
  857. * @object: The object to insert.
  858. *
  859. * Precalculate and preallocate a script for the insertion or replacement of an
  860. * object in an associative array. This results in an edit script that can
  861. * either be applied or cancelled.
  862. *
  863. * The function returns a pointer to an edit script or -ENOMEM.
  864. *
  865. * The caller should lock against other modifications and must continue to hold
  866. * the lock until assoc_array_apply_edit() has been called.
  867. *
  868. * Accesses to the tree may take place concurrently with this function,
  869. * provided they hold the RCU read lock.
  870. */
  871. struct assoc_array_edit *assoc_array_insert(struct assoc_array *array,
  872. const struct assoc_array_ops *ops,
  873. const void *index_key,
  874. void *object)
  875. {
  876. struct assoc_array_walk_result result;
  877. struct assoc_array_edit *edit;
  878. pr_devel("-->%s()\n", __func__);
  879. /* The leaf pointer we're given must not have the bottom bit set as we
  880. * use those for type-marking the pointer. NULL pointers are also not
  881. * allowed as they indicate an empty slot but we have to allow them
  882. * here as they can be updated later.
  883. */
  884. BUG_ON(assoc_array_ptr_is_meta(object));
  885. edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
  886. if (!edit)
  887. return ERR_PTR(-ENOMEM);
  888. edit->array = array;
  889. edit->ops = ops;
  890. edit->leaf = assoc_array_leaf_to_ptr(object);
  891. edit->adjust_count_by = 1;
  892. switch (assoc_array_walk(array, ops, index_key, &result)) {
  893. case assoc_array_walk_tree_empty:
  894. /* Allocate a root node if there isn't one yet */
  895. if (!assoc_array_insert_in_empty_tree(edit))
  896. goto enomem;
  897. return edit;
  898. case assoc_array_walk_found_terminal_node:
  899. /* We found a node that doesn't have a node/shortcut pointer in
  900. * the slot corresponding to the index key that we have to
  901. * follow.
  902. */
  903. if (!assoc_array_insert_into_terminal_node(edit, ops, index_key,
  904. &result))
  905. goto enomem;
  906. return edit;
  907. case assoc_array_walk_found_wrong_shortcut:
  908. /* We found a shortcut that didn't match our key in a slot we
  909. * needed to follow.
  910. */
  911. if (!assoc_array_insert_mid_shortcut(edit, ops, &result))
  912. goto enomem;
  913. return edit;
  914. }
  915. enomem:
  916. /* Clean up after an out of memory error */
  917. pr_devel("enomem\n");
  918. assoc_array_cancel_edit(edit);
  919. return ERR_PTR(-ENOMEM);
  920. }
  921. /**
  922. * assoc_array_insert_set_object - Set the new object pointer in an edit script
  923. * @edit: The edit script to modify.
  924. * @object: The object pointer to set.
  925. *
  926. * Change the object to be inserted in an edit script. The object pointed to
  927. * by the old object is not freed. This must be done prior to applying the
  928. * script.
  929. */
  930. void assoc_array_insert_set_object(struct assoc_array_edit *edit, void *object)
  931. {
  932. BUG_ON(!object);
  933. edit->leaf = assoc_array_leaf_to_ptr(object);
  934. }
/* State carried through assoc_array_subtree_iterate() while collapsing a
 * subtree's leaves into a single replacement node during deletion.
 */
struct assoc_array_delete_collapse_context {
	struct assoc_array_node	*node;		/* Node the leaves are gathered into */
	const void		*skip_leaf;	/* The leaf being deleted; not copied over */
	int			slot;		/* Next free slot in node */
};
  940. /*
  941. * Subtree collapse to node iterator.
  942. */
  943. static int assoc_array_delete_collapse_iterator(const void *leaf,
  944. void *iterator_data)
  945. {
  946. struct assoc_array_delete_collapse_context *collapse = iterator_data;
  947. if (leaf == collapse->skip_leaf)
  948. return 0;
  949. BUG_ON(collapse->slot >= ASSOC_ARRAY_FAN_OUT);
  950. collapse->node->slots[collapse->slot++] = assoc_array_leaf_to_ptr(leaf);
  951. return 0;
  952. }
  953. /**
  954. * assoc_array_delete - Script deletion of an object from an associative array
  955. * @array: The array to search.
  956. * @ops: The operations to use.
  957. * @index_key: The key to the object.
  958. *
  959. * Precalculate and preallocate a script for the deletion of an object from an
  960. * associative array. This results in an edit script that can either be
  961. * applied or cancelled.
  962. *
  963. * The function returns a pointer to an edit script if the object was found,
  964. * NULL if the object was not found or -ENOMEM.
  965. *
  966. * The caller should lock against other modifications and must continue to hold
  967. * the lock until assoc_array_apply_edit() has been called.
  968. *
  969. * Accesses to the tree may take place concurrently with this function,
  970. * provided they hold the RCU read lock.
  971. */
struct assoc_array_edit *assoc_array_delete(struct assoc_array *array,
					    const struct assoc_array_ops *ops,
					    const void *index_key)
{
	struct assoc_array_delete_collapse_context collapse;
	struct assoc_array_walk_result result;
	struct assoc_array_node *node, *new_n0;
	struct assoc_array_edit *edit;
	struct assoc_array_ptr *ptr;
	bool has_meta;
	int slot, i;

	pr_devel("-->%s()\n", __func__);

	edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
	if (!edit)
		return ERR_PTR(-ENOMEM);
	edit->array = array;
	edit->ops = ops;
	edit->adjust_count_by = -1;

	switch (assoc_array_walk(array, ops, index_key, &result)) {
	case assoc_array_walk_found_terminal_node:
		/* We found a node that should contain the leaf we've been
		 * asked to remove - *if* it's in the tree.
		 */
		pr_devel("terminal_node\n");
		node = result.terminal_node.node;
		/* Scan the terminal node's slots for a leaf matching the key. */
		for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
			ptr = node->slots[slot];
			if (ptr &&
			    assoc_array_ptr_is_leaf(ptr) &&
			    ops->compare_object(assoc_array_ptr_to_leaf(ptr),
						index_key))
				goto found_leaf;
		}
		/* fall through - key not present in the terminal node */
	case assoc_array_walk_tree_empty:
	case assoc_array_walk_found_wrong_shortcut:
	default:
		/* Nothing to delete; discard the preallocated script. */
		assoc_array_cancel_edit(edit);
		pr_devel("not found\n");
		return NULL;
	}

found_leaf:
	BUG_ON(array->nr_leaves_on_tree <= 0);

	/* In the simplest form of deletion we just clear the slot and release
	 * the leaf after a suitable interval.
	 */
	edit->dead_leaf = node->slots[slot];
	edit->set[0].ptr = &node->slots[slot];
	edit->set[0].to = NULL;
	edit->adjust_count_on = node;

	/* If that concludes erasure of the last leaf, then delete the entire
	 * internal array.
	 */
	if (array->nr_leaves_on_tree == 1) {
		edit->set[1].ptr = &array->root;
		edit->set[1].to = NULL;
		edit->adjust_count_on = NULL;
		edit->excised_subtree = array->root;
		pr_devel("all gone\n");
		return edit;
	}

	/* However, we'd also like to clear up some metadata blocks if we
	 * possibly can.
	 *
	 * We go for a simple algorithm of: if this node has FAN_OUT or fewer
	 * leaves in it, then attempt to collapse it - and attempt to
	 * recursively collapse up the tree.
	 *
	 * We could also try and collapse in partially filled subtrees to take
	 * up space in this node.
	 */
	if (node->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT + 1) {
		struct assoc_array_node *parent, *grandparent;
		struct assoc_array_ptr *ptr;

		/* First of all, we need to know if this node has metadata so
		 * that we don't try collapsing if all the leaves are already
		 * here.
		 */
		has_meta = false;
		for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
			ptr = node->slots[i];
			if (assoc_array_ptr_is_meta(ptr)) {
				has_meta = true;
				break;
			}
		}

		pr_devel("leaves: %ld [m=%d]\n",
			 node->nr_leaves_on_branch - 1, has_meta);

		/* Look further up the tree to see if we can collapse this node
		 * into a more proximal node too.
		 */
		parent = node;
	collapse_up:
		pr_devel("collapse subtree: %ld\n", parent->nr_leaves_on_branch);

		ptr = parent->back_pointer;
		if (!ptr)
			goto do_collapse;
		if (assoc_array_ptr_is_shortcut(ptr)) {
			/* Step over an intervening shortcut to reach the
			 * parent node proper.
			 */
			struct assoc_array_shortcut *s = assoc_array_ptr_to_shortcut(ptr);
			ptr = s->back_pointer;
			if (!ptr)
				goto do_collapse;
		}

		grandparent = assoc_array_ptr_to_node(ptr);
		if (grandparent->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT + 1) {
			parent = grandparent;
			goto collapse_up;
		}

	do_collapse:
		/* There's no point collapsing if the original node has no meta
		 * pointers to discard and if we didn't merge into one of that
		 * node's ancestry.
		 */
		if (has_meta || parent != node) {
			node = parent;

			/* Create a new node to collapse into */
			new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
			if (!new_n0)
				goto enomem;
			edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);

			new_n0->back_pointer = node->back_pointer;
			new_n0->parent_slot = node->parent_slot;
			new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
			edit->adjust_count_on = new_n0;

			/* Gather every surviving leaf of the subtree into the
			 * new node, skipping the leaf being deleted.
			 */
			collapse.node = new_n0;
			collapse.skip_leaf = assoc_array_ptr_to_leaf(edit->dead_leaf);
			collapse.slot = 0;
			assoc_array_subtree_iterate(assoc_array_node_to_ptr(node),
						    node->back_pointer,
						    assoc_array_delete_collapse_iterator,
						    &collapse);
			pr_devel("collapsed %d,%lu\n", collapse.slot, new_n0->nr_leaves_on_branch);
			BUG_ON(collapse.slot != new_n0->nr_leaves_on_branch - 1);

			/* Point the parent's slot (or the array root) at the
			 * collapsed replacement node.
			 */
			if (!node->back_pointer) {
				edit->set[1].ptr = &array->root;
			} else if (assoc_array_ptr_is_leaf(node->back_pointer)) {
				BUG();
			} else if (assoc_array_ptr_is_node(node->back_pointer)) {
				struct assoc_array_node *p =
					assoc_array_ptr_to_node(node->back_pointer);
				edit->set[1].ptr = &p->slots[node->parent_slot];
			} else if (assoc_array_ptr_is_shortcut(node->back_pointer)) {
				struct assoc_array_shortcut *s =
					assoc_array_ptr_to_shortcut(node->back_pointer);
				edit->set[1].ptr = &s->next_node;
			}
			edit->set[1].to = assoc_array_node_to_ptr(new_n0);
			edit->excised_subtree = assoc_array_node_to_ptr(node);
		}
	}

	return edit;

enomem:
	/* Clean up after an out of memory error */
	pr_devel("enomem\n");
	assoc_array_cancel_edit(edit);
	return ERR_PTR(-ENOMEM);
}
  1128. /**
  1129. * assoc_array_clear - Script deletion of all objects from an associative array
  1130. * @array: The array to clear.
  1131. * @ops: The operations to use.
  1132. *
  1133. * Precalculate and preallocate a script for the deletion of all the objects
  1134. * from an associative array. This results in an edit script that can either
  1135. * be applied or cancelled.
  1136. *
  1137. * The function returns a pointer to an edit script if there are objects to be
  1138. * deleted, NULL if there are no objects in the array or -ENOMEM.
  1139. *
  1140. * The caller should lock against other modifications and must continue to hold
  1141. * the lock until assoc_array_apply_edit() has been called.
  1142. *
  1143. * Accesses to the tree may take place concurrently with this function,
  1144. * provided they hold the RCU read lock.
  1145. */
  1146. struct assoc_array_edit *assoc_array_clear(struct assoc_array *array,
  1147. const struct assoc_array_ops *ops)
  1148. {
  1149. struct assoc_array_edit *edit;
  1150. pr_devel("-->%s()\n", __func__);
  1151. if (!array->root)
  1152. return NULL;
  1153. edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
  1154. if (!edit)
  1155. return ERR_PTR(-ENOMEM);
  1156. edit->array = array;
  1157. edit->ops = ops;
  1158. edit->set[1].ptr = &array->root;
  1159. edit->set[1].to = NULL;
  1160. edit->excised_subtree = array->root;
  1161. edit->ops_for_excised_subtree = ops;
  1162. pr_devel("all gone\n");
  1163. return edit;
  1164. }
  1165. /*
  1166. * Handle the deferred destruction after an applied edit.
  1167. */
  1168. static void assoc_array_rcu_cleanup(struct rcu_head *head)
  1169. {
  1170. struct assoc_array_edit *edit =
  1171. container_of(head, struct assoc_array_edit, rcu);
  1172. int i;
  1173. pr_devel("-->%s()\n", __func__);
  1174. if (edit->dead_leaf)
  1175. edit->ops->free_object(assoc_array_ptr_to_leaf(edit->dead_leaf));
  1176. for (i = 0; i < ARRAY_SIZE(edit->excised_meta); i++)
  1177. if (edit->excised_meta[i])
  1178. kfree(assoc_array_ptr_to_node(edit->excised_meta[i]));
  1179. if (edit->excised_subtree) {
  1180. BUG_ON(assoc_array_ptr_is_leaf(edit->excised_subtree));
  1181. if (assoc_array_ptr_is_node(edit->excised_subtree)) {
  1182. struct assoc_array_node *n =
  1183. assoc_array_ptr_to_node(edit->excised_subtree);
  1184. n->back_pointer = NULL;
  1185. } else {
  1186. struct assoc_array_shortcut *s =
  1187. assoc_array_ptr_to_shortcut(edit->excised_subtree);
  1188. s->back_pointer = NULL;
  1189. }
  1190. assoc_array_destroy_subtree(edit->excised_subtree,
  1191. edit->ops_for_excised_subtree);
  1192. }
  1193. kfree(edit);
  1194. }
  1195. /**
  1196. * assoc_array_apply_edit - Apply an edit script to an associative array
  1197. * @edit: The script to apply.
  1198. *
  1199. * Apply an edit script to an associative array to effect an insertion,
  1200. * deletion or clearance. As the edit script includes preallocated memory,
  1201. * this is guaranteed not to fail.
  1202. *
  1203. * The edit script, dead objects and dead metadata will be scheduled for
  1204. * destruction after an RCU grace period to permit those doing read-only
  1205. * accesses on the array to continue to do so under the RCU read lock whilst
  1206. * the edit is taking place.
  1207. */
void assoc_array_apply_edit(struct assoc_array_edit *edit)
{
	struct assoc_array_shortcut *shortcut;
	struct assoc_array_node *node;
	struct assoc_array_ptr *ptr;
	int i;

	pr_devel("-->%s()\n", __func__);

	/* The writes below are made in a fixed order, separated by write
	 * barriers, so that concurrent RCU readers never see a pointer to
	 * metadata whose contents haven't been made visible yet.
	 */
	smp_wmb();	/* Script contents must be visible before the leaf goes in. */
	if (edit->leaf_p)
		*edit->leaf_p = edit->leaf;

	smp_wmb();	/* Leaf must be visible before parent slot numbers change. */
	for (i = 0; i < ARRAY_SIZE(edit->set_parent_slot); i++)
		if (edit->set_parent_slot[i].p)
			*edit->set_parent_slot[i].p = edit->set_parent_slot[i].to;

	smp_wmb();	/* Slot numbers must be visible before back pointers change. */
	for (i = 0; i < ARRAY_SIZE(edit->set_backpointers); i++)
		if (edit->set_backpointers[i])
			*edit->set_backpointers[i] = edit->set_backpointers_to;

	smp_wmb();	/* Back pointers must be visible before new metadata is linked in. */
	for (i = 0; i < ARRAY_SIZE(edit->set); i++)
		if (edit->set[i].ptr)
			*edit->set[i].ptr = edit->set[i].to;

	if (edit->array->root == NULL) {
		edit->array->nr_leaves_on_tree = 0;
	} else if (edit->adjust_count_on) {
		/* Propagate the leaf-count delta from the adjustment point up
		 * to the root, skipping over shortcuts (they carry no count).
		 */
		node = edit->adjust_count_on;
		for (;;) {
			node->nr_leaves_on_branch += edit->adjust_count_by;

			ptr = node->back_pointer;
			if (!ptr)
				break;
			if (assoc_array_ptr_is_shortcut(ptr)) {
				shortcut = assoc_array_ptr_to_shortcut(ptr);
				ptr = shortcut->back_pointer;
				if (!ptr)
					break;
			}
			BUG_ON(!assoc_array_ptr_is_node(ptr));
			node = assoc_array_ptr_to_node(ptr);
		}

		edit->array->nr_leaves_on_tree += edit->adjust_count_by;
	}

	/* Defer freeing of displaced metadata and the dead leaf until after
	 * an RCU grace period so in-flight readers remain safe.
	 */
	call_rcu(&edit->rcu, assoc_array_rcu_cleanup);
}
  1252. /**
  1253. * assoc_array_cancel_edit - Discard an edit script.
  1254. * @edit: The script to discard.
  1255. *
  1256. * Free an edit script and all the preallocated data it holds without making
  1257. * any changes to the associative array it was intended for.
  1258. *
  1259. * NOTE! In the case of an insertion script, this does _not_ release the leaf
  1260. * that was to be inserted. That is left to the caller.
  1261. */
  1262. void assoc_array_cancel_edit(struct assoc_array_edit *edit)
  1263. {
  1264. struct assoc_array_ptr *ptr;
  1265. int i;
  1266. pr_devel("-->%s()\n", __func__);
  1267. /* Clean up after an out of memory error */
  1268. for (i = 0; i < ARRAY_SIZE(edit->new_meta); i++) {
  1269. ptr = edit->new_meta[i];
  1270. if (ptr) {
  1271. if (assoc_array_ptr_is_node(ptr))
  1272. kfree(assoc_array_ptr_to_node(ptr));
  1273. else
  1274. kfree(assoc_array_ptr_to_shortcut(ptr));
  1275. }
  1276. }
  1277. kfree(edit);
  1278. }
  1279. /**
  1280. * assoc_array_gc - Garbage collect an associative array.
  1281. * @array: The array to clean.
  1282. * @ops: The operations to use.
  1283. * @iterator: A callback function to pass judgement on each object.
  1284. * @iterator_data: Private data for the callback function.
  1285. *
  1286. * Collect garbage from an associative array and pack down the internal tree to
  1287. * save memory.
  1288. *
  1289. * The iterator function is asked to pass judgement upon each object in the
  1290. * array. If it returns false, the object is discard and if it returns true,
  1291. * the object is kept. If it returns true, it must increment the object's
  1292. * usage count (or whatever it needs to do to retain it) before returning.
  1293. *
  1294. * This function returns 0 if successful or -ENOMEM if out of memory. In the
  1295. * latter case, the array is not changed.
  1296. *
  1297. * The caller should lock against other modifications and must continue to hold
  1298. * the lock until assoc_array_apply_edit() has been called.
  1299. *
  1300. * Accesses to the tree may take place concurrently with this function,
  1301. * provided they hold the RCU read lock.
  1302. */
  1303. int assoc_array_gc(struct assoc_array *array,
  1304. const struct assoc_array_ops *ops,
  1305. bool (*iterator)(void *object, void *iterator_data),
  1306. void *iterator_data)
  1307. {
  1308. struct assoc_array_shortcut *shortcut, *new_s;
  1309. struct assoc_array_node *node, *new_n;
  1310. struct assoc_array_edit *edit;
  1311. struct assoc_array_ptr *cursor, *ptr;
  1312. struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp;
  1313. unsigned long nr_leaves_on_tree;
  1314. int keylen, slot, nr_free, next_slot, i;
  1315. pr_devel("-->%s()\n", __func__);
  1316. if (!array->root)
  1317. return 0;
  1318. edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
  1319. if (!edit)
  1320. return -ENOMEM;
  1321. edit->array = array;
  1322. edit->ops = ops;
  1323. edit->ops_for_excised_subtree = ops;
  1324. edit->set[0].ptr = &array->root;
  1325. edit->excised_subtree = array->root;
  1326. new_root = new_parent = NULL;
  1327. new_ptr_pp = &new_root;
  1328. cursor = array->root;
  1329. descend:
  1330. /* If this point is a shortcut, then we need to duplicate it and
  1331. * advance the target cursor.
  1332. */
  1333. if (assoc_array_ptr_is_shortcut(cursor)) {
  1334. shortcut = assoc_array_ptr_to_shortcut(cursor);
  1335. keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
  1336. keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
  1337. new_s = kmalloc(sizeof(struct assoc_array_shortcut) +
  1338. keylen * sizeof(unsigned long), GFP_KERNEL);
  1339. if (!new_s)
  1340. goto enomem;
  1341. pr_devel("dup shortcut %p -> %p\n", shortcut, new_s);
  1342. memcpy(new_s, shortcut, (sizeof(struct assoc_array_shortcut) +
  1343. keylen * sizeof(unsigned long)));
  1344. new_s->back_pointer = new_parent;
  1345. new_s->parent_slot = shortcut->parent_slot;
  1346. *new_ptr_pp = new_parent = assoc_array_shortcut_to_ptr(new_s);
  1347. new_ptr_pp = &new_s->next_node;
  1348. cursor = shortcut->next_node;
  1349. }
  1350. /* Duplicate the node at this position */
  1351. node = assoc_array_ptr_to_node(cursor);
  1352. new_n = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
  1353. if (!new_n)
  1354. goto enomem;
  1355. pr_devel("dup node %p -> %p\n", node, new_n);
  1356. new_n->back_pointer = new_parent;
  1357. new_n->parent_slot = node->parent_slot;
  1358. *new_ptr_pp = new_parent = assoc_array_node_to_ptr(new_n);
  1359. new_ptr_pp = NULL;
  1360. slot = 0;
  1361. continue_node:
  1362. /* Filter across any leaves and gc any subtrees */
  1363. for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
  1364. ptr = node->slots[slot];
  1365. if (!ptr)
  1366. continue;
  1367. if (assoc_array_ptr_is_leaf(ptr)) {
  1368. if (iterator(assoc_array_ptr_to_leaf(ptr),
  1369. iterator_data))
  1370. /* The iterator will have done any reference
  1371. * counting on the object for us.
  1372. */
  1373. new_n->slots[slot] = ptr;
  1374. continue;
  1375. }
  1376. new_ptr_pp = &new_n->slots[slot];
  1377. cursor = ptr;
  1378. goto descend;
  1379. }
  1380. pr_devel("-- compress node %p --\n", new_n);
  1381. /* Count up the number of empty slots in this node and work out the
  1382. * subtree leaf count.
  1383. */
  1384. new_n->nr_leaves_on_branch = 0;
  1385. nr_free = 0;
  1386. for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
  1387. ptr = new_n->slots[slot];
  1388. if (!ptr)
  1389. nr_free++;
  1390. else if (assoc_array_ptr_is_leaf(ptr))
  1391. new_n->nr_leaves_on_branch++;
  1392. }
  1393. pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch);
  1394. /* See what we can fold in */
  1395. next_slot = 0;
  1396. for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
  1397. struct assoc_array_shortcut *s;
  1398. struct assoc_array_node *child;
  1399. ptr = new_n->slots[slot];
  1400. if (!ptr || assoc_array_ptr_is_leaf(ptr))
  1401. continue;
  1402. s = NULL;
  1403. if (assoc_array_ptr_is_shortcut(ptr)) {
  1404. s = assoc_array_ptr_to_shortcut(ptr);
  1405. ptr = s->next_node;
  1406. }
  1407. child = assoc_array_ptr_to_node(ptr);
  1408. new_n->nr_leaves_on_branch += child->nr_leaves_on_branch;
  1409. if (child->nr_leaves_on_branch <= nr_free + 1) {
  1410. /* Fold the child node into this one */
  1411. pr_devel("[%d] fold node %lu/%d [nx %d]\n",
  1412. slot, child->nr_leaves_on_branch, nr_free + 1,
  1413. next_slot);
  1414. /* We would already have reaped an intervening shortcut
  1415. * on the way back up the tree.
  1416. */
  1417. BUG_ON(s);
  1418. new_n->slots[slot] = NULL;
  1419. nr_free++;
  1420. if (slot < next_slot)
  1421. next_slot = slot;
  1422. for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
  1423. struct assoc_array_ptr *p = child->slots[i];
  1424. if (!p)
  1425. continue;
  1426. BUG_ON(assoc_array_ptr_is_meta(p));
  1427. while (new_n->slots[next_slot])
  1428. next_slot++;
  1429. BUG_ON(next_slot >= ASSOC_ARRAY_FAN_OUT);
  1430. new_n->slots[next_slot++] = p;
  1431. nr_free--;
  1432. }
  1433. kfree(child);
  1434. } else {
  1435. pr_devel("[%d] retain node %lu/%d [nx %d]\n",
  1436. slot, child->nr_leaves_on_branch, nr_free + 1,
  1437. next_slot);
  1438. }
  1439. }
  1440. pr_devel("after: %lu\n", new_n->nr_leaves_on_branch);
  1441. nr_leaves_on_tree = new_n->nr_leaves_on_branch;
  1442. /* Excise this node if it is singly occupied by a shortcut */
  1443. if (nr_free == ASSOC_ARRAY_FAN_OUT - 1) {
  1444. for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++)
  1445. if ((ptr = new_n->slots[slot]))
  1446. break;
  1447. if (assoc_array_ptr_is_meta(ptr) &&
  1448. assoc_array_ptr_is_shortcut(ptr)) {
  1449. pr_devel("excise node %p with 1 shortcut\n", new_n);
  1450. new_s = assoc_array_ptr_to_shortcut(ptr);
  1451. new_parent = new_n->back_pointer;
  1452. slot = new_n->parent_slot;
  1453. kfree(new_n);
  1454. if (!new_parent) {
  1455. new_s->back_pointer = NULL;
  1456. new_s->parent_slot = 0;
  1457. new_root = ptr;
  1458. goto gc_complete;
  1459. }
  1460. if (assoc_array_ptr_is_shortcut(new_parent)) {
  1461. /* We can discard any preceding shortcut also */
  1462. struct assoc_array_shortcut *s =
  1463. assoc_array_ptr_to_shortcut(new_parent);
  1464. pr_devel("excise preceding shortcut\n");
  1465. new_parent = new_s->back_pointer = s->back_pointer;
  1466. slot = new_s->parent_slot = s->parent_slot;
  1467. kfree(s);
  1468. if (!new_parent) {
  1469. new_s->back_pointer = NULL;
  1470. new_s->parent_slot = 0;
  1471. new_root = ptr;
  1472. goto gc_complete;
  1473. }
  1474. }
  1475. new_s->back_pointer = new_parent;
  1476. new_s->parent_slot = slot;
  1477. new_n = assoc_array_ptr_to_node(new_parent);
  1478. new_n->slots[slot] = ptr;
  1479. goto ascend_old_tree;
  1480. }
  1481. }
  1482. /* Excise any shortcuts we might encounter that point to nodes that
  1483. * only contain leaves.
  1484. */
  1485. ptr = new_n->back_pointer;
  1486. if (!ptr)
  1487. goto gc_complete;
  1488. if (assoc_array_ptr_is_shortcut(ptr)) {
  1489. new_s = assoc_array_ptr_to_shortcut(ptr);
  1490. new_parent = new_s->back_pointer;
  1491. slot = new_s->parent_slot;
  1492. if (new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
  1493. struct assoc_array_node *n;
  1494. pr_devel("excise shortcut\n");
  1495. new_n->back_pointer = new_parent;
  1496. new_n->parent_slot = slot;
  1497. kfree(new_s);
  1498. if (!new_parent) {
  1499. new_root = assoc_array_node_to_ptr(new_n);
  1500. goto gc_complete;
  1501. }
  1502. n = assoc_array_ptr_to_node(new_parent);
  1503. n->slots[slot] = assoc_array_node_to_ptr(new_n);
  1504. }
  1505. } else {
  1506. new_parent = ptr;
  1507. }
  1508. new_n = assoc_array_ptr_to_node(new_parent);
  1509. ascend_old_tree:
  1510. ptr = node->back_pointer;
  1511. if (assoc_array_ptr_is_shortcut(ptr)) {
  1512. shortcut = assoc_array_ptr_to_shortcut(ptr);
  1513. slot = shortcut->parent_slot;
  1514. cursor = shortcut->back_pointer;
  1515. } else {
  1516. slot = node->parent_slot;
  1517. cursor = ptr;
  1518. }
  1519. BUG_ON(!ptr);
  1520. node = assoc_array_ptr_to_node(cursor);
  1521. slot++;
  1522. goto continue_node;
  1523. gc_complete:
  1524. edit->set[0].to = new_root;
  1525. assoc_array_apply_edit(edit);
  1526. edit->array->nr_leaves_on_tree = nr_leaves_on_tree;
  1527. return 0;
  1528. enomem:
  1529. pr_devel("enomem\n");
  1530. assoc_array_destroy_subtree(new_root, edit->ops);
  1531. kfree(edit);
  1532. return -ENOMEM;
  1533. }