  1. /*
  2. * This program is free software; you can redistribute it and/or
  3. * modify it under the terms of the GNU General Public License
  4. * as published by the Free Software Foundation; either version
  5. * 2 of the License, or (at your option) any later version.
  6. *
  7. * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
  8. * & Swedish University of Agricultural Sciences.
  9. *
  10. * Jens Laas <jens.laas@data.slu.se> Swedish University of
  11. * Agricultural Sciences.
  12. *
  13. * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
  14. *
 * This work is based on the LPC-trie which is originally described in:
  16. *
  17. * An experimental study of compression methods for dynamic tries
  18. * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
  19. * http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
  20. *
  21. *
  22. * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
  23. * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
  24. *
  25. * Version: $Id: fib_trie.c,v 1.3 2005/06/08 14:20:01 robert Exp $
  26. *
  27. *
  28. * Code from fib_hash has been reused which includes the following header:
  29. *
  30. *
  31. * INET An implementation of the TCP/IP protocol suite for the LINUX
  32. * operating system. INET is implemented using the BSD Socket
  33. * interface as the means of communication with the user level.
  34. *
  35. * IPv4 FIB: lookup engine and maintenance routines.
  36. *
  37. *
  38. * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  39. *
  40. * This program is free software; you can redistribute it and/or
  41. * modify it under the terms of the GNU General Public License
  42. * as published by the Free Software Foundation; either version
  43. * 2 of the License, or (at your option) any later version.
  44. */
  45. #define VERSION "0.402"
  46. #include <linux/config.h>
  47. #include <asm/uaccess.h>
  48. #include <asm/system.h>
  49. #include <asm/bitops.h>
  50. #include <linux/types.h>
  51. #include <linux/kernel.h>
  52. #include <linux/sched.h>
  53. #include <linux/mm.h>
  54. #include <linux/string.h>
  55. #include <linux/socket.h>
  56. #include <linux/sockios.h>
  57. #include <linux/errno.h>
  58. #include <linux/in.h>
  59. #include <linux/inet.h>
  60. #include <linux/netdevice.h>
  61. #include <linux/if_arp.h>
  62. #include <linux/proc_fs.h>
  63. #include <linux/rcupdate.h>
  64. #include <linux/skbuff.h>
  65. #include <linux/netlink.h>
  66. #include <linux/init.h>
  67. #include <linux/list.h>
  68. #include <net/ip.h>
  69. #include <net/protocol.h>
  70. #include <net/route.h>
  71. #include <net/tcp.h>
  72. #include <net/sock.h>
  73. #include <net/ip_fib.h>
  74. #include "fib_lookup.h"
#undef CONFIG_IP_FIB_TRIE_STATS	/* lookup statistics disabled by default */
#define MAX_CHILDS 16384	/* largest child-array size tracked in trie_stat */
#define KEYLENGTH (8*sizeof(t_key))	/* key width in bits */
/* Keep only the top 'l' bits of key 'k'; a zero-length prefix is 0. */
#define MASK_PFX(k, l) (((l)==0)?0:(k >> (KEYLENGTH-l)) << (KEYLENGTH-l))
/* Mask selecting 'bits' bits starting at bit 'offset' (MSB is bit 0). */
#define TKEY_GET_MASK(offset, bits) (((bits)==0)?0:((t_key)(-1) << (KEYLENGTH - bits) >> offset))

typedef unsigned int t_key;

#define T_TNODE 0
#define T_LEAF 1
#define NODE_TYPE_MASK 0x1UL	/* node type is packed into bit 0 of ->parent */
/* ->parent stores a tnode pointer with the type bit OR-ed in; strip it. */
#define NODE_PARENT(node) \
((struct tnode *)rcu_dereference(((node)->parent & ~NODE_TYPE_MASK)))
#define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)
/* Publish a new parent pointer, preserving the node's type bit (RCU). */
#define NODE_SET_PARENT(node, ptr) \
rcu_assign_pointer((node)->parent, \
((unsigned long)(ptr)) | NODE_TYPE(node))
#define IS_TNODE(n) (!(n->parent & T_LEAF))
#define IS_LEAF(n) (n->parent & T_LEAF)
/*
 * Common header shared by leaves and internal nodes (tnodes).  Bit 0 of
 * ->parent encodes the node type (T_LEAF/T_TNODE); the remaining bits
 * hold the parent tnode pointer — see NODE_PARENT/NODE_SET_PARENT.
 */
struct node {
	t_key key;
	unsigned long parent;
};
/* External (leaf) node; first two members must mirror struct node. */
struct leaf {
	t_key key;
	unsigned long parent;
	struct hlist_head list;	/* chain of struct leaf_info entries */
	struct rcu_head rcu;	/* for deferred free via call_rcu() */
};
/* Per-prefix-length record hanging off a leaf's ->list. */
struct leaf_info {
	struct hlist_node hlist;
	struct rcu_head rcu;	/* for deferred free via call_rcu() */
	int plen;		/* prefix length */
	struct list_head falh;	/* list head of fib aliases for this prefix */
};
/*
 * Internal node; first two members must mirror struct node.  The child
 * array has 1 << bits slots and is allocated inline (flexible array).
 */
struct tnode {
	t_key key;
	unsigned long parent;
	unsigned short pos:5;		/* 2log(KEYLENGTH) bits needed */
	unsigned short bits:5;		/* 2log(KEYLENGTH) bits needed */
	unsigned short full_children;	/* KEYLENGTH bits needed */
	unsigned short empty_children;	/* KEYLENGTH bits needed */
	struct rcu_head rcu;		/* for deferred free via call_rcu() */
	struct node *child[0];		/* 1 << bits child slots follow */
};
#ifdef CONFIG_IP_FIB_TRIE_STATS
/* Per-trie lookup counters, only compiled in when stats are enabled. */
struct trie_use_stats {
	unsigned int gets;			/* total lookups */
	unsigned int backtrack;			/* times the search backed up */
	unsigned int semantic_match_passed;
	unsigned int semantic_match_miss;
	unsigned int null_node_hit;
	unsigned int resize_node_skipped;	/* inflate/halve aborted (ENOMEM) */
};
#endif
/* Snapshot of trie shape, gathered by walking the whole structure. */
struct trie_stat {
	unsigned int totdepth;	/* sum of leaf depths */
	unsigned int maxdepth;
	unsigned int tnodes;
	unsigned int leaves;
	unsigned int nullpointers;
	unsigned int nodesizes[MAX_CHILDS];	/* histogram of child-array sizes */
};
/* One trie instance (one FIB table). */
struct trie {
	struct node *trie;	/* root node, RCU-protected */
#ifdef CONFIG_IP_FIB_TRIE_STATS
	struct trie_use_stats stats;
#endif
	int size;		/* NOTE(review): presumably the leaf count — confirm with insert path */
	unsigned int revision;	/* bumped on modification */
};
/* Forward declarations for the resize/inflate/halve machinery below. */
static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull);
static struct node *resize(struct trie *t, struct tnode *tn);
static struct tnode *inflate(struct trie *t, struct tnode *tn);
static struct tnode *halve(struct trie *t, struct tnode *tn);
static void tnode_free(struct tnode *tn);
static void trie_dump_seq(struct seq_file *seq, struct trie *t);

/* Slab cache for struct fib_alias objects. */
static kmem_cache_t *fn_alias_kmem __read_mostly;
/* The two FIB tables: local and main. */
static struct trie *trie_local = NULL, *trie_main = NULL;
/* rcu_read_lock needs to be hold by caller from readside */
/* Return child slot 'i' of 'tn', with the RCU read barrier applied. */
static inline struct node *tnode_get_child(struct tnode *tn, int i)
{
	BUG_ON(i >= 1 << tn->bits);	/* index must be within the child array */

	return rcu_dereference(tn->child[i]);
}
  159. static inline int tnode_child_length(const struct tnode *tn)
  160. {
  161. return 1 << tn->bits;
  162. }
  163. static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
  164. {
  165. if (offset < KEYLENGTH)
  166. return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
  167. else
  168. return 0;
  169. }
  170. static inline int tkey_equals(t_key a, t_key b)
  171. {
  172. return a == b;
  173. }
/*
 * True if keys 'a' and 'b' agree on the 'bits' bits starting at bit
 * 'offset' (MSB first).  An empty or out-of-range window matches trivially.
 */
static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b)
{
	if (bits == 0 || offset >= KEYLENGTH)
		return 1;
	bits = bits > KEYLENGTH ? KEYLENGTH : bits;	/* clamp to key width */
	/* XOR exposes differing bits; shift the window into place and test. */
	return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0;
}
/*
 * Position (from the MSB) of the first bit, at or after 'offset', where
 * keys 'a' and 'b' differ.  Returns 0 when the keys are identical.
 */
static inline int tkey_mismatch(t_key a, int offset, t_key b)
{
	t_key diff = a ^ b;	/* differing bits are set */
	int i = offset;

	if (!diff)
		return 0;
	/* advance until bit i of the difference is set */
	while ((diff << i) >> (KEYLENGTH-1) == 0)
		i++;
	return i;
}
  191. /*
  192. To understand this stuff, an understanding of keys and all their bits is
  193. necessary. Every node in the trie has a key associated with it, but not
  194. all of the bits in that key are significant.
  195. Consider a node 'n' and its parent 'tp'.
  196. If n is a leaf, every bit in its key is significant. Its presence is
necessitated by path compression, since during a tree traversal (when
  198. searching for a leaf - unless we are doing an insertion) we will completely
  199. ignore all skipped bits we encounter. Thus we need to verify, at the end of
  200. a potentially successful search, that we have indeed been walking the
  201. correct key path.
  202. Note that we can never "miss" the correct key in the tree if present by
  203. following the wrong path. Path compression ensures that segments of the key
  204. that are the same for all keys with a given prefix are skipped, but the
  205. skipped part *is* identical for each node in the subtrie below the skipped
  206. bit! trie_insert() in this implementation takes care of that - note the
  207. call to tkey_sub_equals() in trie_insert().
  208. if n is an internal node - a 'tnode' here, the various parts of its key
  209. have many different meanings.
  210. Example:
  211. _________________________________________________________________
  212. | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
  213. -----------------------------------------------------------------
  214. 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
  215. _________________________________________________________________
  216. | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
  217. -----------------------------------------------------------------
  218. 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
  219. tp->pos = 7
  220. tp->bits = 3
  221. n->pos = 15
  222. n->bits = 4
  223. First, let's just ignore the bits that come before the parent tp, that is
  224. the bits from 0 to (tp->pos-1). They are *known* but at this point we do
  225. not use them for anything.
  226. The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
  227. index into the parent's child array. That is, they will be used to find
  228. 'n' among tp's children.
  229. The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
  230. for the node n.
  231. All the bits we have seen so far are significant to the node n. The rest
  232. of the bits are really not needed or indeed known in n->key.
  233. The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
  234. n's child array, and will of course be different for each child.
  235. The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
  236. at this point.
  237. */
  238. static inline void check_tnode(const struct tnode *tn)
  239. {
  240. WARN_ON(tn && tn->pos+tn->bits > 32);
  241. }
/* Resize thresholds, in percent — see the fill-factor discussion in resize(). */
static int halve_threshold = 25;
static int inflate_threshold = 50;
  244. static void __alias_free_mem(struct rcu_head *head)
  245. {
  246. struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
  247. kmem_cache_free(fn_alias_kmem, fa);
  248. }
/* Schedule a fib_alias for freeing after a grace period. */
static inline void alias_free_mem_rcu(struct fib_alias *fa)
{
	call_rcu(&fa->rcu, __alias_free_mem);
}
  253. static void __leaf_free_rcu(struct rcu_head *head)
  254. {
  255. kfree(container_of(head, struct leaf, rcu));
  256. }
/* Schedule a leaf for freeing after a grace period. */
static inline void free_leaf(struct leaf *leaf)
{
	call_rcu(&leaf->rcu, __leaf_free_rcu);
}
  261. static void __leaf_info_free_rcu(struct rcu_head *head)
  262. {
  263. kfree(container_of(head, struct leaf_info, rcu));
  264. }
/* Schedule a leaf_info for freeing after a grace period. */
static inline void free_leaf_info(struct leaf_info *leaf)
{
	call_rcu(&leaf->rcu, __leaf_info_free_rcu);
}
  269. static struct tnode *tnode_alloc(unsigned int size)
  270. {
  271. struct page *pages;
  272. if (size <= PAGE_SIZE)
  273. return kcalloc(size, 1, GFP_KERNEL);
  274. pages = alloc_pages(GFP_KERNEL|__GFP_ZERO, get_order(size));
  275. if (!pages)
  276. return NULL;
  277. return page_address(pages);
  278. }
  279. static void __tnode_free_rcu(struct rcu_head *head)
  280. {
  281. struct tnode *tn = container_of(head, struct tnode, rcu);
  282. unsigned int size = sizeof(struct tnode) +
  283. (1 << tn->bits) * sizeof(struct node *);
  284. if (size <= PAGE_SIZE)
  285. kfree(tn);
  286. else
  287. free_pages((unsigned long)tn, get_order(size));
  288. }
/* Schedule a tnode for freeing after a grace period. */
static inline void tnode_free(struct tnode *tn)
{
	call_rcu(&tn->rcu, __tnode_free_rcu);
}
  293. static struct leaf *leaf_new(void)
  294. {
  295. struct leaf *l = kmalloc(sizeof(struct leaf), GFP_KERNEL);
  296. if (l) {
  297. l->parent = T_LEAF;
  298. INIT_HLIST_HEAD(&l->list);
  299. }
  300. return l;
  301. }
  302. static struct leaf_info *leaf_info_new(int plen)
  303. {
  304. struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
  305. if (li) {
  306. li->plen = plen;
  307. INIT_LIST_HEAD(&li->falh);
  308. }
  309. return li;
  310. }
  311. static struct tnode* tnode_new(t_key key, int pos, int bits)
  312. {
  313. int nchildren = 1<<bits;
  314. int sz = sizeof(struct tnode) + nchildren * sizeof(struct node *);
  315. struct tnode *tn = tnode_alloc(sz);
  316. if (tn) {
  317. memset(tn, 0, sz);
  318. tn->parent = T_TNODE;
  319. tn->pos = pos;
  320. tn->bits = bits;
  321. tn->key = key;
  322. tn->full_children = 0;
  323. tn->empty_children = 1<<bits;
  324. }
  325. pr_debug("AT %p s=%u %u\n", tn, (unsigned int) sizeof(struct tnode),
  326. (unsigned int) (sizeof(struct node) * 1<<bits));
  327. return tn;
  328. }
  329. /*
  330. * Check whether a tnode 'n' is "full", i.e. it is an internal node
  331. * and no bits are skipped. See discussion in dyntree paper p. 6
  332. */
  333. static inline int tnode_full(const struct tnode *tn, const struct node *n)
  334. {
  335. if (n == NULL || IS_LEAF(n))
  336. return 0;
  337. return ((struct tnode *) n)->pos == tn->pos + tn->bits;
  338. }
/* Store child 'n' at slot 'i' of 'tn', recomputing the fullness flag. */
static inline void put_child(struct trie *t, struct tnode *tn, int i, struct node *n)
{
	tnode_put_child_reorg(tn, i, n, -1);	/* -1: wasfull unknown */
}
/*
 * Add a child at position i overwriting the old value.
 * Update the value of full_children and empty_children.
 * 'wasfull' is the prior fullness of the slot, or -1 to recompute it here.
 */
static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull)
{
	struct node *chi = tn->child[i];	/* outgoing occupant */
	int isfull;

	BUG_ON(i >= 1<<tn->bits);

	/* update emptyChildren */
	if (n == NULL && chi != NULL)
		tn->empty_children++;
	else if (n != NULL && chi == NULL)
		tn->empty_children--;

	/* update fullChildren */
	if (wasfull == -1)
		wasfull = tnode_full(tn, chi);

	isfull = tnode_full(tn, n);
	if (wasfull && !isfull)
		tn->full_children--;
	else if (!wasfull && isfull)
		tn->full_children++;

	/* set the parent link before publishing the child pointer */
	if (n)
		NODE_SET_PARENT(n, tn);

	rcu_assign_pointer(tn->child[i], n);
}
  369. static struct node *resize(struct trie *t, struct tnode *tn)
  370. {
  371. int i;
  372. int err = 0;
  373. struct tnode *old_tn;
  374. if (!tn)
  375. return NULL;
  376. pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
  377. tn, inflate_threshold, halve_threshold);
  378. /* No children */
  379. if (tn->empty_children == tnode_child_length(tn)) {
  380. tnode_free(tn);
  381. return NULL;
  382. }
  383. /* One child */
  384. if (tn->empty_children == tnode_child_length(tn) - 1)
  385. for (i = 0; i < tnode_child_length(tn); i++) {
  386. struct node *n;
  387. n = tn->child[i];
  388. if (!n)
  389. continue;
  390. /* compress one level */
  391. NODE_SET_PARENT(n, NULL);
  392. tnode_free(tn);
  393. return n;
  394. }
  395. /*
  396. * Double as long as the resulting node has a number of
  397. * nonempty nodes that are above the threshold.
  398. */
  399. /*
  400. * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
  401. * the Helsinki University of Technology and Matti Tikkanen of Nokia
  402. * Telecommunications, page 6:
  403. * "A node is doubled if the ratio of non-empty children to all
  404. * children in the *doubled* node is at least 'high'."
  405. *
  406. * 'high' in this instance is the variable 'inflate_threshold'. It
  407. * is expressed as a percentage, so we multiply it with
  408. * tnode_child_length() and instead of multiplying by 2 (since the
  409. * child array will be doubled by inflate()) and multiplying
  410. * the left-hand side by 100 (to handle the percentage thing) we
  411. * multiply the left-hand side by 50.
  412. *
  413. * The left-hand side may look a bit weird: tnode_child_length(tn)
  414. * - tn->empty_children is of course the number of non-null children
  415. * in the current node. tn->full_children is the number of "full"
  416. * children, that is non-null tnodes with a skip value of 0.
  417. * All of those will be doubled in the resulting inflated tnode, so
  418. * we just count them one extra time here.
  419. *
  420. * A clearer way to write this would be:
  421. *
  422. * to_be_doubled = tn->full_children;
  423. * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
  424. * tn->full_children;
  425. *
  426. * new_child_length = tnode_child_length(tn) * 2;
  427. *
  428. * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
  429. * new_child_length;
  430. * if (new_fill_factor >= inflate_threshold)
  431. *
  432. * ...and so on, tho it would mess up the while () loop.
  433. *
  434. * anyway,
  435. * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
  436. * inflate_threshold
  437. *
  438. * avoid a division:
  439. * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
  440. * inflate_threshold * new_child_length
  441. *
  442. * expand not_to_be_doubled and to_be_doubled, and shorten:
  443. * 100 * (tnode_child_length(tn) - tn->empty_children +
  444. * tn->full_children) >= inflate_threshold * new_child_length
  445. *
  446. * expand new_child_length:
  447. * 100 * (tnode_child_length(tn) - tn->empty_children +
  448. * tn->full_children) >=
  449. * inflate_threshold * tnode_child_length(tn) * 2
  450. *
  451. * shorten again:
  452. * 50 * (tn->full_children + tnode_child_length(tn) -
  453. * tn->empty_children) >= inflate_threshold *
  454. * tnode_child_length(tn)
  455. *
  456. */
  457. check_tnode(tn);
  458. err = 0;
  459. while ((tn->full_children > 0 &&
  460. 50 * (tn->full_children + tnode_child_length(tn) - tn->empty_children) >=
  461. inflate_threshold * tnode_child_length(tn))) {
  462. old_tn = tn;
  463. tn = inflate(t, tn);
  464. if (IS_ERR(tn)) {
  465. tn = old_tn;
  466. #ifdef CONFIG_IP_FIB_TRIE_STATS
  467. t->stats.resize_node_skipped++;
  468. #endif
  469. break;
  470. }
  471. }
  472. check_tnode(tn);
  473. /*
  474. * Halve as long as the number of empty children in this
  475. * node is above threshold.
  476. */
  477. err = 0;
  478. while (tn->bits > 1 &&
  479. 100 * (tnode_child_length(tn) - tn->empty_children) <
  480. halve_threshold * tnode_child_length(tn)) {
  481. old_tn = tn;
  482. tn = halve(t, tn);
  483. if (IS_ERR(tn)) {
  484. tn = old_tn;
  485. #ifdef CONFIG_IP_FIB_TRIE_STATS
  486. t->stats.resize_node_skipped++;
  487. #endif
  488. break;
  489. }
  490. }
  491. /* Only one child remains */
  492. if (tn->empty_children == tnode_child_length(tn) - 1)
  493. for (i = 0; i < tnode_child_length(tn); i++) {
  494. struct node *n;
  495. n = tn->child[i];
  496. if (!n)
  497. continue;
  498. /* compress one level */
  499. NODE_SET_PARENT(n, NULL);
  500. tnode_free(tn);
  501. return n;
  502. }
  503. return (struct node *) tn;
  504. }
/*
 * Double the child array of 'tn': build a replacement tnode with one
 * more index bit and redistribute all children into it.  Returns the
 * new tnode, or ERR_PTR(-ENOMEM) leaving the original node untouched.
 */
static struct tnode *inflate(struct trie *t, struct tnode *tn)
{
	struct tnode *inode;
	struct tnode *oldtnode = tn;
	int olen = tnode_child_length(tn);
	int i;

	pr_debug("In inflate\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1);

	if (!tn)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate and store tnodes before the actual work so we
	 * don't get into an inconsistent state if memory allocation
	 * fails. In case of failure we return the oldnode and inflate
	 * of tnode is ignored.
	 */

	for (i = 0; i < olen; i++) {
		/* NOTE(review): this 'inode' shadows the function-scope one */
		struct tnode *inode = (struct tnode *) tnode_get_child(oldtnode, i);

		/* full tnodes with more than two children will be split
		 * below; pre-build their left/right halves here */
		if (inode &&
		    IS_TNODE(inode) &&
		    inode->pos == oldtnode->pos + oldtnode->bits &&
		    inode->bits > 1) {
			struct tnode *left, *right;
			/* single bit at position inode->pos distinguishes halves */
			t_key m = TKEY_GET_MASK(inode->pos, 1);

			left = tnode_new(inode->key&(~m), inode->pos + 1,
					 inode->bits - 1);
			if (!left)
				goto nomem;

			right = tnode_new(inode->key|m, inode->pos + 1,
					  inode->bits - 1);

			if (!right) {
				tnode_free(left);
				goto nomem;
			}

			put_child(t, tn, 2*i, (struct node *) left);
			put_child(t, tn, 2*i+1, (struct node *) right);
		}
	}

	for (i = 0; i < olen; i++) {
		struct node *node = tnode_get_child(oldtnode, i);
		struct tnode *left, *right;
		int size, j;

		/* An empty child */
		if (node == NULL)
			continue;

		/* A leaf or an internal node with skipped bits */
		if (IS_LEAF(node) || ((struct tnode *) node)->pos >
		   tn->pos + tn->bits - 1) {
			/* the extra index bit decides which of the two
			 * slots the old child moves into */
			if (tkey_extract_bits(node->key, oldtnode->pos + oldtnode->bits,
					     1) == 0)
				put_child(t, tn, 2*i, node);
			else
				put_child(t, tn, 2*i+1, node);
			continue;
		}

		/* An internal node with two children */
		inode = (struct tnode *) node;

		if (inode->bits == 1) {
			/* trivial split: each child gets one slot */
			put_child(t, tn, 2*i, inode->child[0]);
			put_child(t, tn, 2*i+1, inode->child[1]);

			tnode_free(inode);
			continue;
		}

		/* An internal node with more than two children */

		/* We will replace this node 'inode' with two new
		 * ones, 'left' and 'right', each with half of the
		 * original children. The two new nodes will have
		 * a position one bit further down the key and this
		 * means that the "significant" part of their keys
		 * (see the discussion near the top of this file)
		 * will differ by one bit, which will be "0" in
		 * left's key and "1" in right's key. Since we are
		 * moving the key position by one step, the bit that
		 * we are moving away from - the bit at position
		 * (inode->pos) - is the one that will differ between
		 * left and right. So... we synthesize that bit in the
		 * two new keys.
		 * The mask 'm' below will be a single "one" bit at
		 * the position (inode->pos)
		 */

		/* Use the old key, but set the new significant
		 * bit to zero.
		 */

		/* the halves were preallocated in the first pass above */
		left = (struct tnode *) tnode_get_child(tn, 2*i);
		put_child(t, tn, 2*i, NULL);

		BUG_ON(!left);

		right = (struct tnode *) tnode_get_child(tn, 2*i+1);
		put_child(t, tn, 2*i+1, NULL);

		BUG_ON(!right);

		size = tnode_child_length(left);
		for (j = 0; j < size; j++) {
			put_child(t, left, j, inode->child[j]);
			put_child(t, right, j, inode->child[j + size]);
		}
		put_child(t, tn, 2*i, resize(t, left));
		put_child(t, tn, 2*i+1, resize(t, right));

		tnode_free(inode);
	}
	tnode_free(oldtnode);
	return tn;
nomem:
	{
		/* free every preallocated half, then the new node itself */
		int size = tnode_child_length(tn);
		int j;

		for (j = 0; j < size; j++)
			if (tn->child[j])
				tnode_free((struct tnode *)tn->child[j]);

		tnode_free(tn);

		return ERR_PTR(-ENOMEM);
	}
}
/*
 * Halve the child array of 'tn': build a replacement tnode with one
 * fewer index bit, merging each pair of old slots into one.  Returns
 * the new tnode, or ERR_PTR(-ENOMEM) leaving the original untouched.
 */
static struct tnode *halve(struct trie *t, struct tnode *tn)
{
	struct tnode *oldtnode = tn;
	struct node *left, *right;
	int i;
	int olen = tnode_child_length(tn);

	pr_debug("In halve\n");

	tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1);

	if (!tn)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate and store tnodes before the actual work so we
	 * don't get into an inconsistent state if memory allocation
	 * fails. In case of failure we return the oldnode and halve
	 * of tnode is ignored.
	 */

	for (i = 0; i < olen; i += 2) {
		left = tnode_get_child(oldtnode, i);
		right = tnode_get_child(oldtnode, i+1);

		/* Two nonempty children: they will need a new 1-bit
		 * binary node to hold them both; allocate it up front */
		if (left && right) {
			struct tnode *newn;

			newn = tnode_new(left->key, tn->pos + tn->bits, 1);

			if (!newn)
				goto nomem;

			put_child(t, tn, i/2, (struct node *)newn);
		}

	}

	for (i = 0; i < olen; i += 2) {
		struct tnode *newBinNode;

		left = tnode_get_child(oldtnode, i);
		right = tnode_get_child(oldtnode, i+1);

		/* At least one of the children is empty */
		if (left == NULL) {
			if (right == NULL)    /* Both are empty */
				continue;
			put_child(t, tn, i/2, right);
			continue;
		}

		if (right == NULL) {
			put_child(t, tn, i/2, left);
			continue;
		}

		/* Two nonempty children: fill the preallocated binary node */
		newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
		put_child(t, tn, i/2, NULL);
		put_child(t, newBinNode, 0, left);
		put_child(t, newBinNode, 1, right);
		put_child(t, tn, i/2, resize(t, newBinNode));
	}
	tnode_free(oldtnode);
	return tn;
nomem:
	{
		/* free every preallocated binary node, then the new node */
		int size = tnode_child_length(tn);
		int j;

		for (j = 0; j < size; j++)
			if (tn->child[j])
				tnode_free((struct tnode *)tn->child[j]);

		tnode_free(tn);

		return ERR_PTR(-ENOMEM);
	}
}
  679. static void trie_init(struct trie *t)
  680. {
  681. if (!t)
  682. return;
  683. t->size = 0;
  684. rcu_assign_pointer(t->trie, NULL);
  685. t->revision = 0;
  686. #ifdef CONFIG_IP_FIB_TRIE_STATS
  687. memset(&t->stats, 0, sizeof(struct trie_use_stats));
  688. #endif
  689. }
/* Readers must take rcu_read_lock(); currently only the dump routines
   (via get_fa_head and dump) use this on the read side */
  692. static struct leaf_info *find_leaf_info(struct hlist_head *head, int plen)
  693. {
  694. struct hlist_node *node;
  695. struct leaf_info *li;
  696. hlist_for_each_entry_rcu(li, node, head, hlist)
  697. if (li->plen == plen)
  698. return li;
  699. return NULL;
  700. }
  701. static inline struct list_head * get_fa_head(struct leaf *l, int plen)
  702. {
  703. struct leaf_info *li = find_leaf_info(&l->list, plen);
  704. if (!li)
  705. return NULL;
  706. return &li->falh;
  707. }
/* Insert 'new' into a leaf's leaf_info list, keeping the list sorted by
 * descending prefix length.  Writer side only; readers walk under RCU,
 * hence the _rcu add variants.
 */
static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
{
	struct leaf_info *li = NULL, *last = NULL;
	struct hlist_node *node;

	if (hlist_empty(head)) {
		hlist_add_head_rcu(&new->hlist, head);
	} else {
		/* Find the last entry with plen >= new->plen.  If we break
		 * on the very first entry, last stays NULL and 'li' still
		 * points at that entry, so we insert before it below. */
		hlist_for_each_entry(li, node, head, hlist) {
			if (new->plen > li->plen)
				break;

			last = li;
		}
		if (last)
			hlist_add_after_rcu(&last->hlist, &new->hlist);
		else
			hlist_add_before_rcu(&new->hlist, &li->hlist);
	}
}
/* rcu_read_lock needs to be held by the caller on the read side */
/* Find the leaf whose key exactly equals 'key'.
 * Returns NULL when there is no exact match.
 * Caller must hold rcu_read_lock().
 */
static struct leaf *
fib_find_node(struct trie *t, u32 key)
{
	int pos;
	struct tnode *tn;
	struct node *n;

	pos = 0;
	n = rcu_dereference(t->trie);

	while (n != NULL && NODE_TYPE(n) == T_TNODE) {
		tn = (struct tnode *) n;

		check_tnode(tn);

		/* Descend only while the key agrees with the bits this tnode
		 * actually covers; any skipped bits are re-verified by the
		 * final tkey_equals() below. */
		if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
			pos = tn->pos + tn->bits;
			n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
		} else
			break;
	}
	/* Case we have found a leaf. Compare prefixes */

	if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key))
		return (struct leaf *)n;

	return NULL;
}
/* Walk from 'tn' up to the root, resizing each tnode on the way and
 * re-linking the result into its parent.  tnode_put_child_reorg() keeps
 * the parent's fullness accounting correct ('wasfull' is sampled before
 * the resize).  Returns the (possibly replaced) root node.
 * Updater side only.
 */
static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
{
	int wasfull;
	t_key cindex, key;
	struct tnode *tp = NULL;

	key = tn->key;

	while (tn != NULL && NODE_PARENT(tn) != NULL) {
		tp = NODE_PARENT(tn);
		cindex = tkey_extract_bits(key, tp->pos, tp->bits);
		wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
		tn = (struct tnode *) resize (t, (struct tnode *)tn);
		tnode_put_child_reorg((struct tnode *)tp, cindex,(struct node*)tn, wasfull);

		if (!NODE_PARENT(tn))
			break;

		tn = NODE_PARENT(tn);
	}
	/* Handle last (top) tnode */
	if (IS_TNODE(tn))
		tn = (struct tnode*) resize(t, (struct tnode *)tn);

	return (struct node*) tn;
}
  770. /* only used from updater-side */
/* Insert a new (key, plen) node into the trie and return the alias list
 * head for the new prefix, or NULL with *err set on failure.
 * Updater side only.
 */
static struct list_head *
fib_insert_node(struct trie *t, int *err, u32 key, int plen)
{
	int pos, newpos;
	struct tnode *tp = NULL, *tn = NULL;
	struct node *n;
	struct leaf *l;
	int missbit;
	struct list_head *fa_head = NULL;
	struct leaf_info *li;
	t_key cindex;

	pos = 0;
	n = t->trie;

	/* If we point to NULL, stop. Either the tree is empty and we should
	 * just put a new leaf in if, or we have reached an empty child slot,
	 * and we should just put our new leaf in that.
	 * If we point to a T_TNODE, check if it matches our key. Note that
	 * a T_TNODE might be skipping any number of bits - its 'pos' need
	 * not be the parent's 'pos'+'bits'!
	 *
	 * If it does match the current key, get pos/bits from it, extract
	 * the index from our key, push the T_TNODE and walk the tree.
	 *
	 * If it doesn't, we have to replace it with a new T_TNODE.
	 *
	 * If we point to a T_LEAF, it might or might not have the same key
	 * as we do. If it does, just change the value, update the T_LEAF's
	 * value, and return it.
	 * If it doesn't, we need to replace it with a T_TNODE.
	 */
	while (n != NULL && NODE_TYPE(n) == T_TNODE) {
		tn = (struct tnode *) n;

		check_tnode(tn);

		if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
			tp = tn;
			pos = tn->pos + tn->bits;
			n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));

			BUG_ON(n && NODE_PARENT(n) != tn);
		} else
			break;
	}

	/*
	 * n  ----> NULL, LEAF or TNODE
	 *
	 * tp is n's (parent) ----> NULL or TNODE
	 */
	BUG_ON(tp && IS_LEAF(tp));

	/* Case 1: n is a leaf. Compare prefixes */
	if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) {
		/* Exact leaf already exists: just add a leaf_info for plen */
		struct leaf *l = (struct leaf *) n;

		li = leaf_info_new(plen);

		if (!li) {
			*err = -ENOMEM;
			goto err;
		}

		fa_head = &li->falh;
		insert_leaf_info(&l->list, li);
		goto done;
	}
	t->size++;
	l = leaf_new();

	if (!l) {
		*err = -ENOMEM;
		goto err;
	}

	l->key = key;
	li = leaf_info_new(plen);

	if (!li) {
		tnode_free((struct tnode *) l);
		*err = -ENOMEM;
		goto err;
	}

	fa_head = &li->falh;
	insert_leaf_info(&l->list, li);

	if (t->trie && n == NULL) {
		/* Case 2: n is NULL, and will just insert a new leaf */
		NODE_SET_PARENT(l, tp);

		cindex = tkey_extract_bits(key, tp->pos, tp->bits);
		put_child(t, (struct tnode *)tp, cindex, (struct node *)l);
	} else {
		/* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
		/*
		 *  Add a new tnode here
		 *  first tnode need some special handling
		 */

		if (tp)
			pos = tp->pos+tp->bits;
		else
			pos = 0;

		if (n) {
			/* Split at the first bit where key and n->key differ */
			newpos = tkey_mismatch(key, pos, n->key);
			tn = tnode_new(n->key, newpos, 1);
		} else {
			newpos = 0;
			tn = tnode_new(key, newpos, 1); /* First tnode */
		}

		if (!tn) {
			free_leaf_info(li);
			tnode_free((struct tnode *) l);
			*err = -ENOMEM;
			goto err;
		}

		NODE_SET_PARENT(tn, tp);

		/* Put new leaf on the side of the mismatching bit,
		 * the displaced node on the other */
		missbit = tkey_extract_bits(key, newpos, 1);
		put_child(t, tn, missbit, (struct node *)l);
		put_child(t, tn, 1-missbit, n);

		if (tp) {
			cindex = tkey_extract_bits(key, tp->pos, tp->bits);
			put_child(t, (struct tnode *)tp, cindex, (struct node *)tn);
		} else {
			rcu_assign_pointer(t->trie, (struct node *)tn); /* First tnode */
			tp = tn;
		}
	}

	if (tp && tp->pos + tp->bits > 32)
		printk("ERROR tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
		       tp, tp->pos, tp->bits, key, plen);

	/* Rebalance the trie */

	rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
done:
	t->revision++;
err:
	return fa_head;
}
  895. static int
  896. fn_trie_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
  897. struct nlmsghdr *nlhdr, struct netlink_skb_parms *req)
  898. {
  899. struct trie *t = (struct trie *) tb->tb_data;
  900. struct fib_alias *fa, *new_fa;
  901. struct list_head *fa_head = NULL;
  902. struct fib_info *fi;
  903. int plen = r->rtm_dst_len;
  904. int type = r->rtm_type;
  905. u8 tos = r->rtm_tos;
  906. u32 key, mask;
  907. int err;
  908. struct leaf *l;
  909. if (plen > 32)
  910. return -EINVAL;
  911. key = 0;
  912. if (rta->rta_dst)
  913. memcpy(&key, rta->rta_dst, 4);
  914. key = ntohl(key);
  915. pr_debug("Insert table=%d %08x/%d\n", tb->tb_id, key, plen);
  916. mask = ntohl(inet_make_mask(plen));
  917. if (key & ~mask)
  918. return -EINVAL;
  919. key = key & mask;
  920. fi = fib_create_info(r, rta, nlhdr, &err);
  921. if (!fi)
  922. goto err;
  923. l = fib_find_node(t, key);
  924. fa = NULL;
  925. if (l) {
  926. fa_head = get_fa_head(l, plen);
  927. fa = fib_find_alias(fa_head, tos, fi->fib_priority);
  928. }
  929. /* Now fa, if non-NULL, points to the first fib alias
  930. * with the same keys [prefix,tos,priority], if such key already
  931. * exists or to the node before which we will insert new one.
  932. *
  933. * If fa is NULL, we will need to allocate a new one and
  934. * insert to the head of f.
  935. *
  936. * If f is NULL, no fib node matched the destination key
  937. * and we need to allocate a new one of those as well.
  938. */
  939. if (fa && fa->fa_info->fib_priority == fi->fib_priority) {
  940. struct fib_alias *fa_orig;
  941. err = -EEXIST;
  942. if (nlhdr->nlmsg_flags & NLM_F_EXCL)
  943. goto out;
  944. if (nlhdr->nlmsg_flags & NLM_F_REPLACE) {
  945. struct fib_info *fi_drop;
  946. u8 state;
  947. err = -ENOBUFS;
  948. new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
  949. if (new_fa == NULL)
  950. goto out;
  951. fi_drop = fa->fa_info;
  952. new_fa->fa_tos = fa->fa_tos;
  953. new_fa->fa_info = fi;
  954. new_fa->fa_type = type;
  955. new_fa->fa_scope = r->rtm_scope;
  956. state = fa->fa_state;
  957. new_fa->fa_state &= ~FA_S_ACCESSED;
  958. list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
  959. alias_free_mem_rcu(fa);
  960. fib_release_info(fi_drop);
  961. if (state & FA_S_ACCESSED)
  962. rt_cache_flush(-1);
  963. goto succeeded;
  964. }
  965. /* Error if we find a perfect match which
  966. * uses the same scope, type, and nexthop
  967. * information.
  968. */
  969. fa_orig = fa;
  970. list_for_each_entry(fa, fa_orig->fa_list.prev, fa_list) {
  971. if (fa->fa_tos != tos)
  972. break;
  973. if (fa->fa_info->fib_priority != fi->fib_priority)
  974. break;
  975. if (fa->fa_type == type &&
  976. fa->fa_scope == r->rtm_scope &&
  977. fa->fa_info == fi) {
  978. goto out;
  979. }
  980. }
  981. if (!(nlhdr->nlmsg_flags & NLM_F_APPEND))
  982. fa = fa_orig;
  983. }
  984. err = -ENOENT;
  985. if (!(nlhdr->nlmsg_flags & NLM_F_CREATE))
  986. goto out;
  987. err = -ENOBUFS;
  988. new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
  989. if (new_fa == NULL)
  990. goto out;
  991. new_fa->fa_info = fi;
  992. new_fa->fa_tos = tos;
  993. new_fa->fa_type = type;
  994. new_fa->fa_scope = r->rtm_scope;
  995. new_fa->fa_state = 0;
  996. /*
  997. * Insert new entry to the list.
  998. */
  999. if (!fa_head) {
  1000. fa_head = fib_insert_node(t, &err, key, plen);
  1001. err = 0;
  1002. if (err)
  1003. goto out_free_new_fa;
  1004. }
  1005. list_add_tail_rcu(&new_fa->fa_list,
  1006. (fa ? &fa->fa_list : fa_head));
  1007. rt_cache_flush(-1);
  1008. rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, nlhdr, req);
  1009. succeeded:
  1010. return 0;
  1011. out_free_new_fa:
  1012. kmem_cache_free(fn_alias_kmem, new_fa);
  1013. out:
  1014. fib_release_info(fi);
  1015. err:
  1016. return err;
  1017. }
/* should be called with rcu_read_lock */
  1019. static inline int check_leaf(struct trie *t, struct leaf *l,
  1020. t_key key, int *plen, const struct flowi *flp,
  1021. struct fib_result *res)
  1022. {
  1023. int err, i;
  1024. t_key mask;
  1025. struct leaf_info *li;
  1026. struct hlist_head *hhead = &l->list;
  1027. struct hlist_node *node;
  1028. hlist_for_each_entry_rcu(li, node, hhead, hlist) {
  1029. i = li->plen;
  1030. mask = ntohl(inet_make_mask(i));
  1031. if (l->key != (key & mask))
  1032. continue;
  1033. if ((err = fib_semantic_match(&li->falh, flp, res, l->key, mask, i)) <= 0) {
  1034. *plen = i;
  1035. #ifdef CONFIG_IP_FIB_TRIE_STATS
  1036. t->stats.semantic_match_passed++;
  1037. #endif
  1038. return err;
  1039. }
  1040. #ifdef CONFIG_IP_FIB_TRIE_STATS
  1041. t->stats.semantic_match_miss++;
  1042. #endif
  1043. }
  1044. return 1;
  1045. }
/* Longest-prefix-match lookup.  Descends the trie using bits of the
 * destination key; on a dead end it backtracks by progressively zeroing
 * ("chopping off") low-order index bits and, when a tnode is exhausted,
 * climbing to the parent.  current_prefix_length tracks how many leading
 * key bits are still allowed to match once we are in prefix-matching
 * mode.  Returns 0/negative from check_leaf() on a match, 1 on failure.
 */
static int
fn_trie_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
{
	struct trie *t = (struct trie *) tb->tb_data;
	int plen, ret = 0;
	struct node *n;
	struct tnode *pn;
	int pos, bits;
	t_key key = ntohl(flp->fl4_dst);
	int chopped_off;
	t_key cindex = 0;
	int current_prefix_length = KEYLENGTH;
	struct tnode *cn;
	t_key node_prefix, key_prefix, pref_mismatch;
	int mp;

	rcu_read_lock();

	n = rcu_dereference(t->trie);
	if (!n)
		goto failed;

#ifdef CONFIG_IP_FIB_TRIE_STATS
	t->stats.gets++;
#endif

	/* Just a leaf? */
	if (IS_LEAF(n)) {
		if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
			goto found;
		goto failed;
	}
	pn = (struct tnode *) n;
	chopped_off = 0;

	while (pn) {
		pos = pn->pos;
		bits = pn->bits;

		/* After a backtrack, cindex was adjusted in place; only
		 * recompute it from the key on a fresh descent. */
		if (!chopped_off)
			cindex = tkey_extract_bits(MASK_PFX(key, current_prefix_length), pos, bits);

		n = tnode_get_child(pn, cindex);

		if (n == NULL) {
#ifdef CONFIG_IP_FIB_TRIE_STATS
			t->stats.null_node_hit++;
#endif
			goto backtrace;
		}

		if (IS_LEAF(n)) {
			if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
				goto found;
			else
				goto backtrace;
		}

#define HL_OPTIMIZE
#ifdef HL_OPTIMIZE
		cn = (struct tnode *)n;

		/*
		 * It's a tnode, and we can do some extra checks here if we
		 * like, to avoid descending into a dead-end branch.
		 * This tnode is in the parent's child array at index
		 * key[p_pos..p_pos+p_bits] but potentially with some bits
		 * chopped off, so in reality the index may be just a
		 * subprefix, padded with zero at the end.
		 * We can also take a look at any skipped bits in this
		 * tnode - everything up to p_pos is supposed to be ok,
		 * and the non-chopped bits of the index (see previous
		 * paragraph) are also guaranteed ok, but the rest is
		 * considered unknown.
		 *
		 * The skipped bits are key[pos+bits..cn->pos].
		 */

		/* If current_prefix_length < pos+bits, we are already doing
		 * actual prefix  matching, which means everything from
		 * pos+(bits-chopped_off) onward must be zero along some
		 * branch of this subtree - otherwise there is *no* valid
		 * prefix present. Here we can only check the skipped
		 * bits. Remember, since we have already indexed into the
		 * parent's child array, we know that the bits we chopped off
		 * *are* zero.
		 */

		/* NOTA BENE: CHECKING ONLY SKIPPED BITS FOR THE NEW NODE HERE */

		if (current_prefix_length < pos+bits) {
			if (tkey_extract_bits(cn->key, current_prefix_length,
					      cn->pos - current_prefix_length) != 0 ||
			    !(cn->child[0]))
				goto backtrace;
		}

		/*
		 * If chopped_off=0, the index is fully validated and we
		 * only need to look at the skipped bits for this, the new,
		 * tnode. What we actually want to do is to find out if
		 * these skipped bits match our key perfectly, or if we will
		 * have to count on finding a matching prefix further down,
		 * because if we do, we would like to have some way of
		 * verifying the existence of such a prefix at this point.
		 */

		/* The only thing we can do at this point is to verify that
		 * any such matching prefix can indeed be a prefix to our
		 * key, and if the bits in the node we are inspecting that
		 * do not match our key are not ZERO, this cannot be true.
		 * Thus, find out where there is a mismatch (before cn->pos)
		 * and verify that all the mismatching bits are zero in the
		 * new tnode's key.
		 */

		/* Note: We aren't very concerned about the piece of the key
		 * that precede pn->pos+pn->bits, since these have already been
		 * checked. The bits after cn->pos aren't checked since these are
		 * by definition "unknown" at this point. Thus, what we want to
		 * see is if we are about to enter the "prefix matching" state,
		 * and in that case verify that the skipped bits that will prevail
		 * throughout this subtree are zero, as they have to be if we are
		 * to find a matching prefix.
		 */

		node_prefix = MASK_PFX(cn->key, cn->pos);
		key_prefix = MASK_PFX(key, cn->pos);
		pref_mismatch = key_prefix^node_prefix;
		mp = 0;

		/* In short: If skipped bits in this node do not match the search
		 * key, enter the "prefix matching" state directly.
		 */
		if (pref_mismatch) {
			/* Find the bit position of the first mismatch */
			while (!(pref_mismatch & (1<<(KEYLENGTH-1)))) {
				mp++;
				pref_mismatch = pref_mismatch <<1;
			}
			key_prefix = tkey_extract_bits(cn->key, mp, cn->pos-mp);

			if (key_prefix != 0)
				goto backtrace;

			if (current_prefix_length >= cn->pos)
				current_prefix_length = mp;
		}
#endif
		pn = (struct tnode *)n; /* Descend */
		chopped_off = 0;
		continue;

backtrace:
		chopped_off++;

		/* As zero don't change the child key (cindex) */
		while ((chopped_off <= pn->bits) && !(cindex & (1<<(chopped_off-1))))
			chopped_off++;

		/* Decrease current_... with bits chopped off */
		if (current_prefix_length > pn->pos + pn->bits - chopped_off)
			current_prefix_length = pn->pos + pn->bits - chopped_off;

		/*
		 * Either we do the actual chop off according or if we have
		 * chopped off all bits in this tnode walk up to our parent.
		 */
		if (chopped_off <= pn->bits) {
			cindex &= ~(1 << (chopped_off-1));
		} else {
			if (NODE_PARENT(pn) == NULL)
				goto failed;

			/* Get Child's index */
			cindex = tkey_extract_bits(pn->key, NODE_PARENT(pn)->pos, NODE_PARENT(pn)->bits);
			pn = NODE_PARENT(pn);
			chopped_off = 0;

#ifdef CONFIG_IP_FIB_TRIE_STATS
			t->stats.backtrack++;
#endif
			goto backtrace;
		}
	}
failed:
	ret = 1;
found:
	rcu_read_unlock();
	return ret;
}
  1209. /* only called from updater side */
/* Remove the leaf with exact key 'key' from the trie and rebalance.
 * Returns 1 if a leaf was removed, 0 if there was no exact match.
 * only called from updater side
 */
static int trie_leaf_remove(struct trie *t, t_key key)
{
	t_key cindex;
	struct tnode *tp = NULL;
	struct node *n = t->trie;
	struct leaf *l;

	pr_debug("entering trie_leaf_remove(%p)\n", n);

	/* Note that in the case skipped bits, those bits are *not* checked!
	 * When we finish this, we will have NULL or a T_LEAF, and the
	 * T_LEAF may or may not match our key.
	 */

	while (n != NULL && IS_TNODE(n)) {
		struct tnode *tn = (struct tnode *) n;
		check_tnode(tn);
		n = tnode_get_child(tn ,tkey_extract_bits(key, tn->pos, tn->bits));

		BUG_ON(n && NODE_PARENT(n) != tn);
	}
	l = (struct leaf *) n;

	if (!n || !tkey_equals(l->key, key))
		return 0;

	/*
	 * Key found.
	 * Remove the leaf and rebalance the tree
	 */

	t->revision++;
	t->size--;

	preempt_disable();
	tp = NODE_PARENT(n);
	tnode_free((struct tnode *) n);

	if (tp) {
		/* Detach the leaf slot from its parent, then rebalance
		 * upward and publish the new root for readers */
		cindex = tkey_extract_bits(key, tp->pos, tp->bits);
		put_child(t, (struct tnode *)tp, cindex, NULL);
		rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
	} else
		rcu_assign_pointer(t->trie, NULL);

	preempt_enable();

	return 1;
}
/* Delete a route from the trie table.  Finds the leaf for the prefix,
 * locates the matching alias (tos/type/scope/protocol/nexthop), unlinks
 * it, and tears down the leaf_info and leaf when they become empty.
 * Returns 0 on success, -EINVAL / -ESRCH on failure.
 */
static int
fn_trie_delete(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
	       struct nlmsghdr *nlhdr, struct netlink_skb_parms *req)
{
	struct trie *t = (struct trie *) tb->tb_data;
	u32 key, mask;
	int plen = r->rtm_dst_len;
	u8 tos = r->rtm_tos;
	struct fib_alias *fa, *fa_to_delete;
	struct list_head *fa_head;
	struct leaf *l;
	struct leaf_info *li;

	if (plen > 32)
		return -EINVAL;

	key = 0;
	if (rta->rta_dst)
		memcpy(&key, rta->rta_dst, 4);

	key = ntohl(key);
	mask = ntohl(inet_make_mask(plen));

	if (key & ~mask)
		return -EINVAL;

	key = key & mask;
	l = fib_find_node(t, key);

	if (!l)
		return -ESRCH;

	fa_head = get_fa_head(l, plen);
	fa = fib_find_alias(fa_head, tos, 0);

	if (!fa)
		return -ESRCH;

	pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);

	fa_to_delete = NULL;
	/* Scan forward from fa over aliases with the same tos for one that
	 * matches the request's type/scope/protocol/nexthop filters */
	fa_head = fa->fa_list.prev;

	list_for_each_entry(fa, fa_head, fa_list) {
		struct fib_info *fi = fa->fa_info;

		if (fa->fa_tos != tos)
			break;

		if ((!r->rtm_type ||
		     fa->fa_type == r->rtm_type) &&
		    (r->rtm_scope == RT_SCOPE_NOWHERE ||
		     fa->fa_scope == r->rtm_scope) &&
		    (!r->rtm_protocol ||
		     fi->fib_protocol == r->rtm_protocol) &&
		    fib_nh_match(r, nlhdr, rta, fi) == 0) {
			fa_to_delete = fa;
			break;
		}
	}

	if (!fa_to_delete)
		return -ESRCH;

	fa = fa_to_delete;
	rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id, nlhdr, req);

	l = fib_find_node(t, key);
	li = find_leaf_info(&l->list, plen);

	list_del_rcu(&fa->fa_list);

	/* Drop the per-plen bucket when its alias list empties ... */
	if (list_empty(fa_head)) {
		hlist_del_rcu(&li->hlist);
		free_leaf_info(li);
	}

	/* ... and the leaf itself when no buckets remain */
	if (hlist_empty(&l->list))
		trie_leaf_remove(t, key);

	if (fa->fa_state & FA_S_ACCESSED)
		rt_cache_flush(-1);

	fib_release_info(fa->fa_info);
	alias_free_mem_rcu(fa);
	return 0;
}
  1314. static int trie_flush_list(struct trie *t, struct list_head *head)
  1315. {
  1316. struct fib_alias *fa, *fa_node;
  1317. int found = 0;
  1318. list_for_each_entry_safe(fa, fa_node, head, fa_list) {
  1319. struct fib_info *fi = fa->fa_info;
  1320. if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
  1321. list_del_rcu(&fa->fa_list);
  1322. fib_release_info(fa->fa_info);
  1323. alias_free_mem_rcu(fa);
  1324. found++;
  1325. }
  1326. }
  1327. return found;
  1328. }
  1329. static int trie_flush_leaf(struct trie *t, struct leaf *l)
  1330. {
  1331. int found = 0;
  1332. struct hlist_head *lih = &l->list;
  1333. struct hlist_node *node, *tmp;
  1334. struct leaf_info *li = NULL;
  1335. hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
  1336. found += trie_flush_list(t, &li->falh);
  1337. if (list_empty(&li->falh)) {
  1338. hlist_del_rcu(&li->hlist);
  1339. free_leaf_info(li);
  1340. }
  1341. }
  1342. return found;
  1343. }
/* rcu_read_lock needs to be held by the caller on the read side */
  1345. static struct leaf *nextleaf(struct trie *t, struct leaf *thisleaf)
  1346. {
  1347. struct node *c = (struct node *) thisleaf;
  1348. struct tnode *p;
  1349. int idx;
  1350. struct node *trie = rcu_dereference(t->trie);
  1351. if (c == NULL) {
  1352. if (trie == NULL)
  1353. return NULL;
  1354. if (IS_LEAF(trie)) /* trie w. just a leaf */
  1355. return (struct leaf *) trie;
  1356. p = (struct tnode*) trie; /* Start */
  1357. } else
  1358. p = (struct tnode *) NODE_PARENT(c);
  1359. while (p) {
  1360. int pos, last;
  1361. /* Find the next child of the parent */
  1362. if (c)
  1363. pos = 1 + tkey_extract_bits(c->key, p->pos, p->bits);
  1364. else
  1365. pos = 0;
  1366. last = 1 << p->bits;
  1367. for (idx = pos; idx < last ; idx++) {
  1368. c = rcu_dereference(p->child[idx]);
  1369. if (!c)
  1370. continue;
  1371. /* Decend if tnode */
  1372. while (IS_TNODE(c)) {
  1373. p = (struct tnode *) c;
  1374. idx = 0;
  1375. /* Rightmost non-NULL branch */
  1376. if (p && IS_TNODE(p))
  1377. while (!(c = rcu_dereference(p->child[idx]))
  1378. && idx < (1<<p->bits)) idx++;
  1379. /* Done with this tnode? */
  1380. if (idx >= (1 << p->bits) || !c)
  1381. goto up;
  1382. }
  1383. return (struct leaf *) c;
  1384. }
  1385. up:
  1386. /* No more children go up one step */
  1387. c = (struct node *) p;
  1388. p = (struct tnode *) NODE_PARENT(p);
  1389. }
  1390. return NULL; /* Ready. Root of trie */
  1391. }
/* Flush all routes whose fib_info is marked RTNH_F_DEAD.  Removal of an
 * emptied leaf is deferred by one iteration (tracked in 'll') so that
 * nextleaf() is never asked to continue the walk from a freed node; the
 * final candidate is handled after the loop.  Returns the number of
 * aliases removed.
 */
static int fn_trie_flush(struct fib_table *tb)
{
	struct trie *t = (struct trie *) tb->tb_data;
	struct leaf *ll = NULL, *l = NULL;
	int found = 0, h;

	t->revision++;

	rcu_read_lock();
	for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
		found += trie_flush_leaf(t, l);

		/* Remove the previous leaf only now that the walk has
		 * already advanced past it */
		if (ll && hlist_empty(&ll->list))
			trie_leaf_remove(t, ll->key);
		ll = l;
	}
	rcu_read_unlock();

	if (ll && hlist_empty(&ll->list))
		trie_leaf_remove(t, ll->key);

	pr_debug("trie_flush found=%d\n", found);
	return found;
}
/* Index of the most recently selected default route; -1 = none known */
static int trie_last_dflt = -1;

/* Select among multiple candidate default routes (prefix 0/0) the first
 * live one, using fib_detect_death() to skip dead gateways.  Updates
 * res->fi (with refcounting) and caches the choice in trie_last_dflt.
 */
static void
fn_trie_select_default(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
{
	struct trie *t = (struct trie *) tb->tb_data;
	int order, last_idx;
	struct fib_info *fi = NULL;
	struct fib_info *last_resort;
	struct fib_alias *fa = NULL;
	struct list_head *fa_head;
	struct leaf *l;

	last_idx = -1;
	last_resort = NULL;
	order = -1;

	rcu_read_lock();

	/* Default routes live on the all-zero key */
	l = fib_find_node(t, 0);
	if (!l)
		goto out;

	fa_head = get_fa_head(l, 0);
	if (!fa_head)
		goto out;

	if (list_empty(fa_head))
		goto out;

	list_for_each_entry_rcu(fa, fa_head, fa_list) {
		struct fib_info *next_fi = fa->fa_info;

		/* Only unicast aliases in the requested scope qualify */
		if (fa->fa_scope != res->scope ||
		    fa->fa_type != RTN_UNICAST)
			continue;

		if (next_fi->fib_priority > res->fi->fib_priority)
			break;

		/* Candidate must have a link-scope gateway */
		if (!next_fi->fib_nh[0].nh_gw ||
		    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
			continue;

		fa->fa_state |= FA_S_ACCESSED;

		if (fi == NULL) {
			if (next_fi != res->fi)
				break;
		} else if (!fib_detect_death(fi, order, &last_resort,
					     &last_idx, &trie_last_dflt)) {
			/* Previous candidate is alive: keep it */
			if (res->fi)
				fib_info_put(res->fi);
			res->fi = fi;
			atomic_inc(&fi->fib_clntref);
			trie_last_dflt = order;
			goto out;
		}
		fi = next_fi;
		order++;
	}
	if (order <= 0 || fi == NULL) {
		trie_last_dflt = -1;
		goto out;
	}

	if (!fib_detect_death(fi, order, &last_resort, &last_idx, &trie_last_dflt)) {
		if (res->fi)
			fib_info_put(res->fi);
		res->fi = fi;
		atomic_inc(&fi->fib_clntref);
		trie_last_dflt = order;
		goto out;
	}
	/* Everything looked dead; fall back to the remembered last resort */
	if (last_idx >= 0) {
		if (res->fi)
			fib_info_put(res->fi);
		res->fi = last_resort;
		if (last_resort)
			atomic_inc(&last_resort->fib_clntref);
	}
	trie_last_dflt = last_idx;
 out:;
	rcu_read_unlock();
}
  1483. static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah, struct fib_table *tb,
  1484. struct sk_buff *skb, struct netlink_callback *cb)
  1485. {
  1486. int i, s_i;
  1487. struct fib_alias *fa;
  1488. u32 xkey = htonl(key);
  1489. s_i = cb->args[3];
  1490. i = 0;
  1491. /* rcu_read_lock is hold by caller */
  1492. list_for_each_entry_rcu(fa, fah, fa_list) {
  1493. if (i < s_i) {
  1494. i++;
  1495. continue;
  1496. }
  1497. if (fa->fa_info->fib_nh == NULL) {
  1498. printk("Trie error _fib_nh=NULL in fa[%d] k=%08x plen=%d\n", i, key, plen);
  1499. i++;
  1500. continue;
  1501. }
  1502. if (fa->fa_info == NULL) {
  1503. printk("Trie error fa_info=NULL in fa[%d] k=%08x plen=%d\n", i, key, plen);
  1504. i++;
  1505. continue;
  1506. }
  1507. if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
  1508. cb->nlh->nlmsg_seq,
  1509. RTM_NEWROUTE,
  1510. tb->tb_id,
  1511. fa->fa_type,
  1512. fa->fa_scope,
  1513. &xkey,
  1514. plen,
  1515. fa->fa_tos,
  1516. fa->fa_info, 0) < 0) {
  1517. cb->args[3] = i;
  1518. return -1;
  1519. }
  1520. i++;
  1521. }
  1522. cb->args[3] = i;
  1523. return skb->len;
  1524. }
/* Dump all routes of one prefix length, resuming the leaf walk at
 * cb->args[2].  Clears the deeper resume state (args[3]...) whenever
 * the walk moves past the resume point.  Returns skb->len, or -1 when
 * the skb filled up.
 */
static int fn_trie_dump_plen(struct trie *t, int plen, struct fib_table *tb, struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	int h, s_h;
	struct list_head *fa_head;
	struct leaf *l = NULL;

	s_h = cb->args[2];

	for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			memset(&cb->args[3], 0,
			       sizeof(cb->args) - 3*sizeof(cb->args[0]));

		fa_head = get_fa_head(l, plen);

		if (!fa_head)
			continue;

		if (list_empty(fa_head))
			continue;

		if (fn_trie_dump_fa(l->key, plen, fa_head, tb, skb, cb)<0) {
			cb->args[2] = h;
			return -1;
		}
	}
	cb->args[2] = h;
	return skb->len;
}
/* Netlink dump entry point: emit routes ordered from longest prefix
 * (32) to shortest (0), resuming at cb->args[1].  Deeper resume state
 * (args[2]...) is cleared when the walk advances past the resume point.
 */
static int fn_trie_dump(struct fib_table *tb, struct sk_buff *skb, struct netlink_callback *cb)
{
	int m, s_m;
	struct trie *t = (struct trie *) tb->tb_data;

	s_m = cb->args[1];

	rcu_read_lock();
	for (m = 0; m <= 32; m++) {
		if (m < s_m)
			continue;
		if (m > s_m)
			memset(&cb->args[2], 0,
			       sizeof(cb->args) - 2*sizeof(cb->args[0]));

		if (fn_trie_dump_plen(t, 32-m, tb, skb, cb)<0) {
			cb->args[1] = m;
			goto out;
		}
	}
	rcu_read_unlock();
	cb->args[1] = m;
	return skb->len;
out:
	rcu_read_unlock();
	return -1;
}
  1575. /* Fix more generic FIB names for init later */
#ifdef CONFIG_IP_MULTIPLE_TABLES
struct fib_table * fib_hash_init(int id)
#else
struct fib_table * __init fib_hash_init(int id)
#endif
{
	struct fib_table *tb;
	struct trie *t;

	/* Lazily create the shared fib_alias slab cache.
	 * NOTE(review): the kmem_cache_create() return value is not
	 * checked — on failure fn_alias_kmem stays NULL; confirm the
	 * later kmem_cache_alloc() callers tolerate that. */
	if (fn_alias_kmem == NULL)
		fn_alias_kmem = kmem_cache_create("ip_fib_alias",
						  sizeof(struct fib_alias),
						  0, SLAB_HWCACHE_ALIGN,
						  NULL, NULL);

	/* The trie is embedded directly after the fib_table header */
	tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
		     GFP_KERNEL);
	if (tb == NULL)
		return NULL;

	tb->tb_id = id;
	tb->tb_lookup = fn_trie_lookup;
	tb->tb_insert = fn_trie_insert;
	tb->tb_delete = fn_trie_delete;
	tb->tb_flush = fn_trie_flush;
	tb->tb_select_default = fn_trie_select_default;
	tb->tb_dump = fn_trie_dump;
	memset(tb->tb_data, 0, sizeof(struct trie));

	t = (struct trie *) tb->tb_data;

	trie_init(t);

	/* Remember the LOCAL/MAIN tables for the procfs dump helpers */
	if (id == RT_TABLE_LOCAL)
		trie_local = t;
	else if (id == RT_TABLE_MAIN)
		trie_main = t;

	if (id == RT_TABLE_LOCAL)
		printk("IPv4 FIB: Using LC-trie version %s\n", VERSION);

	return tb;
}
  1611. /* Trie dump functions */
  1612. static void putspace_seq(struct seq_file *seq, int n)
  1613. {
  1614. while (n--)
  1615. seq_printf(seq, " ");
  1616. }
  1617. static void printbin_seq(struct seq_file *seq, unsigned int v, int bits)
  1618. {
  1619. while (bits--)
  1620. seq_printf(seq, "%s", (v & (1<<bits))?"1":"0");
  1621. }
/*
 * Print one trie node (leaf or internal) for the /proc/net/fib_trie dump.
 *
 * @indent: current tree indentation depth
 * @n:      node to print
 * @pend:   bit position where the parent's index bits end (used to show
 *          how many key bits are skipped by path compression)
 * @cindex: this node's slot index in its parent's child array
 * @bits:   parent's child-index width in bits; 0 means @n is the root
 */
static void printnode_seq(struct seq_file *seq, int indent, struct node *n,
			  int pend, int cindex, int bits)
{
	putspace_seq(seq, indent);

	/* '|' marks a leaf, '+' an internal node */
	if (IS_LEAF(n))
		seq_printf(seq, "|");
	else
		seq_printf(seq, "+");

	if (bits) {
		/* child slot index, decimal then binary */
		seq_printf(seq, "%d/", cindex);
		printbin_seq(seq, cindex, bits);
		seq_printf(seq, ": ");
	} else
		seq_printf(seq, "<root>: ");
	seq_printf(seq, "%s:%p ", IS_LEAF(n)?"Leaf":"Internal node", n);

	if (IS_LEAF(n)) {
		struct leaf *l = (struct leaf *)n;
		struct fib_alias *fa;
		int i;

		/* leaf key shown as a dotted quad */
		seq_printf(seq, "key=%d.%d.%d.%d\n",
			   n->key >> 24, (n->key >> 16) % 256, (n->key >> 8) % 256, n->key % 256);

		/* walk prefix lengths longest-first; dump each alias list */
		for (i = 32; i >= 0; i--)
			if (find_leaf_info(&l->list, i)) {
				struct list_head *fa_head = get_fa_head(l, i);

				if (!fa_head)
					continue;

				if (list_empty(fa_head))
					continue;

				putspace_seq(seq, indent+2);
				seq_printf(seq, "{/%d...dumping}\n", i);

				list_for_each_entry_rcu(fa, fa_head, fa_list) {
					putspace_seq(seq, indent+2);
					/* defensive: flag corrupt aliases instead of oopsing */
					if (fa->fa_info == NULL) {
						seq_printf(seq, "Error fa_info=NULL\n");
						continue;
					}
					if (fa->fa_info->fib_nh == NULL) {
						seq_printf(seq, "Error _fib_nh=NULL\n");
						continue;
					}

					seq_printf(seq, "{type=%d scope=%d TOS=%d}\n",
						   fa->fa_type,
						   fa->fa_scope,
						   fa->fa_tos);
				}
			}
	} else {
		struct tnode *tn = (struct tnode *)n;
		int plen = ((struct tnode *)n)->pos;
		t_key prf = MASK_PFX(n->key, plen);

		/* prefix covered by this internal node, dotted quad + length */
		seq_printf(seq, "key=%d.%d.%d.%d/%d\n",
			   prf >> 24, (prf >> 16) % 256, (prf >> 8) % 256, prf % 256, plen);

		/* key prefix in hex and binary */
		putspace_seq(seq, indent); seq_printf(seq, "| ");
		seq_printf(seq, "{key prefix=%08x/", tn->key & TKEY_GET_MASK(0, tn->pos));
		printbin_seq(seq, tkey_extract_bits(tn->key, 0, tn->pos), tn->pos);
		seq_printf(seq, "}\n");
		/* pos/skip/bits: skip = bits elided by path compression */
		putspace_seq(seq, indent); seq_printf(seq, "| ");
		seq_printf(seq, "{pos=%d", tn->pos);
		seq_printf(seq, " (skip=%d bits)", tn->pos - pend);
		seq_printf(seq, " bits=%d (%u children)}\n", tn->bits, (1 << tn->bits));
		/* child occupancy counters kept for resize heuristics */
		putspace_seq(seq, indent); seq_printf(seq, "| ");
		seq_printf(seq, "{empty=%d full=%d}\n", tn->empty_children, tn->full_children);
	}
}
/*
 * Dump the whole trie @t to @seq as an indented tree.
 *
 * Iterative pre-order traversal without a stack: descend into each
 * tnode child, and when a tnode's child slots are exhausted, recompute
 * the position in the parent from the node's own key bits
 * (tkey_extract_bits) and continue with the next sibling.  Runs under
 * the RCU read lock for the whole walk.
 */
static void trie_dump_seq(struct seq_file *seq, struct trie *t)
{
	struct node *n;
	int cindex = 0;		/* current child slot in tn */
	int indent = 1;
	int pend = 0;		/* bit where parent's index bits end */
	int depth = 0;
	struct tnode *tn;

	rcu_read_lock();
	n = rcu_dereference(t->trie);
	seq_printf(seq, "------ trie_dump of t=%p ------\n", t);

	if (!n) {
		seq_printf(seq, "------ trie is empty\n");

		rcu_read_unlock();
		return;
	}

	printnode_seq(seq, indent, n, pend, cindex, 0);

	/* a root that is a lone leaf has nothing more to walk */
	if (!IS_TNODE(n)) {
		rcu_read_unlock();
		return;
	}

	tn = (struct tnode *)n;
	pend = tn->pos+tn->bits;
	putspace_seq(seq, indent); seq_printf(seq, "\\--\n");
	indent += 3;
	depth++;

	while (tn && cindex < (1 << tn->bits)) {
		struct node *child = rcu_dereference(tn->child[cindex]);
		if (!child)
			cindex++;
		else {
			/* Got a child */
			printnode_seq(seq, indent, child, pend,
				      cindex, tn->bits);

			if (IS_LEAF(child))
				cindex++;
			else {
				/*
				 * New tnode. Decend one level
				 */
				depth++;
				n = child;
				tn = (struct tnode *)n;
				pend = tn->pos+tn->bits;
				putspace_seq(seq, indent);
				seq_printf(seq, "\\--\n");
				indent += 3;
				cindex = 0;
			}
		}

		/*
		 * Test if we are done
		 */
		while (cindex >= (1 << tn->bits)) {
			/*
			 * Move upwards and test for root
			 * pop off all traversed nodes
			 */
			if (NODE_PARENT(tn) == NULL) {
				tn = NULL;
				break;
			}

			/* recover our slot in the parent from our own key bits */
			cindex = tkey_extract_bits(tn->key, NODE_PARENT(tn)->pos, NODE_PARENT(tn)->bits);
			cindex++;
			tn = NODE_PARENT(tn);
			pend = tn->pos + tn->bits;
			indent -= 3;
			depth--;
		}
	}

	rcu_read_unlock();
}
  1758. static struct trie_stat *trie_stat_new(void)
  1759. {
  1760. struct trie_stat *s;
  1761. int i;
  1762. s = kmalloc(sizeof(struct trie_stat), GFP_KERNEL);
  1763. if (!s)
  1764. return NULL;
  1765. s->totdepth = 0;
  1766. s->maxdepth = 0;
  1767. s->tnodes = 0;
  1768. s->leaves = 0;
  1769. s->nullpointers = 0;
  1770. for (i = 0; i < MAX_CHILDS; i++)
  1771. s->nodesizes[i] = 0;
  1772. return s;
  1773. }
  1774. static struct trie_stat *trie_collect_stats(struct trie *t)
  1775. {
  1776. struct node *n;
  1777. struct trie_stat *s = trie_stat_new();
  1778. int cindex = 0;
  1779. int pend = 0;
  1780. int depth = 0;
  1781. if (!s)
  1782. return NULL;
  1783. rcu_read_lock();
  1784. n = rcu_dereference(t->trie);
  1785. if (!n)
  1786. return s;
  1787. if (IS_TNODE(n)) {
  1788. struct tnode *tn = (struct tnode *)n;
  1789. pend = tn->pos+tn->bits;
  1790. s->nodesizes[tn->bits]++;
  1791. depth++;
  1792. while (tn && cindex < (1 << tn->bits)) {
  1793. struct node *ch = rcu_dereference(tn->child[cindex]);
  1794. if (ch) {
  1795. /* Got a child */
  1796. if (IS_LEAF(tn->child[cindex])) {
  1797. cindex++;
  1798. /* stats */
  1799. if (depth > s->maxdepth)
  1800. s->maxdepth = depth;
  1801. s->totdepth += depth;
  1802. s->leaves++;
  1803. } else {
  1804. /*
  1805. * New tnode. Decend one level
  1806. */
  1807. s->tnodes++;
  1808. s->nodesizes[tn->bits]++;
  1809. depth++;
  1810. n = ch;
  1811. tn = (struct tnode *)n;
  1812. pend = tn->pos+tn->bits;
  1813. cindex = 0;
  1814. }
  1815. } else {
  1816. cindex++;
  1817. s->nullpointers++;
  1818. }
  1819. /*
  1820. * Test if we are done
  1821. */
  1822. while (cindex >= (1 << tn->bits)) {
  1823. /*
  1824. * Move upwards and test for root
  1825. * pop off all traversed nodes
  1826. */
  1827. if (NODE_PARENT(tn) == NULL) {
  1828. tn = NULL;
  1829. n = NULL;
  1830. break;
  1831. }
  1832. cindex = tkey_extract_bits(tn->key, NODE_PARENT(tn)->pos, NODE_PARENT(tn)->bits);
  1833. tn = NODE_PARENT(tn);
  1834. cindex++;
  1835. n = (struct node *)tn;
  1836. pend = tn->pos+tn->bits;
  1837. depth--;
  1838. }
  1839. }
  1840. }
  1841. rcu_read_unlock();
  1842. return s;
  1843. }
  1844. #ifdef CONFIG_PROC_FS
  1845. static struct fib_alias *fib_triestat_get_first(struct seq_file *seq)
  1846. {
  1847. return NULL;
  1848. }
  1849. static struct fib_alias *fib_triestat_get_next(struct seq_file *seq)
  1850. {
  1851. return NULL;
  1852. }
  1853. static void *fib_triestat_seq_start(struct seq_file *seq, loff_t *pos)
  1854. {
  1855. if (!ip_fib_main_table)
  1856. return NULL;
  1857. if (*pos)
  1858. return fib_triestat_get_next(seq);
  1859. else
  1860. return SEQ_START_TOKEN;
  1861. }
  1862. static void *fib_triestat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  1863. {
  1864. ++*pos;
  1865. if (v == SEQ_START_TOKEN)
  1866. return fib_triestat_get_first(seq);
  1867. else
  1868. return fib_triestat_get_next(seq);
  1869. }
/* seq_file .stop: nothing to release — start() takes no locks. */
static void fib_triestat_seq_stop(struct seq_file *seq, void *v)
{
}
/*
 * This outputs /proc/net/fib_triestat (the file registered by
 * fib_stat_proc_init below).
 *
 * It always works in backward compatibility mode.
 * The format of the file is not supposed to be changed.
 */
/*
 * Collect stats for trie @t and print the human-readable report to
 * @seq: average/max depth, node counts, child-pointer histogram, and
 * an approximate memory footprint.  If the stats allocation fails,
 * only the "trie=" header is printed.
 */
static void collect_and_show(struct trie *t, struct seq_file *seq)
{
	int bytes = 0; /* How many bytes are used, a ref is 4 bytes */
	int i, max, pointers;
	struct trie_stat *stat;
	int avdepth;

	stat = trie_collect_stats(t);

	bytes = 0;
	seq_printf(seq, "trie=%p\n", t);

	if (stat) {
		/* average depth scaled by 100 to print two decimals */
		if (stat->leaves)
			avdepth = stat->totdepth*100 / stat->leaves;
		else
			avdepth = 0;
		seq_printf(seq, "Aver depth: %d.%02d\n", avdepth / 100, avdepth % 100);
		seq_printf(seq, "Max depth: %4d\n", stat->maxdepth);

		seq_printf(seq, "Leaves: %d\n", stat->leaves);
		bytes += sizeof(struct leaf) * stat->leaves;
		seq_printf(seq, "Internal nodes: %d\n", stat->tnodes);
		bytes += sizeof(struct tnode) * stat->tnodes;

		/* trim trailing empty buckets from the histogram */
		max = MAX_CHILDS-1;

		while (max >= 0 && stat->nodesizes[max] == 0)
			max--;
		pointers = 0;

		/* histogram: tnodes bucketed by bits (2^bits child slots each) */
		for (i = 1; i <= max; i++)
			if (stat->nodesizes[i] != 0) {
				seq_printf(seq, " %d: %d", i, stat->nodesizes[i]);
				pointers += (1<<i) * stat->nodesizes[i];
			}
		seq_printf(seq, "\n");
		seq_printf(seq, "Pointers: %d\n", pointers);
		bytes += sizeof(struct node *) * pointers;
		seq_printf(seq, "Null ptrs: %d\n", stat->nullpointers);
		seq_printf(seq, "Total size: %d kB\n", bytes / 1024);

		kfree(stat);
	}

#ifdef CONFIG_IP_FIB_TRIE_STATS
	seq_printf(seq, "Counters:\n---------\n");
	seq_printf(seq,"gets = %d\n", t->stats.gets);
	seq_printf(seq,"backtracks = %d\n", t->stats.backtrack);
	seq_printf(seq,"semantic match passed = %d\n", t->stats.semantic_match_passed);
	seq_printf(seq,"semantic match miss = %d\n", t->stats.semantic_match_miss);
	seq_printf(seq,"null node hit= %d\n", t->stats.null_node_hit);
	seq_printf(seq,"skipped node resize = %d\n", t->stats.resize_node_skipped);
#ifdef CLEAR_STATS
	/* optionally reset runtime counters after each read */
	memset(&(t->stats), 0, sizeof(t->stats));
#endif
#endif /*  CONFIG_IP_FIB_TRIE_STATS */
}
/*
 * seq_file .show for /proc/net/fib_triestat: on the start token emit
 * the node-size header plus full stats for the local and main tries;
 * the iterators never produce anything else, so the else branch with
 * its placeholder record is effectively dead output.
 */
static int fib_triestat_seq_show(struct seq_file *seq, void *v)
{
	char bf[128];

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "Basic info: size of leaf: %Zd bytes, size of tnode: %Zd bytes.\n",
			   sizeof(struct leaf), sizeof(struct tnode));
		if (trie_local)
			collect_and_show(trie_local, seq);

		if (trie_main)
			collect_and_show(trie_main, seq);
	} else {
		snprintf(bf, sizeof(bf), "*\t%08X\t%08X", 200, 400);

		seq_printf(seq, "%-127s\n", bf);
	}
	return 0;
}
/* seq_file iterator ops for /proc/net/fib_triestat */
static struct seq_operations fib_triestat_seq_ops = {
	.start = fib_triestat_seq_start,
	.next = fib_triestat_seq_next,
	.stop = fib_triestat_seq_stop,
	.show = fib_triestat_seq_show,
};
  1950. static int fib_triestat_seq_open(struct inode *inode, struct file *file)
  1951. {
  1952. struct seq_file *seq;
  1953. int rc = -ENOMEM;
  1954. rc = seq_open(file, &fib_triestat_seq_ops);
  1955. if (rc)
  1956. goto out_kfree;
  1957. seq = file->private_data;
  1958. out:
  1959. return rc;
  1960. out_kfree:
  1961. goto out;
  1962. }
/* file ops for /proc/net/fib_triestat */
static struct file_operations fib_triestat_seq_fops = {
	.owner = THIS_MODULE,
	.open = fib_triestat_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
  1970. int __init fib_stat_proc_init(void)
  1971. {
  1972. if (!proc_net_fops_create("fib_triestat", S_IRUGO, &fib_triestat_seq_fops))
  1973. return -ENOMEM;
  1974. return 0;
  1975. }
  1976. void __init fib_stat_proc_exit(void)
  1977. {
  1978. proc_net_remove("fib_triestat");
  1979. }
/*
 * Placeholder iterators for the fib_trie seq_file: the whole trie is
 * dumped from the SEQ_START_TOKEN pass in fib_trie_seq_show(), so
 * there is never a "next" element.
 */
static struct fib_alias *fib_trie_get_first(struct seq_file *seq)
{
	return NULL;
}

static struct fib_alias *fib_trie_get_next(struct seq_file *seq)
{
	return NULL;
}
  1988. static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
  1989. {
  1990. if (!ip_fib_main_table)
  1991. return NULL;
  1992. if (*pos)
  1993. return fib_trie_get_next(seq);
  1994. else
  1995. return SEQ_START_TOKEN;
  1996. }
  1997. static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  1998. {
  1999. ++*pos;
  2000. if (v == SEQ_START_TOKEN)
  2001. return fib_trie_get_first(seq);
  2002. else
  2003. return fib_trie_get_next(seq);
  2004. }
/* seq_file .stop: nothing to release — start() takes no locks. */
static void fib_trie_seq_stop(struct seq_file *seq, void *v)
{
}
  2008. /*
  2009. * This outputs /proc/net/fib_trie.
  2010. *
  2011. * It always works in backward compatibility mode.
  2012. * The format of the file is not supposed to be changed.
  2013. */
/*
 * seq_file .show for /proc/net/fib_trie: on the start token dump the
 * local and main tries in full; the iterators never yield anything
 * else, so the placeholder else branch is effectively dead output.
 */
static int fib_trie_seq_show(struct seq_file *seq, void *v)
{
	char bf[128];

	if (v == SEQ_START_TOKEN) {
		if (trie_local)
			trie_dump_seq(seq, trie_local);

		if (trie_main)
			trie_dump_seq(seq, trie_main);
	} else {
		snprintf(bf, sizeof(bf),
			 "*\t%08X\t%08X", 200, 400);
		seq_printf(seq, "%-127s\n", bf);
	}

	return 0;
}
/* seq_file iterator ops for /proc/net/fib_trie */
static struct seq_operations fib_trie_seq_ops = {
	.start = fib_trie_seq_start,
	.next = fib_trie_seq_next,
	.stop = fib_trie_seq_stop,
	.show = fib_trie_seq_show,
};
  2035. static int fib_trie_seq_open(struct inode *inode, struct file *file)
  2036. {
  2037. struct seq_file *seq;
  2038. int rc = -ENOMEM;
  2039. rc = seq_open(file, &fib_trie_seq_ops);
  2040. if (rc)
  2041. goto out_kfree;
  2042. seq = file->private_data;
  2043. out:
  2044. return rc;
  2045. out_kfree:
  2046. goto out;
  2047. }
/* file ops for /proc/net/fib_trie */
static struct file_operations fib_trie_seq_fops = {
	.owner = THIS_MODULE,
	.open = fib_trie_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release= seq_release_private,
};
  2055. int __init fib_proc_init(void)
  2056. {
  2057. if (!proc_net_fops_create("fib_trie", S_IRUGO, &fib_trie_seq_fops))
  2058. return -ENOMEM;
  2059. return 0;
  2060. }
  2061. void __init fib_proc_exit(void)
  2062. {
  2063. proc_net_remove("fib_trie");
  2064. }
  2065. #endif /* CONFIG_PROC_FS */