
/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include "xfrm_hash.h"

DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_RWLOCK(xfrm_policy_lock);

unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2];
EXPORT_SYMBOL(xfrm_policy_count);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static kmem_cache_t *xfrm_dst_cache __read_mostly;

static struct work_struct xfrm_policy_gc_work;
static HLIST_HEAD(xfrm_policy_gc_list);
static DEFINE_SPINLOCK(xfrm_policy_gc_lock);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static struct xfrm_policy_afinfo *xfrm_policy_lock_afinfo(unsigned int family);
static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo);
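
/*
 * Selector matching: addresses are compared under the selector's prefix
 * lengths, ports under the selector's port masks, and a zero proto or
 * ifindex in the selector acts as a wildcard.
 */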
static inline int
__xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
	return	addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
		(fl->proto == sel->proto || !sel->proto) &&
		(fl->oif == sel->ifindex || !sel->ifindex);
}

static inline int
__xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
	return	addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
		(fl->proto == sel->proto || !sel->proto) &&
		(fl->oif == sel->ifindex || !sel->ifindex);
}

int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
			unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return 0;
}

int xfrm_register_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_lock_afinfo(family);
	struct xfrm_type **typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	if (likely(typemap[type->proto] == NULL))
		typemap[type->proto] = type;
	else
		err = -EEXIST;
	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_type);

int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_lock_afinfo(family);
	struct xfrm_type **typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	if (unlikely(typemap[type->proto] != type))
		err = -ENOENT;
	else
		typemap[type->proto] = NULL;
	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_type);
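
/*
 * xfrm_get_type() autoloads the transform if it is not yet registered:
 * a missing entry triggers request_module("xfrm-type-<family>-<proto>")
 * once, then the lookup is retried.
 */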
struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_type **typemap;
	struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	type = typemap[proto];
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	if (!type && !modload_attempted) {
		xfrm_policy_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d",
			       (int) family, (int) proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_policy_put_afinfo(afinfo);
	return type;
}

int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl,
		    unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	if (likely(afinfo->dst_lookup != NULL))
		err = afinfo->dst_lookup(dst, fl);
	else
		err = -EINVAL;
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_dst_lookup);

void xfrm_put_type(struct xfrm_type *type)
{
	module_put(type->owner);
}

int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_policy_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -EEXIST;
	modemap = afinfo->mode_map;
	if (likely(modemap[mode->encap] == NULL)) {
		modemap[mode->encap] = mode;
		err = 0;
	}

	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_mode);

int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_policy_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -ENOENT;
	modemap = afinfo->mode_map;
	if (likely(modemap[mode->encap] == mode)) {
		modemap[mode->encap] = NULL;
		err = 0;
	}

	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_mode);

struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_mode *mode;
	int modload_attempted = 0;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

retry:
	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	mode = afinfo->mode_map[encap];
	if (unlikely(mode && !try_module_get(mode->owner)))
		mode = NULL;
	if (!mode && !modload_attempted) {
		xfrm_policy_put_afinfo(afinfo);
		request_module("xfrm-mode-%d-%d", family, encap);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_policy_put_afinfo(afinfo);
	return mode;
}

void xfrm_put_mode(struct xfrm_mode *mode)
{
	module_put(mode->owner);
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
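
/*
 * Per-policy lifetime timer: hard add/use expiry deletes the policy and
 * notifies key managers with hard=1; soft expiry only warns (hard=0) and
 * re-arms the timer XFRM_KM_TIMEOUT seconds later.
 */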
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy*)data;
	unsigned long now = (unsigned long)xtime.tv_sec;
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (xp->dead)
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}

/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */
struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		atomic_set(&policy->refcnt, 1);
		init_timer(&policy->timer);
		policy->timer.data = (unsigned long)policy;
		policy->timer.function = xfrm_policy_timer;
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy xfrm_policy: descendant resources must be released to this moment. */
void __xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->dead);

	BUG_ON(policy->bundles);

	if (del_timer(&policy->timer))
		BUG();

	security_xfrm_policy_free(policy);
	kfree(policy);
}
EXPORT_SYMBOL(__xfrm_policy_destroy);

static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
	struct dst_entry *dst;

	while ((dst = policy->bundles) != NULL) {
		policy->bundles = dst->next;
		dst_free(dst);
	}

	if (del_timer(&policy->timer))
		atomic_dec(&policy->refcnt);

	if (atomic_read(&policy->refcnt) > 1)
		flow_cache_flush();

	xfrm_pol_put(policy);
}

static void xfrm_policy_gc_task(void *data)
{
	struct xfrm_policy *policy;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_policy_gc_lock);
	gc_list.first = xfrm_policy_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_policy_gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst)
		xfrm_policy_gc_kill(policy);
}

/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must be unlinked from lists to the moment.
 */
static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	int dead;

	write_lock_bh(&policy->lock);
	dead = policy->dead;
	policy->dead = 1;
	write_unlock_bh(&policy->lock);

	if (unlikely(dead)) {
		WARN_ON(1);
		return;
	}

	spin_lock(&xfrm_policy_gc_lock);
	hlist_add_head(&policy->bydst, &xfrm_policy_gc_list);
	spin_unlock(&xfrm_policy_gc_lock);

	schedule_work(&xfrm_policy_gc_work);
}

struct xfrm_policy_hash {
	struct hlist_head	*table;
	unsigned int		hmask;
};

static struct hlist_head xfrm_policy_inexact[XFRM_POLICY_MAX*2];
static struct xfrm_policy_hash xfrm_policy_bydst[XFRM_POLICY_MAX*2] __read_mostly;
static struct hlist_head *xfrm_policy_byidx __read_mostly;
static unsigned int xfrm_idx_hmask __read_mostly;
static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(u32 index)
{
	return __idx_hash(index, xfrm_idx_hmask);
}

static struct hlist_head *policy_hash_bysel(struct xfrm_selector *sel, unsigned short family, int dir)
{
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
	unsigned int hash = __sel_hash(sel, family, hmask);

	return (hash == hmask + 1 ?
		&xfrm_policy_inexact[dir] :
		xfrm_policy_bydst[dir].table + hash);
}

static struct hlist_head *policy_hash_direct(xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, int dir)
{
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
	unsigned int hash = __addr_hash(daddr, saddr, family, hmask);

	return xfrm_policy_bydst[dir].table + hash;
}

static void xfrm_dst_hash_transfer(struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
		unsigned int h;

		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask);
		hlist_add_head(&pol->bydst, ndsttable+h);
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}
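
/*
 * Hash table growth: each resize doubles the number of buckets (the new
 * mask is ((old + 1) << 1) - 1), rehashes every policy under the write
 * lock, and frees the old table afterwards.
 */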
static void xfrm_bydst_resize(int dir)
{
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *odst = xfrm_policy_bydst[dir].table;
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	int i;

	if (!ndst)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);

	xfrm_policy_bydst[dir].table = ndst;
	xfrm_policy_bydst[dir].hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(int total)
{
	unsigned int hmask = xfrm_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = xfrm_policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	xfrm_policy_byidx = nidx;
	xfrm_idx_hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(int dir, int *total)
{
	unsigned int cnt = xfrm_policy_count[dir];
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(int total)
{
	unsigned int hmask = xfrm_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

static DEFINE_MUTEX(hash_resize_mutex);

static void xfrm_hash_resize(void *__unused)
{
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		if (xfrm_bydst_should_resize(dir, &total))
			xfrm_bydst_resize(dir);
	}
	if (xfrm_byidx_should_resize(total))
		xfrm_byidx_resize(total);

	mutex_unlock(&hash_resize_mutex);
}

static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);

/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(u8 type, int dir)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_node *entry;
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		list = xfrm_policy_byidx + idx_hash(idx);
		found = 0;
		hlist_for_each_entry(p, entry, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}
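
/*
 * Insertion keeps each hash chain ordered by priority.  An existing policy
 * with the same type, selector and security context is replaced (or the
 * insert fails with -EEXIST when 'excl' is set), and the cached bundles of
 * the policies that follow the new entry are pruned.
 */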
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct xfrm_policy *pol;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct hlist_node *entry, *newpos, *last;
	struct dst_entry *gc_list;

	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(&policy->selector, policy->family, dir);
	delpol = NULL;
	newpos = NULL;
	last = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (!delpol &&
		    pol->type == policy->type &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_sec_ctx_match(pol->security, policy->security)) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			last = &pol->bydst;
			continue;
		}
		if (!newpos)
			newpos = &pol->bydst;
		if (delpol)
			break;
		last = &pol->bydst;
	}
	if (!newpos)
		newpos = last;
	if (newpos)
		hlist_add_after(newpos, &policy->bydst);
	else
		hlist_add_head(&policy->bydst, chain);
	xfrm_pol_hold(policy);
	xfrm_policy_count[dir]++;
	atomic_inc(&flow_cache_genid);
	if (delpol) {
		hlist_del(&delpol->bydst);
		hlist_del(&delpol->byidx);
		xfrm_policy_count[dir]--;
	}
	policy->index = delpol ? delpol->index : xfrm_gen_index(policy->type, dir);
	hlist_add_head(&policy->byidx, xfrm_policy_byidx+idx_hash(policy->index));
	policy->curlft.add_time = (unsigned long)xtime.tv_sec;
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(dir, NULL))
		schedule_work(&xfrm_hash_work);

	read_lock_bh(&xfrm_policy_lock);
	gc_list = NULL;
	entry = &policy->bydst;
	hlist_for_each_entry_continue(policy, entry, bydst) {
		struct dst_entry *dst;

		write_lock(&policy->lock);
		dst = policy->bundles;
		if (dst) {
			struct dst_entry *tail = dst;
			while (tail->next)
				tail = tail->next;
			tail->next = gc_list;
			gc_list = dst;

			policy->bundles = NULL;
		}
		write_unlock(&policy->lock);
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;

		gc_list = dst->next;
		dst_free(dst);
	}

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);
struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
					  struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(sel, sel->family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == type &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security)) {
			xfrm_pol_hold(pol);
			if (delete) {
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				xfrm_policy_count[dir]--;
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(ret);
	}
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(u8 type, int dir, u32 id, int delete)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	write_lock_bh(&xfrm_policy_lock);
	chain = xfrm_policy_byidx + idx_hash(id);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, byidx) {
		if (pol->type == type && pol->index == id) {
			xfrm_pol_hold(pol);
			if (delete) {
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				xfrm_policy_count[dir]--;
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(ret);
	}
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

void xfrm_policy_flush(u8 type)
{
	int dir;

	write_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i, killed;

		killed = 0;
	again1:
		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			hlist_del(&pol->bydst);
			hlist_del(&pol->byidx);
			write_unlock_bh(&xfrm_policy_lock);

			xfrm_policy_kill(pol);
			killed++;

			write_lock_bh(&xfrm_policy_lock);
			goto again1;
		}

		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
	again2:
			hlist_for_each_entry(pol, entry,
					     xfrm_policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				write_unlock_bh(&xfrm_policy_lock);

				xfrm_policy_kill(pol);
				killed++;

				write_lock_bh(&xfrm_policy_lock);
				goto again2;
			}
		}

		xfrm_policy_count[dir] -= killed;
	}
	atomic_inc(&flow_cache_genid);
	write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct hlist_node *entry;
	int dir, count, error;

	read_lock_bh(&xfrm_policy_lock);
	count = 0;
	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		struct hlist_head *table = xfrm_policy_bydst[dir].table;
		int i;

		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst) {
			if (pol->type == type)
				count++;
		}
		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry, table + i, bydst) {
				if (pol->type == type)
					count++;
			}
		}
	}

	if (count == 0) {
		error = -ENOENT;
		goto out;
	}

	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		struct hlist_head *table = xfrm_policy_bydst[dir].table;
		int i;

		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			error = func(pol, dir % XFRM_POLICY_MAX, --count, data);
			if (error)
				goto out;
		}
		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry, table + i, bydst) {
				if (pol->type != type)
					continue;
				error = func(pol, dir % XFRM_POLICY_MAX, --count, data);
				if (error)
					goto out;
			}
		}
	}
	error = 0;
out:
	read_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
			     u8 type, u16 family, int dir)
{
	struct xfrm_selector *sel = &pol->selector;
	int match, ret = -ESRCH;

	if (pol->family != family ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol, fl->secid, dir);

	return ret;
}
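
/*
 * Flow lookup: first scan the exact dst/src hash chain, then the inexact
 * list, keeping the best (lowest priority value) match.  Returns a held
 * policy, NULL when nothing matches, or ERR_PTR() on a security error.
 */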
static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
						     u16 family, u8 dir)
{
	int err;
	struct xfrm_policy *pol, *ret;
	xfrm_address_t *daddr, *saddr;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(daddr, saddr, family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &xfrm_policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else if (pol->priority < priority) {
			ret = pol;
			break;
		}
	}
	if (ret)
		xfrm_pol_hold(ret);
fail:
	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static int xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
			      void **objp, atomic_t **obj_refp)
{
	struct xfrm_policy *pol;
	int err = 0;

#ifdef CONFIG_XFRM_SUB_POLICY
	pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_SUB, fl, family, dir);
	if (IS_ERR(pol)) {
		err = PTR_ERR(pol);
		pol = NULL;
	}
	if (pol || err)
		goto end;
#endif
	pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN, fl, family, dir);
	if (IS_ERR(pol)) {
		err = PTR_ERR(pol);
		pol = NULL;
	}
#ifdef CONFIG_XFRM_SUB_POLICY
end:
#endif
	if ((*objp = (void *) pol) != NULL)
		*obj_refp = &pol->refcnt;
	return err;
}

static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	};
}

static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		int match = xfrm_selector_match(&pol->selector, fl,
						sk->sk_family);
		int err = 0;

		if (match) {
			err = security_xfrm_policy_lookup(pol, fl->secid,
					policy_to_flow_dir(dir));
			if (!err)
				xfrm_pol_hold(pol);
			else if (err == -ESRCH)
				pol = NULL;
			else
				pol = ERR_PTR(err);
		} else
			pol = NULL;
	}
	read_unlock_bh(&xfrm_policy_lock);

	return pol;
}
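
/* The link/unlink helpers below assume the caller holds xfrm_policy_lock. */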
static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	struct hlist_head *chain = policy_hash_bysel(&pol->selector,
						     pol->family, dir);

	hlist_add_head(&pol->bydst, chain);
	hlist_add_head(&pol->byidx, xfrm_policy_byidx+idx_hash(pol->index));
	xfrm_policy_count[dir]++;
	xfrm_pol_hold(pol);

	if (xfrm_bydst_should_resize(dir, NULL))
		schedule_work(&xfrm_hash_work);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	if (hlist_unhashed(&pol->bydst))
		return NULL;

	hlist_del(&pol->bydst);
	hlist_del(&pol->byidx);
	xfrm_policy_count[dir]--;

	return pol;
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		if (dir < XFRM_POLICY_MAX)
			atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = (unsigned long)xtime.tv_sec;
		pol->index = xfrm_gen_index(pol->type, XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}

static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old, newp)) {
			kfree(newp);
			return NULL;  /* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}
static int
xfrm_get_saddr(xfrm_address_t *local, xfrm_address_t *remote,
	       unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->get_saddr(local, remote);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
		      struct xfrm_state **xfrm,
		      unsigned short family)
{
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			family = tmpl->encap_family;
			if (xfrm_addr_any(local, family)) {
				error = xfrm_get_saddr(&tmp, remote, family);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx>=0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}
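
/*
 * Multi-policy variant: resolve each policy's templates into a temporary
 * array, then sort the acquired states into outbound processing order when
 * more than one policy contributed.
 */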
static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
		  struct xfrm_state **xfrm,
		  unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

fail:
	for (cnx--; cnx>=0; cnx--)
		xfrm_state_put(tpp[cnx]);
	return error;
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
{
	struct dst_entry *x;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EINVAL);
	x = afinfo->find_bundle(fl, policy);
	xfrm_policy_put_afinfo(afinfo);
	return x;
}

/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */

static int
xfrm_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
		   struct flowi *fl, struct dst_entry **dst_p,
		   unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->bundle_create(policy, xfrm, nx, fl, dst_p);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

static int stale_bundle(struct dst_entry *dst);
/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
{
	struct xfrm_policy *policy;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols;
	int pol_dead;
	int xfrm_nr;
	int pi;
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst, *dst_orig = *dst_p;
	int nx = 0;
	int err;
	u32 genid;
	u16 family;
	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);

restart:
	genid = atomic_read(&flow_cache_genid);
	policy = NULL;
	for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
		pols[pi] = NULL;
	npols = 0;
	pol_dead = 0;
	xfrm_nr = 0;

	if (sk && sk->sk_policy[1]) {
		policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
		if (IS_ERR(policy))
			return PTR_ERR(policy);
	}

	if (!policy) {
		/* To accelerate a bit... */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !xfrm_policy_count[XFRM_POLICY_OUT])
			return 0;

		policy = flow_cache_lookup(fl, dst_orig->ops->family,
					   dir, xfrm_policy_lookup);
		if (IS_ERR(policy))
			return PTR_ERR(policy);
	}

	if (!policy)
		return 0;

	family = dst_orig->ops->family;
	policy->curlft.use_time = (unsigned long)xtime.tv_sec;
	pols[0] = policy;
	npols ++;
	xfrm_nr += pols[0]->xfrm_nr;

	switch (policy->action) {
	case XFRM_POLICY_BLOCK:
		/* Prohibit the flow */
		err = -EPERM;
		goto error;

	case XFRM_POLICY_ALLOW:
#ifndef CONFIG_XFRM_SUB_POLICY
		if (policy->xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pol_put(policy);
			return 0;
		}
#endif

		/* Try to find matching bundle.
		 *
		 * LATER: help from flow cache. It is optional, this
		 * is required only for output policy.
		 */
		dst = xfrm_find_bundle(fl, policy, family);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			goto error;
		}

		if (dst)
			break;

#ifdef CONFIG_XFRM_SUB_POLICY
		if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
			pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
							    fl, family,
							    XFRM_POLICY_OUT);
			if (pols[1]) {
				if (IS_ERR(pols[1])) {
					err = PTR_ERR(pols[1]);
					goto error;
				}
				if (pols[1]->action == XFRM_POLICY_BLOCK) {
					err = -EPERM;
					goto error;
				}
				npols ++;
				xfrm_nr += pols[1]->xfrm_nr;
			}
		}

		/*
		 * Because neither flowi nor bundle information knows about
		 * transformation template size. On more than one policy usage
		 * we can realize whether all of them is bypass or not after
		 * they are searched. See above not-transformed bypass
		 * is surrounded by non-sub policy configuration, too.
		 */
		if (xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}

#endif
		nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

		if (unlikely(nx<0)) {
			err = nx;
			if (err == -EAGAIN && flags) {
				DECLARE_WAITQUEUE(wait, current);

				add_wait_queue(&km_waitq, &wait);
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&km_waitq, &wait);

				nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

				if (nx == -EAGAIN && signal_pending(current)) {
					err = -ERESTART;
					goto error;
				}
				if (nx == -EAGAIN ||
				    genid != atomic_read(&flow_cache_genid)) {
					xfrm_pols_put(pols, npols);
					goto restart;
				}
				err = nx;
			}
			if (err < 0)
				goto error;
		}
		if (nx == 0) {
			/* Flow passes not transformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}

		dst = dst_orig;
		err = xfrm_bundle_create(policy, xfrm, nx, fl, &dst, family);

		if (unlikely(err)) {
			int i;
			for (i=0; i<nx; i++)
				xfrm_state_put(xfrm[i]);
			goto error;
		}

		for (pi = 0; pi < npols; pi++) {
			read_lock_bh(&pols[pi]->lock);
			pol_dead |= pols[pi]->dead;
			read_unlock_bh(&pols[pi]->lock);
		}

		write_lock_bh(&policy->lock);
		if (unlikely(pol_dead || stale_bundle(dst))) {
			/* Wow! While we worked on resolving, this
			 * policy has gone. Retry. It is not paranoia,
			 * we just cannot enlist new bundle to dead object.
			 * We can't enlist stable bundles either.
			 */
			write_unlock_bh(&policy->lock);
			if (dst)
				dst_free(dst);

			err = -EHOSTUNREACH;
			goto error;
		}
		dst->next = policy->bundles;
		policy->bundles = dst;
		dst_hold(dst);
		write_unlock_bh(&policy->lock);
	}
	*dst_p = dst;
	dst_release(dst_orig);
	xfrm_pols_put(pols, npols);
	return 0;

error:
	dst_release(dst_orig);
	xfrm_pols_put(pols, npols);
	*dst_p = NULL;
	return err;
}
EXPORT_SYMBOL(xfrm_lookup);
static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
{
	struct xfrm_state *x;
	int err;

	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
		return 0;
	x = skb->sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	xfrm_state_hold(x);
	err = x->type->reject(x, skb, fl);
	xfrm_state_put(x);
	return err;
}

/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we make this in maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have policy cached at them.
 */

static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		((tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}

/*
 * 0 or more than 0 is returned when validation succeeds (either bypass
 * because of optional transport mode, or the next index of the matched
 * secpath state with the template).
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-idx;
			break;
		}
	}
	return start;
}

int
xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl);
	err = security_xfrm_decode_session(skb, &fl->secid);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_decode_session);

static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}
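
/*
 * Inbound policy enforcement: decode the flow from the skb, check every SA
 * in the secpath against its own selector, look up the applicable policy
 * (socket policy first, then the flow cache), and finally verify that the
 * secpath satisfies the policy's template list.  Returns 1 to accept the
 * packet, 0 to drop it.
 */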
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	struct flowi fl;
	u8 fl_dir = policy_to_flow_dir(dir);
	int xerr_idx = -1;

	if (xfrm_decode_session(skb, &fl, family) < 0)
		return 0;
	nf_nat_decode_session(skb, &fl, family);

	/* First, check used SA against their selectors. */
	if (skb->sp) {
		int i;

		for (i=skb->sp->len-1; i>=0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family))
				return 0;
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
		if (IS_ERR(pol))
			return 0;
	}

	if (!pol)
		pol = flow_cache_lookup(&fl, family, fl_dir,
					xfrm_policy_lookup);

	if (IS_ERR(pol))
		return 0;

	if (!pol) {
		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = (unsigned long)xtime.tv_sec;

	pols[0] = pol;
	npols ++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN);
		if (pols[1]) {
			if (IS_ERR(pols[1]))
				return 0;
			pols[1]->curlft.use_time = (unsigned long)xtime.tv_sec;
			npols ++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW)
				goto reject;
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH)
				goto reject_error;
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx))
			goto reject;

		xfrm_pols_put(pols, npols);
		return 1;
	}

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);
  1492. int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
  1493. {
  1494. struct flowi fl;
  1495. if (xfrm_decode_session(skb, &fl, family) < 0)
  1496. return 0;
  1497. return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
  1498. }
  1499. EXPORT_SYMBOL(__xfrm_route_forward);
/* Optimize later using cookies and generation ids. */
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to "-1" to force all XFRM destinations to get validated by
	 * dst_ops->check on every use.  We do this because when a
	 * normal route referenced by an XFRM dst is obsoleted we do
	 * not go looking around for all parent referencing XFRM dsts
	 * so that we can invalidate them.  It is just too much work.
	 * Instead we make the checks here on every use.  For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example).  If X is marked obsolete, "A" will not
	 * notice.  That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a policy's bundle is pruned, we dst_free() the XFRM
	 * dst, which causes its ->obsolete field to be set to a
	 * positive non-zero integer.  If an XFRM dst has been pruned
	 * like this, we want to force a new route lookup.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}

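/* A bundle is stale once xfrm_bundle_ok() can no longer validate it;
 * the check is done with no policy or flow and in non-strict mode. */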
static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
}

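/* When a device goes away, walk the bundle's chain of child dsts and
 * repoint every xfrm dst still bound to that device at the loopback
 * device, moving the device reference along with it. */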
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = &loopback_dev;
		dev_hold(&loopback_dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before it reaches the point of failure. */
	return;
}

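/* Drop our reference to an obsoleted xfrm dst so that the caller falls
 * back to a fresh route lookup. */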
static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

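/* Under the policy's lock, unlink every bundle on this policy for which
 * func() returns true and move it onto the caller's gc list. */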
static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_entry *), struct dst_entry **gc_list_p)
{
	struct dst_entry *dst, **dstp;

	write_lock(&pol->lock);
	dstp = &pol->bundles;
	while ((dst = *dstp) != NULL) {
		if (func(dst)) {
			*dstp = dst->next;
			dst->next = *gc_list_p;
			*gc_list_p = dst;
		} else {
			dstp = &dst->next;
		}
	}
	write_unlock(&pol->lock);
}

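/* Walk all policies -- the inexact lists and the per-direction bydst
 * hash tables -- collecting bundles selected by func(), then free them
 * once the policy lock has been dropped. */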
static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
{
	struct dst_entry *gc_list = NULL;
	int dir;

	read_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		struct hlist_head *table;
		int i;

		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst)
			prune_one_bundle(pol, func, &gc_list);

		table = xfrm_policy_bydst[dir].table;
		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry, table + i, bydst)
				prune_one_bundle(pol, func, &gc_list);
		}
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;
		gc_list = dst->next;
		dst_free(dst);
	}
}

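/* Garbage collection reclaims bundles that no longer hold any
 * references. */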
static int unused_bundle(struct dst_entry *dst)
{
	return !atomic_read(&dst->__refcnt);
}

static void __xfrm_garbage_collect(void)
{
	xfrm_prune_bundles(unused_bundle);
}

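/* Throw away every bundle that no longer validates; called when a
 * device goes down. */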
static int xfrm_flush_bundles(void)
{
	xfrm_prune_bundles(stale_bundle);
	return 0;
}

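/* Walk a freshly built bundle from the top, caching the child and route
 * MTUs at each level and recording the resulting path MTU in the dst
 * metrics. */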
void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst->metrics[RTAX_MTU-1] = pmtu;
	} while ((dst = dst->next));
}
EXPORT_SYMBOL(xfrm_init_pmtu);

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */
int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
		   struct flowi *fl, int family, int strict)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
			return 0;
		if (fl && pol &&
		    !security_xfrm_state_pol_flow_match(dst->xfrm, pol, fl))
			return 0;
		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->genid != dst->xfrm->genid)
			return 0;

		if (strict && fl && dst->xfrm->props.mode != XFRM_MODE_TUNNEL &&
		    !xfrm_state_addr_flow_check(dst->xfrm, fl, family))
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst->metrics[RTAX_MTU-1] = mtu;

		if (last == first)
			break;

		last = last->u.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}
EXPORT_SYMBOL(xfrm_bundle_ok);

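/* Register per-family policy operations.  Any dst_ops hooks the caller
 * leaves NULL are filled in with the generic xfrm defaults (dst cache,
 * check, negative_advice, link_failure and garbage collection). */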
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = __xfrm_garbage_collect;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;
			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

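/* Look up the afinfo for a family and return it with
 * xfrm_policy_afinfo_lock held for reading; xfrm_policy_put_afinfo()
 * drops the lock.  Returns NULL if the family is out of range or
 * unregistered, in which case the lock is not held on return. */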
static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	read_unlock(&xfrm_policy_afinfo_lock);
}

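/* Like xfrm_policy_get_afinfo()/xfrm_policy_put_afinfo(), but takes the
 * afinfo lock for writing so the caller may modify the entry. */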
static struct xfrm_policy_afinfo *xfrm_policy_lock_afinfo(unsigned int family)
{
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		write_unlock_bh(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	write_unlock_bh(&xfrm_policy_afinfo_lock);
}

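/* Netdevice notifier: when an interface goes down, flush the bundles
 * that have become stale as a result. */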
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_DOWN:
		xfrm_flush_bundles();
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	xfrm_dev_event,
	NULL,
	0
};

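/* Boot-time initialisation: create the xfrm_dst slab cache, allocate
 * the byidx and per-direction bydst policy hash tables, set up the
 * policy GC work and register the netdevice notifier. */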
static void __init xfrm_policy_init(void)
{
	unsigned int hmask, sz;
	int dir;

	xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL, NULL);

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	xfrm_policy_byidx = xfrm_hash_alloc(sz);
	xfrm_idx_hmask = hmask;
	if (!xfrm_policy_byidx)
		panic("XFRM: failed to allocate byidx hash\n");

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		INIT_HLIST_HEAD(&xfrm_policy_inexact[dir]);

		htab = &xfrm_policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		htab->hmask = hmask;
		if (!htab->table)
			panic("XFRM: failed to allocate bydst hash\n");
	}

	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
	register_netdevice_notifier(&xfrm_dev_notifier);
}

void __init xfrm_init(void)
{
	xfrm_state_init();
	xfrm_policy_init();
	xfrm_input_init();
}