/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <asm/bug.h>
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <net/xfrm.h>
#include <net/ip.h>

DECLARE_MUTEX(xfrm_cfg_sem);
EXPORT_SYMBOL(xfrm_cfg_sem);

static DEFINE_RWLOCK(xfrm_policy_lock);

struct xfrm_policy *xfrm_policy_list[XFRM_POLICY_MAX*2];
EXPORT_SYMBOL(xfrm_policy_list);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static kmem_cache_t *xfrm_dst_cache;

static struct work_struct xfrm_policy_gc_work;
static struct list_head xfrm_policy_gc_list =
	LIST_HEAD_INIT(xfrm_policy_gc_list);
static DEFINE_SPINLOCK(xfrm_policy_gc_lock);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
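/*
 * Transform type registration.
 *
 * xfrm_register_type()/xfrm_unregister_type() maintain the per-family
 * table that maps an IP protocol number to its struct xfrm_type.  An
 * illustrative caller (hypothetical protocol module init, not part of
 * this file) might look like:
 *
 *	static struct xfrm_type esp_type = {
 *		.description	= "ESP4",
 *		.owner		= THIS_MODULE,
 *		.proto		= IPPROTO_ESP,
 *		...
 *	};
 *
 *	if (xfrm_register_type(&esp_type, AF_INET) < 0)
 *		printk(KERN_INFO "ip esp init: can't add xfrm type\n");
 *
 * Registration fails with -EEXIST if another type already claims the
 * protocol number for that family.
 */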
int xfrm_register_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct xfrm_type_map *typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	write_lock(&typemap->lock);
	if (likely(typemap->map[type->proto] == NULL))
		typemap->map[type->proto] = type;
	else
		err = -EEXIST;
	write_unlock(&typemap->lock);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_type);

int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct xfrm_type_map *typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	write_lock(&typemap->lock);
	if (unlikely(typemap->map[type->proto] != type))
		err = -ENOENT;
	else
		typemap->map[type->proto] = NULL;
	write_unlock(&typemap->lock);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_type);
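/*
 * Look up the xfrm_type for a protocol/family pair and take a module
 * reference on it.  If no type is registered yet, request_module() is
 * tried once with the "xfrm-type-<family>-<proto>" alias so that the
 * matching protocol module (ESP, AH, IPcomp, ...) can be autoloaded,
 * and the lookup is retried.  Callers release the reference with
 * xfrm_put_type().
 */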
struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_type_map *typemap;
	struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	read_lock(&typemap->lock);
	type = typemap->map[proto];
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	read_unlock(&typemap->lock);
	if (!type && !modload_attempted) {
		xfrm_policy_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d",
			       (int) family, (int) proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_policy_put_afinfo(afinfo);
	return type;
}
EXPORT_SYMBOL(xfrm_get_type);

int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl,
		    unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	if (likely(afinfo->dst_lookup != NULL))
		err = afinfo->dst_lookup(dst, fl);
	else
		err = -EINVAL;
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_dst_lookup);

void xfrm_put_type(struct xfrm_type *type)
{
	module_put(type->owner);
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
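/*
 * Per-policy lifetime timer.  Each xfrm_policy carries hard and soft
 * add/use expiry times; this handler fires for the nearest one, sends a
 * km_policy_expired() notification to key managers on soft expiry, and
 * deletes the policy outright on hard expiry.  It re-arms itself with
 * the next pending deadline while the policy stays alive.
 */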
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy*)data;
	unsigned long now = (unsigned long)xtime.tv_sec;
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (xp->dead)
		goto out;

	dir = xp->index & 7;

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	km_policy_expired(xp, dir, 1);
	xfrm_policy_delete(xp, dir);
	xfrm_pol_put(xp);
}

/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(int gfp)
{
	struct xfrm_policy *policy;

	policy = kmalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		memset(policy, 0, sizeof(struct xfrm_policy));
		atomic_set(&policy->refcnt, 1);
		rwlock_init(&policy->lock);
		init_timer(&policy->timer);
		policy->timer.data = (unsigned long)policy;
		policy->timer.function = xfrm_policy_timer;
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy xfrm_policy: descendant resources must already have been
 * released by this point. */

void __xfrm_policy_destroy(struct xfrm_policy *policy)
{
	if (!policy->dead)
		BUG();

	if (policy->bundles)
		BUG();

	if (del_timer(&policy->timer))
		BUG();

	kfree(policy);
}
EXPORT_SYMBOL(__xfrm_policy_destroy);
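/*
 * Deferred policy destruction.  xfrm_policy_kill() only marks a policy
 * dead and queues it on xfrm_policy_gc_list; the workqueue callback
 * xfrm_policy_gc_task() later drops its cached bundles and pending
 * timer reference, and flushes the flow cache if other references
 * remain, so that the final xfrm_pol_put() can free the policy safely
 * outside of the lookup paths.
 */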
static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
	struct dst_entry *dst;

	while ((dst = policy->bundles) != NULL) {
		policy->bundles = dst->next;
		dst_free(dst);
	}

	if (del_timer(&policy->timer))
		atomic_dec(&policy->refcnt);

	if (atomic_read(&policy->refcnt) > 1)
		flow_cache_flush();

	xfrm_pol_put(policy);
}

static void xfrm_policy_gc_task(void *data)
{
	struct xfrm_policy *policy;
	struct list_head *entry, *tmp;
	struct list_head gc_list = LIST_HEAD_INIT(gc_list);

	spin_lock_bh(&xfrm_policy_gc_lock);
	list_splice_init(&xfrm_policy_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	list_for_each_safe(entry, tmp, &gc_list) {
		policy = list_entry(entry, struct xfrm_policy, list);
		xfrm_policy_gc_kill(policy);
	}
}

/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must already be unlinked from the lists
 * at this point.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	int dead;

	write_lock_bh(&policy->lock);
	dead = policy->dead;
	policy->dead = 1;
	write_unlock_bh(&policy->lock);

	if (unlikely(dead)) {
		WARN_ON(1);
		return;
	}

	spin_lock(&xfrm_policy_gc_lock);
	list_add(&policy->list, &xfrm_policy_gc_list);
	spin_unlock(&xfrm_policy_gc_lock);

	schedule_work(&xfrm_policy_gc_work);
}

/* Generate a new policy index... KAME seems to generate them ordered by
 * cost, at the cost of absolutely unpredictable ordering of rules.
 * This will not pass. */

static u32 xfrm_gen_index(int dir)
{
	u32 idx;
	struct xfrm_policy *p;
	static u32 idx_generator;

	for (;;) {
		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		for (p = xfrm_policy_list[dir]; p; p = p->next) {
			if (p->index == idx)
				break;
		}
		if (!p)
			return idx;
	}
}
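/*
 * Insert a policy into the global list for the given direction, keeping
 * the list ordered by priority.  A policy whose selector exactly matches
 * an existing entry replaces it (and inherits its index) unless the
 * caller asked for exclusive insertion, in which case -EEXIST is
 * returned.  The flow cache generation counter is bumped so stale
 * cached lookups are discarded.
 */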
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct xfrm_policy *pol, **p;
	struct xfrm_policy *delpol = NULL;
	struct xfrm_policy **newpos = NULL;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL;) {
		if (!delpol && memcmp(&policy->selector, &pol->selector, sizeof(pol->selector)) == 0) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			*p = pol->next;
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			p = &pol->next;
			continue;
		}
		if (!newpos)
			newpos = p;
		if (delpol)
			break;
		p = &pol->next;
	}
	if (newpos)
		p = newpos;
	xfrm_pol_hold(policy);
	policy->next = *p;
	*p = policy;
	atomic_inc(&flow_cache_genid);
	policy->index = delpol ? delpol->index : xfrm_gen_index(dir);
	policy->curlft.add_time = (unsigned long)xtime.tv_sec;
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol) {
		xfrm_policy_kill(delpol);
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

struct xfrm_policy *xfrm_policy_bysel(int dir, struct xfrm_selector *sel,
				      int delete)
{
	struct xfrm_policy *pol, **p;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) {
		if (memcmp(sel, &pol->selector, sizeof(*sel)) == 0) {
			xfrm_pol_hold(pol);
			if (delete)
				*p = pol->next;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (pol && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
	}
	return pol;
}
EXPORT_SYMBOL(xfrm_policy_bysel);

struct xfrm_policy *xfrm_policy_byid(int dir, u32 id, int delete)
{
	struct xfrm_policy *pol, **p;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[id & 7]; (pol=*p)!=NULL; p = &pol->next) {
		if (pol->index == id) {
			xfrm_pol_hold(pol);
			if (delete)
				*p = pol->next;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (pol && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
	}
	return pol;
}
EXPORT_SYMBOL(xfrm_policy_byid);

void xfrm_policy_flush(void)
{
	struct xfrm_policy *xp;
	int dir;

	write_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		while ((xp = xfrm_policy_list[dir]) != NULL) {
			xfrm_policy_list[dir] = xp->next;
			write_unlock_bh(&xfrm_policy_lock);

			xfrm_policy_kill(xp);

			write_lock_bh(&xfrm_policy_lock);
		}
	}
	atomic_inc(&flow_cache_genid);
	write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_flush);
int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *xp;
	int dir;
	int count = 0;
	int error = 0;

	read_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		for (xp = xfrm_policy_list[dir]; xp; xp = xp->next)
			count++;
	}

	if (count == 0) {
		error = -ENOENT;
		goto out;
	}

	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		for (xp = xfrm_policy_list[dir]; xp; xp = xp->next) {
			error = func(xp, dir%XFRM_POLICY_MAX, --count, data);
			if (error)
				goto out;
		}
	}

out:
	read_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

/* Find policy to apply to this flow. */

static void xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
			       void **objp, atomic_t **obj_refp)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	for (pol = xfrm_policy_list[dir]; pol; pol = pol->next) {
		struct xfrm_selector *sel = &pol->selector;
		int match;

		if (pol->family != family)
			continue;

		match = xfrm_selector_match(sel, fl, family);
		if (match) {
			xfrm_pol_hold(pol);
			break;
		}
	}
	read_unlock_bh(&xfrm_policy_lock);
	if ((*objp = (void *) pol) != NULL)
		*obj_refp = &pol->refcnt;
}

static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		int match = xfrm_selector_match(&pol->selector, fl,
						sk->sk_family);
		if (match)
			xfrm_pol_hold(pol);
		else
			pol = NULL;
	}
	read_unlock_bh(&xfrm_policy_lock);
	return pol;
}

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	pol->next = xfrm_policy_list[dir];
	xfrm_policy_list[dir] = pol;
	xfrm_pol_hold(pol);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct xfrm_policy **polp;

	for (polp = &xfrm_policy_list[dir];
	     *polp != NULL; polp = &(*polp)->next) {
		if (*polp == pol) {
			*polp = pol->next;
			return pol;
		}
	}
	return NULL;
}

void xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		if (dir < XFRM_POLICY_MAX)
			atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
	}
}
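/*
 * Per-socket policies.  Sockets may carry their own IN/OUT policies
 * (typically installed via the IP_XFRM_POLICY / IPV6_XFRM_POLICY socket
 * options) which take precedence over the global lists.  They are
 * linked into the upper half of xfrm_policy_list (index
 * XFRM_POLICY_MAX + dir) so the generic walk and GC machinery can still
 * see them.
 */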
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct xfrm_policy *old_pol;

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = (unsigned long)xtime.tv_sec;
		pol->index = xfrm_gen_index(XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}

static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}

/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve(struct xfrm_policy *policy, struct flowi *fl,
		  struct xfrm_state **xfrm,
		  unsigned short family)
{
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);

	for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx>=0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
{
	struct dst_entry *x;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EINVAL);
	x = afinfo->find_bundle(fl, policy);
	xfrm_policy_put_afinfo(afinfo);
	return x;
}

/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */

static int
xfrm_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
		   struct flowi *fl, struct dst_entry **dst_p,
		   unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->bundle_create(policy, xfrm, nx, fl, dst_p);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
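/*
 * Map an XFRM_POLICY_* direction to the corresponding FLOW_DIR_* value
 * used by the flow cache.  When the two sets of constants happen to be
 * numerically identical, the first test folds to a constant and this
 * reduces to a plain return of 'dir'; the switch below only serves as
 * the generic fallback.
 */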
static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	};
}

static int stale_bundle(struct dst_entry *dst);

/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
{
	struct xfrm_policy *policy;
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst, *dst_orig = *dst_p;
	int nx = 0;
	int err;
	u32 genid;
	u16 family = dst_orig->ops->family;
restart:
	genid = atomic_read(&flow_cache_genid);
	policy = NULL;
	if (sk && sk->sk_policy[1])
		policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);

	if (!policy) {
		/* To accelerate a bit...  */
		if ((dst_orig->flags & DST_NOXFRM) || !xfrm_policy_list[XFRM_POLICY_OUT])
			return 0;

		policy = flow_cache_lookup(fl, family,
					   policy_to_flow_dir(XFRM_POLICY_OUT),
					   xfrm_policy_lookup);
	}

	if (!policy)
		return 0;

	policy->curlft.use_time = (unsigned long)xtime.tv_sec;

	switch (policy->action) {
	case XFRM_POLICY_BLOCK:
		/* Prohibit the flow */
		xfrm_pol_put(policy);
		return -EPERM;

	case XFRM_POLICY_ALLOW:
		if (policy->xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pol_put(policy);
			return 0;
		}

		/* Try to find matching bundle.
		 *
		 * LATER: help from flow cache. It is optional, this
		 * is required only for output policy.
		 */
		dst = xfrm_find_bundle(fl, policy, family);
		if (IS_ERR(dst)) {
			xfrm_pol_put(policy);
			return PTR_ERR(dst);
		}

		if (dst)
			break;

		nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

		if (unlikely(nx<0)) {
			err = nx;
			if (err == -EAGAIN && flags) {
				DECLARE_WAITQUEUE(wait, current);

				add_wait_queue(&km_waitq, &wait);
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&km_waitq, &wait);

				nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

				if (nx == -EAGAIN && signal_pending(current)) {
					err = -ERESTART;
					goto error;
				}
				if (nx == -EAGAIN ||
				    genid != atomic_read(&flow_cache_genid)) {
					xfrm_pol_put(policy);
					goto restart;
				}
				err = nx;
			}
			if (err < 0)
				goto error;
		}
		if (nx == 0) {
			/* Flow passes not transformed. */
			xfrm_pol_put(policy);
			return 0;
		}

		dst = dst_orig;
		err = xfrm_bundle_create(policy, xfrm, nx, fl, &dst, family);

		if (unlikely(err)) {
			int i;
			for (i=0; i<nx; i++)
				xfrm_state_put(xfrm[i]);
			goto error;
		}

		write_lock_bh(&policy->lock);
		if (unlikely(policy->dead || stale_bundle(dst))) {
			/* Wow! While we worked on resolving, this
			 * policy has gone. Retry. It is not paranoia,
			 * we just cannot enlist new bundle to dead object.
			 * We can't enlist stale bundles either.
			 */
			write_unlock_bh(&policy->lock);

			xfrm_pol_put(policy);
			if (dst)
				dst_free(dst);
			goto restart;
		}
		dst->next = policy->bundles;
		policy->bundles = dst;
		dst_hold(dst);
		write_unlock_bh(&policy->lock);
	}
	*dst_p = dst;
	dst_release(dst_orig);
	xfrm_pol_put(policy);
	return 0;

error:
	dst_release(dst_orig);
	xfrm_pol_put(policy);
	*dst_p = NULL;
	return err;
}
EXPORT_SYMBOL(xfrm_lookup);
/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we make this in maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have policy cached at them.
 */

static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->aalgos & (1<<x->props.aalgo)) &&
		!(x->props.mode && xfrm_state_addr_cmp(tmpl, x, family));
}

static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (!tmpl->mode)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->x[idx].xvec, family))
			return ++idx;
		if (sp->x[idx].xvec->props.mode)
			break;
	}
	return start;
}

static int
_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl);
	xfrm_policy_put_afinfo(afinfo);
	return 0;
}

static inline int secpath_has_tunnel(struct sec_path *sp, int k)
{
	for (; k < sp->len; k++) {
		if (sp->x[k].xvec->props.mode)
			return 1;
	}

	return 0;
}
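/*
 * Inbound policy check, called once the packet has been transformed
 * back to its native form.  The SAs recorded in skb->sp are first
 * validated against their own selectors (and any post_input processor
 * is run), then the applicable policy is looked up (socket policy
 * first, then the flow cache) and its template list is matched against
 * the security path.  Returns 1 if the packet may pass, 0 otherwise.
 */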
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct xfrm_policy *pol;
	struct flowi fl;

	if (_decode_session(skb, &fl, family) < 0)
		return 0;

	/* First, check used SA against their selectors. */
	if (skb->sp) {
		int i;

		for (i=skb->sp->len-1; i>=0; i--) {
			struct sec_decap_state *xvec = &(skb->sp->x[i]);
			if (!xfrm_selector_match(&xvec->xvec->sel, &fl, family))
				return 0;

			/* If there is a post_input processor, try running it */
			if (xvec->xvec->type->post_input &&
			    (xvec->xvec->type->post_input)(xvec->xvec,
							   &(xvec->decap),
							   skb) != 0)
				return 0;
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir])
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);

	if (!pol)
		pol = flow_cache_lookup(&fl, family,
					policy_to_flow_dir(dir),
					xfrm_policy_lookup);

	if (!pol)
		return !skb->sp || !secpath_has_tunnel(skb->sp, 0);

	pol->curlft.use_time = (unsigned long)xtime.tv_sec;

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = pol->xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(pol->xfrm_vec+i, sp, k, family);
			if (k < 0)
				goto reject;
		}

		if (secpath_has_tunnel(sp, k))
			goto reject;

		xfrm_pol_put(pol);
		return 1;
	}

reject:
	xfrm_pol_put(pol);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);

int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct flowi fl;

	if (_decode_session(skb, &fl, family) < 0)
		return 0;

	return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
}
EXPORT_SYMBOL(__xfrm_route_forward);
/* Optimize later using cookies and generation ids. */
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	if (!stale_bundle(dst))
		return dst;

	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst, NULL, AF_UNSPEC);
}

static void xfrm_dst_destroy(struct dst_entry *dst)
{
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

	dst_release(xdst->route);

	if (!dst->xfrm)
		return;
	xfrm_state_put(dst->xfrm);
	dst->xfrm = NULL;
}

static void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			    int unregister)
{
	if (!unregister)
		return;

	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = &loopback_dev;
		dev_hold(&loopback_dev);
		dev_put(dev);
	}
}

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such a dst must be popped before it reaches the
	 * point of failure. */
	return;
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}
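/*
 * Walk every policy's bundle list and free those bundles for which
 * func() returns true.  unused_bundle() selects bundles that no longer
 * have external references (used by the garbage collector), while
 * stale_bundle() selects bundles whose route or states have become
 * invalid (used when a device goes down).
 */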
static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
{
	int i;
	struct xfrm_policy *pol;
	struct dst_entry *dst, **dstp, *gc_list = NULL;

	read_lock_bh(&xfrm_policy_lock);
	for (i=0; i<2*XFRM_POLICY_MAX; i++) {
		for (pol = xfrm_policy_list[i]; pol; pol = pol->next) {
			write_lock(&pol->lock);
			dstp = &pol->bundles;
			while ((dst=*dstp) != NULL) {
				if (func(dst)) {
					*dstp = dst->next;
					dst->next = gc_list;
					gc_list = dst;
				} else {
					dstp = &dst->next;
				}
			}
			write_unlock(&pol->lock);
		}
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		dst = gc_list;
		gc_list = dst->next;
		dst_free(dst);
	}
}

static int unused_bundle(struct dst_entry *dst)
{
	return !atomic_read(&dst->__refcnt);
}

static void __xfrm_garbage_collect(void)
{
	xfrm_prune_bundles(unused_bundle);
}

int xfrm_flush_bundles(void)
{
	xfrm_prune_bundles(stale_bundle);
	return 0;
}
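/*
 * Initialize the cached MTU values of a freshly built bundle.  Each
 * xfrm_dst in the chain remembers the MTU of its child and of its
 * underlying route; the effective path MTU stored in the dst metrics is
 * the child MTU reduced by the transform overhead, clamped to the route
 * MTU.  xfrm_bundle_ok() below recomputes this whenever either cached
 * value changes.
 */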
void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst->metrics[RTAX_MTU-1] = pmtu;
	} while ((dst = dst->next));
}
EXPORT_SYMBOL(xfrm_init_pmtu);

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, 0) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
			return 0;
		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, 0))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst->metrics[RTAX_MTU-1] = mtu;

		if (last == first)
			break;

		last = last->u.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}
EXPORT_SYMBOL(xfrm_bundle_ok);

/* Well... that's _TASK_. We need to scan through transformation
 * list and figure out what mss tcp should generate in order for the
 * final datagram to fit the mtu. Mama mia... :-)
 *
 * Apparently, some easy way exists, but we used to choose the most
 * bizarre ones. :-) So, raising Kalashnikov... tra-ta-ta.
 *
 * Consider this function as something like dark humour. :-)
 */
static int xfrm_get_mss(struct dst_entry *dst, u32 mtu)
{
	int res = mtu - dst->header_len;

	for (;;) {
		struct dst_entry *d = dst;
		int m = res;

		do {
			struct xfrm_state *x = d->xfrm;
			if (x) {
				spin_lock_bh(&x->lock);
				if (x->km.state == XFRM_STATE_VALID &&
				    x->type && x->type->get_max_size)
					m = x->type->get_max_size(d->xfrm, m);
				else
					m += x->props.header_len;
				spin_unlock_bh(&x->lock);
			}
		} while ((d = d->child) != NULL);

		if (m <= mtu)
			break;
		res -= (m - mtu);
		if (res < 88)
			return mtu;
	}

	return res + dst->header_len;
}
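/*
 * Register the per-family policy operations (IPv4 or IPv6).  Besides
 * recording the afinfo pointer, this fills in any dst_ops hooks the
 * family left NULL with the generic xfrm implementations above (check,
 * destroy, ifdown, negative_advice, link_failure, get_mss) and points
 * its kmem cache at xfrm_dst_cache.  xfrm_policy_unregister_afinfo()
 * undoes all of this.
 */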
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->destroy == NULL))
			dst_ops->destroy = xfrm_dst_destroy;
		if (likely(dst_ops->ifdown == NULL))
			dst_ops->ifdown = xfrm_dst_ifdown;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(dst_ops->get_mss == NULL))
			dst_ops->get_mss = xfrm_get_mss;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = __xfrm_garbage_collect;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;
			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->destroy = NULL;
			dst_ops->ifdown = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			dst_ops->get_mss = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (likely(afinfo != NULL))
		read_lock(&afinfo->lock);
	read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	if (unlikely(afinfo == NULL))
		return;
	read_unlock(&afinfo->lock);
}
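/*
 * Netdevice notifier: when an interface goes down, prune every cached
 * bundle that has become stale so that no dst keeps the device pinned.
 */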
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_DOWN:
		xfrm_flush_bundles();
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	xfrm_dev_event,
	NULL,
	0
};

static void __init xfrm_policy_init(void)
{
	xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL, NULL);
	if (!xfrm_dst_cache)
		panic("XFRM: failed to allocate xfrm_dst_cache\n");

	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
	register_netdevice_notifier(&xfrm_dev_notifier);
}

void __init xfrm_init(void)
{
	xfrm_state_init();
	xfrm_policy_init();
	xfrm_input_init();
}