xfrm_policy.c

/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */
#include <asm/bug.h>
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <net/xfrm.h>
#include <net/ip.h>
DECLARE_MUTEX(xfrm_cfg_sem);
EXPORT_SYMBOL(xfrm_cfg_sem);

static DEFINE_RWLOCK(xfrm_policy_lock);

struct xfrm_policy *xfrm_policy_list[XFRM_POLICY_MAX*2];
EXPORT_SYMBOL(xfrm_policy_list);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static kmem_cache_t *xfrm_dst_cache;

static struct work_struct xfrm_policy_gc_work;
static struct list_head xfrm_policy_gc_list =
	LIST_HEAD_INIT(xfrm_policy_gc_list);
static DEFINE_SPINLOCK(xfrm_policy_gc_lock);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);

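/* Register/unregister an xfrm_type handler for a protocol within an
 * address family.  The per-family type map is guarded by typemap->lock;
 * registering a protocol that already has a handler fails with -EEXIST.
 */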
int xfrm_register_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct xfrm_type_map *typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	write_lock(&typemap->lock);
	if (likely(typemap->map[type->proto] == NULL))
		typemap->map[type->proto] = type;
	else
		err = -EEXIST;
	write_unlock(&typemap->lock);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_type);

int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct xfrm_type_map *typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	write_lock(&typemap->lock);
	if (unlikely(typemap->map[type->proto] != type))
		err = -ENOENT;
	else
		typemap->map[type->proto] = NULL;
	write_unlock(&typemap->lock);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_type);

struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_type_map *typemap;
	struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	read_lock(&typemap->lock);
	type = typemap->map[proto];
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	read_unlock(&typemap->lock);
	if (!type && !modload_attempted) {
		xfrm_policy_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d",
			       (int) family, (int) proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_policy_put_afinfo(afinfo);
	return type;
}
EXPORT_SYMBOL(xfrm_get_type);

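/* Look up a base route for the flow by delegating to the address
 * family's dst_lookup hook.
 */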
int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl,
		    unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	if (likely(afinfo->dst_lookup != NULL))
		err = afinfo->dst_lookup(dst, fl);
	else
		err = -EINVAL;
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_dst_lookup);

void xfrm_put_type(struct xfrm_type *type)
{
	module_put(type->owner);
}

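/* Policy lifetime enforcement.  xfrm_policy_timer() signals soft expiry
 * to the key managers via km_policy_expired() and deletes the policy on
 * hard expiry; make_jiffies() clamps a timeout in seconds to the
 * scheduler's maximum.
 */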
static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy*)data;
	unsigned long now = (unsigned long)xtime.tv_sec;
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (xp->dead)
		goto out;

	dir = xp->index & 7;

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1);
	xfrm_pol_put(xp);
}

/* Allocate an xfrm_policy. Not used here; it is intended for use by
 * pfkeyv2 SPD calls.
 */
struct xfrm_policy *xfrm_policy_alloc(int gfp)
{
	struct xfrm_policy *policy;

	policy = kmalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		memset(policy, 0, sizeof(struct xfrm_policy));
		atomic_set(&policy->refcnt, 1);
		rwlock_init(&policy->lock);
		init_timer(&policy->timer);
		policy->timer.data = (unsigned long)policy;
		policy->timer.function = xfrm_policy_timer;
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy an xfrm_policy: descendant resources must have been released
 * by this point.
 */
void __xfrm_policy_destroy(struct xfrm_policy *policy)
{
	if (!policy->dead)
		BUG();

	if (policy->bundles)
		BUG();

	if (del_timer(&policy->timer))
		BUG();

	kfree(policy);
}
EXPORT_SYMBOL(__xfrm_policy_destroy);

static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
	struct dst_entry *dst;

	while ((dst = policy->bundles) != NULL) {
		policy->bundles = dst->next;
		dst_free(dst);
	}

	if (del_timer(&policy->timer))
		atomic_dec(&policy->refcnt);

	if (atomic_read(&policy->refcnt) > 1)
		flow_cache_flush();

	xfrm_pol_put(policy);
}

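/* Work queue handler: splice the pending GC list out from under
 * xfrm_policy_gc_lock, then tear down each dead policy via
 * xfrm_policy_gc_kill().
 */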
static void xfrm_policy_gc_task(void *data)
{
	struct xfrm_policy *policy;
	struct list_head *entry, *tmp;
	struct list_head gc_list = LIST_HEAD_INIT(gc_list);

	spin_lock_bh(&xfrm_policy_gc_lock);
	list_splice_init(&xfrm_policy_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	list_for_each_safe(entry, tmp, &gc_list) {
		policy = list_entry(entry, struct xfrm_policy, list);
		xfrm_policy_gc_kill(policy);
	}
}

/* Release descendant resources and announce the entry dead.  The rule
 * must already be unlinked from all lists.
 */
static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	int dead;

	write_lock_bh(&policy->lock);
	dead = policy->dead;
	policy->dead = 1;
	write_unlock_bh(&policy->lock);

	if (unlikely(dead)) {
		WARN_ON(1);
		return;
	}

	spin_lock(&xfrm_policy_gc_lock);
	list_add(&policy->list, &xfrm_policy_gc_list);
	spin_unlock(&xfrm_policy_gc_lock);

	schedule_work(&xfrm_policy_gc_work);
}

/* Generate a new index... KAME seems to generate them ordered by cost
 * at the price of absolutely unpredictable rule ordering, which will
 * not do here.
 */
static u32 xfrm_gen_index(int dir)
{
	u32 idx;
	struct xfrm_policy *p;
	static u32 idx_generator;

	for (;;) {
		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		for (p = xfrm_policy_list[dir]; p; p = p->next) {
			if (p->index == idx)
				break;
		}
		if (!p)
			return idx;
	}
}

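/* Insert a policy into the per-direction list, ordered by priority.
 * Unless excl is set, a policy with an identical selector replaces the
 * existing entry and inherits its index; with excl, -EEXIST is returned.
 */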
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct xfrm_policy *pol, **p;
	struct xfrm_policy *delpol = NULL;
	struct xfrm_policy **newpos = NULL;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL;) {
		if (!delpol && memcmp(&policy->selector, &pol->selector, sizeof(pol->selector)) == 0) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			*p = pol->next;
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			p = &pol->next;
			continue;
		}
		if (!newpos)
			newpos = p;
		if (delpol)
			break;
		p = &pol->next;
	}
	if (newpos)
		p = newpos;
	xfrm_pol_hold(policy);
	policy->next = *p;
	*p = policy;
	atomic_inc(&flow_cache_genid);
	policy->index = delpol ? delpol->index : xfrm_gen_index(dir);
	policy->curlft.add_time = (unsigned long)xtime.tv_sec;
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol) {
		xfrm_policy_kill(delpol);
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

struct xfrm_policy *xfrm_policy_bysel(int dir, struct xfrm_selector *sel,
				      int delete)
{
	struct xfrm_policy *pol, **p;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) {
		if (memcmp(sel, &pol->selector, sizeof(*sel)) == 0) {
			xfrm_pol_hold(pol);
			if (delete)
				*p = pol->next;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (pol && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
	}
	return pol;
}
EXPORT_SYMBOL(xfrm_policy_bysel);

struct xfrm_policy *xfrm_policy_byid(int dir, u32 id, int delete)
{
	struct xfrm_policy *pol, **p;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[id & 7]; (pol=*p)!=NULL; p = &pol->next) {
		if (pol->index == id) {
			xfrm_pol_hold(pol);
			if (delete)
				*p = pol->next;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (pol && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
	}
	return pol;
}
EXPORT_SYMBOL(xfrm_policy_byid);

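/* Remove and kill every policy in every direction, dropping the list
 * lock around each xfrm_policy_kill() call.
 */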
void xfrm_policy_flush(void)
{
	struct xfrm_policy *xp;
	int dir;

	write_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		while ((xp = xfrm_policy_list[dir]) != NULL) {
			xfrm_policy_list[dir] = xp->next;
			write_unlock_bh(&xfrm_policy_lock);

			xfrm_policy_kill(xp);

			write_lock_bh(&xfrm_policy_lock);
		}
	}
	atomic_inc(&flow_cache_genid);
	write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *xp;
	int dir;
	int count = 0;
	int error = 0;

	read_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		for (xp = xfrm_policy_list[dir]; xp; xp = xp->next)
			count++;
	}

	if (count == 0) {
		error = -ENOENT;
		goto out;
	}

	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		for (xp = xfrm_policy_list[dir]; xp; xp = xp->next) {
			error = func(xp, dir%XFRM_POLICY_MAX, --count, data);
			if (error)
				goto out;
		}
	}

out:
	read_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

/* Find policy to apply to this flow. */
static void xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
			       void **objp, atomic_t **obj_refp)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	for (pol = xfrm_policy_list[dir]; pol; pol = pol->next) {
		struct xfrm_selector *sel = &pol->selector;
		int match;

		if (pol->family != family)
			continue;

		match = xfrm_selector_match(sel, fl, family);
		if (match) {
			xfrm_pol_hold(pol);
			break;
		}
	}
	read_unlock_bh(&xfrm_policy_lock);
	if ((*objp = (void *) pol) != NULL)
		*obj_refp = &pol->refcnt;
}

static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		int match = xfrm_selector_match(&pol->selector, fl,
						sk->sk_family);
		if (match)
			xfrm_pol_hold(pol);
		else
			pol = NULL;
	}
	read_unlock_bh(&xfrm_policy_lock);
	return pol;
}

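/* List helpers for the per-direction policy chains; the caller must
 * hold xfrm_policy_lock.
 */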
static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	pol->next = xfrm_policy_list[dir];
	xfrm_policy_list[dir] = pol;
	xfrm_pol_hold(pol);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct xfrm_policy **polp;

	for (polp = &xfrm_policy_list[dir];
	     *polp != NULL; polp = &(*polp)->next) {
		if (*polp == pol) {
			*polp = pol->next;
			return pol;
		}
	}
	return NULL;
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		if (dir < XFRM_POLICY_MAX)
			atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct xfrm_policy *old_pol;

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = (unsigned long)xtime.tv_sec;
		pol->index = xfrm_gen_index(XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}

static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

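/* Duplicate both per-socket policies when a socket is cloned; the
 * copies are relinked into the per-socket (XFRM_POLICY_MAX+dir) lists.
 */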
int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}

/* Resolve the list of templates for the flow, given the policy. */
static int
xfrm_tmpl_resolve(struct xfrm_policy *policy, struct flowi *fl,
		  struct xfrm_state **xfrm,
		  unsigned short family)
{
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);

	for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx>=0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */
static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
{
	struct dst_entry *x;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EINVAL);
	x = afinfo->find_bundle(fl, policy);
	xfrm_policy_put_afinfo(afinfo);
	return x;
}

/* Allocate a chain of dst_entry's, attach known xfrm's, and calculate
 * all the metrics... In short, bundle a bundle.
 */
static int
xfrm_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
		   struct flowi *fl, struct dst_entry **dst_p,
		   unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->bundle_create(policy, xfrm, nx, fl, dst_p);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

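/* Map XFRM_POLICY_* directions onto FLOW_DIR_* values; when the two
 * sets of constants coincide, the mapping is the identity.
 */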
static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	}
}

static int stale_bundle(struct dst_entry *dst);

/* Main function: finds/creates a bundle for the given flow.
 *
 * At the moment we eat a raw IP route, mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
{
	struct xfrm_policy *policy;
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst, *dst_orig = *dst_p;
	int nx = 0;
	int err;
	u32 genid;
	u16 family = dst_orig->ops->family;
restart:
	genid = atomic_read(&flow_cache_genid);
	policy = NULL;
	if (sk && sk->sk_policy[1])
		policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);

	if (!policy) {
		/* To accelerate a bit... */
		if ((dst_orig->flags & DST_NOXFRM) || !xfrm_policy_list[XFRM_POLICY_OUT])
			return 0;

		policy = flow_cache_lookup(fl, family,
					   policy_to_flow_dir(XFRM_POLICY_OUT),
					   xfrm_policy_lookup);
	}

	if (!policy)
		return 0;

	policy->curlft.use_time = (unsigned long)xtime.tv_sec;

	switch (policy->action) {
	case XFRM_POLICY_BLOCK:
		/* Prohibit the flow */
		xfrm_pol_put(policy);
		return -EPERM;

	case XFRM_POLICY_ALLOW:
		if (policy->xfrm_nr == 0) {
			/* Flow passes untransformed. */
			xfrm_pol_put(policy);
			return 0;
		}

		/* Try to find a matching bundle.
		 *
		 * LATER: help from flow cache. It is optional, this
		 * is required only for output policy.
		 */
		dst = xfrm_find_bundle(fl, policy, family);
		if (IS_ERR(dst)) {
			xfrm_pol_put(policy);
			return PTR_ERR(dst);
		}

		if (dst)
			break;

		nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

		if (unlikely(nx<0)) {
			err = nx;
			if (err == -EAGAIN && flags) {
				DECLARE_WAITQUEUE(wait, current);

				add_wait_queue(&km_waitq, &wait);
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&km_waitq, &wait);

				nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

				if (nx == -EAGAIN && signal_pending(current)) {
					err = -ERESTART;
					goto error;
				}
				if (nx == -EAGAIN ||
				    genid != atomic_read(&flow_cache_genid)) {
					xfrm_pol_put(policy);
					goto restart;
				}
				err = nx;
			}
			if (err < 0)
				goto error;
		}
		if (nx == 0) {
			/* Flow passes untransformed. */
			xfrm_pol_put(policy);
			return 0;
		}

		dst = dst_orig;
		err = xfrm_bundle_create(policy, xfrm, nx, fl, &dst, family);

		if (unlikely(err)) {
			int i;
			for (i=0; i<nx; i++)
				xfrm_state_put(xfrm[i]);
			goto error;
		}

		write_lock_bh(&policy->lock);
		if (unlikely(policy->dead || stale_bundle(dst))) {
			/* Wow! While we worked on resolving, this
			 * policy has gone. Retry. It is not paranoia,
			 * we just cannot enlist a new bundle to a dead
			 * object. We can't enlist stale bundles either.
			 */
			write_unlock_bh(&policy->lock);

			xfrm_pol_put(policy);
			if (dst)
				dst_free(dst);
			goto restart;
		}
		dst->next = policy->bundles;
		policy->bundles = dst;
		dst_hold(dst);
		write_unlock_bh(&policy->lock);
	}
	*dst_p = dst;
	dst_release(dst_orig);
	xfrm_pol_put(policy);
	return 0;

error:
	dst_release(dst_orig);
	xfrm_pol_put(policy);
	*dst_p = NULL;
	return err;
}
EXPORT_SYMBOL(xfrm_lookup);

/* When an skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we do this in a maximally
 * stupid way. Shame on me. :-)  Of course, connected sockets must
 * have the policy cached at them.
 */
static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->aalgos & (1<<x->props.aalgo)) &&
		!(x->props.mode && xfrm_state_addr_cmp(tmpl, x, family));
}

static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (!tmpl->mode)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->x[idx].xvec, family))
			return ++idx;
		if (sp->x[idx].xvec->props.mode)
			break;
	}
	return start;
}

static int
_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl);
	xfrm_policy_put_afinfo(afinfo);

	return 0;
}

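/* Return nonzero if any transform at or after index k in the secpath
 * is a tunnel-mode state.
 */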
static inline int secpath_has_tunnel(struct sec_path *sp, int k)
{
	for (; k < sp->len; k++) {
		if (sp->x[k].xvec->props.mode)
			return 1;
	}

	return 0;
}

int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct xfrm_policy *pol;
	struct flowi fl;

	if (_decode_session(skb, &fl, family) < 0)
		return 0;

	/* First, check the used SAs against their selectors. */
	if (skb->sp) {
		int i;

		for (i=skb->sp->len-1; i>=0; i--) {
			struct sec_decap_state *xvec = &(skb->sp->x[i]);
			if (!xfrm_selector_match(&xvec->xvec->sel, &fl, family))
				return 0;

			/* If there is a post_input processor, try running it */
			if (xvec->xvec->type->post_input &&
			    (xvec->xvec->type->post_input)(xvec->xvec,
							   &(xvec->decap),
							   skb) != 0)
				return 0;
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir])
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);

	if (!pol)
		pol = flow_cache_lookup(&fl, family,
					policy_to_flow_dir(dir),
					xfrm_policy_lookup);

	if (!pol)
		return !skb->sp || !secpath_has_tunnel(skb->sp, 0);

	pol->curlft.use_time = (unsigned long)xtime.tv_sec;

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find the corresponding xfrm.
		 * Order is _important_. Later we will implement some
		 * barriers, but at the moment barriers are implied
		 * between each two transformations.
		 */
		for (i = pol->xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(pol->xfrm_vec+i, sp, k, family);
			if (k < 0)
				goto reject;
		}

		if (secpath_has_tunnel(sp, k))
			goto reject;

		xfrm_pol_put(pol);
		return 1;
	}

reject:
	xfrm_pol_put(pol);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);

int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct flowi fl;

	if (_decode_session(skb, &fl, family) < 0)
		return 0;

	return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
}
EXPORT_SYMBOL(__xfrm_route_forward);

/* Optimize later using cookies and generation ids. */
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	if (!stale_bundle(dst))
		return dst;

	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst, NULL, AF_UNSPEC);
}

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = &loopback_dev;
		dev_hold(&loopback_dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such a dst must be popped before it reaches the
	 * point of failure.
	 */
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

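/* Walk every policy's bundle list, unlink the bundles selected by
 * func(), and free them after all locks have been dropped.
 */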
static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
{
	int i;
	struct xfrm_policy *pol;
	struct dst_entry *dst, **dstp, *gc_list = NULL;

	read_lock_bh(&xfrm_policy_lock);
	for (i=0; i<2*XFRM_POLICY_MAX; i++) {
		for (pol = xfrm_policy_list[i]; pol; pol = pol->next) {
			write_lock(&pol->lock);
			dstp = &pol->bundles;
			while ((dst=*dstp) != NULL) {
				if (func(dst)) {
					*dstp = dst->next;
					dst->next = gc_list;
					gc_list = dst;
				} else {
					dstp = &dst->next;
				}
			}
			write_unlock(&pol->lock);
		}
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		dst = gc_list;
		gc_list = dst->next;
		dst_free(dst);
	}
}

static int unused_bundle(struct dst_entry *dst)
{
	return !atomic_read(&dst->__refcnt);
}

static void __xfrm_garbage_collect(void)
{
	xfrm_prune_bundles(unused_bundle);
}

int xfrm_flush_bundles(void)
{
	xfrm_prune_bundles(stale_bundle);
	return 0;
}

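/* Seed the cached child and route MTUs for each segment of a freshly
 * created bundle; the effective path MTU of a segment is the smaller of
 * its transformed child MTU and its route MTU.
 */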
void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst->metrics[RTAX_MTU-1] = pmtu;
	} while ((dst = dst->next));
}
EXPORT_SYMBOL(xfrm_init_pmtu);

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */
int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
			return 0;
		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst->metrics[RTAX_MTU-1] = mtu;

		if (last == first)
			break;

		last = last->u.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}
EXPORT_SYMBOL(xfrm_bundle_ok);

/* Well... that's _TASK_. We need to scan through the transformation
 * list and figure out what MSS TCP should generate so that the final
 * datagram fits the MTU. Mama mia... :-)
 *
 * Apparently, some easy way exists, but we used to choose the most
 * bizarre ones. :-) So, raising Kalashnikov... tra-ta-ta.
 *
 * Consider this function as something like dark humour. :-)
 */
static int xfrm_get_mss(struct dst_entry *dst, u32 mtu)
{
	int res = mtu - dst->header_len;

	for (;;) {
		struct dst_entry *d = dst;
		int m = res;

		do {
			struct xfrm_state *x = d->xfrm;
			if (x) {
				spin_lock_bh(&x->lock);
				if (x->km.state == XFRM_STATE_VALID &&
				    x->type && x->type->get_max_size)
					m = x->type->get_max_size(d->xfrm, m);
				else
					m += x->props.header_len;
				spin_unlock_bh(&x->lock);
			}
		} while ((d = d->child) != NULL);

		if (m <= mtu)
			break;
		res -= (m - mtu);
		if (res < 88)
			return mtu;
	}

	return res + dst->header_len;
}

int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(dst_ops->get_mss == NULL))
			dst_ops->get_mss = xfrm_get_mss;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = __xfrm_garbage_collect;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;
			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			dst_ops->get_mss = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (likely(afinfo != NULL))
		read_lock(&afinfo->lock);
	read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	if (unlikely(afinfo == NULL))
		return;
	read_unlock(&afinfo->lock);
}

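/* Flush stale bundles when a network device goes down. */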
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_DOWN:
		xfrm_flush_bundles();
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	xfrm_dev_event,
	NULL,
	0
};

static void __init xfrm_policy_init(void)
{
	xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL, NULL);
	if (!xfrm_dst_cache)
		panic("XFRM: failed to allocate xfrm_dst_cache\n");

	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
	register_netdevice_notifier(&xfrm_dev_notifier);
}

void __init xfrm_init(void)
{
	xfrm_state_init();
	xfrm_policy_init();
	xfrm_input_init();
}