/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <asm/bug.h>
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <net/xfrm.h>
#include <net/ip.h>

DECLARE_MUTEX(xfrm_cfg_sem);
EXPORT_SYMBOL(xfrm_cfg_sem);

static DEFINE_RWLOCK(xfrm_policy_lock);

struct xfrm_policy *xfrm_policy_list[XFRM_POLICY_MAX*2];
EXPORT_SYMBOL(xfrm_policy_list);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static kmem_cache_t *xfrm_dst_cache __read_mostly;

static struct work_struct xfrm_policy_gc_work;
static struct list_head xfrm_policy_gc_list =
	LIST_HEAD_INIT(xfrm_policy_gc_list);
static DEFINE_SPINLOCK(xfrm_policy_gc_lock);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
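
/* Register an xfrm transform type (e.g. AH or ESP) for the given address
 * family.  The type is keyed by its IP protocol number in the per-family
 * type map; registering a second type for the same protocol fails with
 * -EEXIST.
 */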
int xfrm_register_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct xfrm_type_map *typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	write_lock(&typemap->lock);
	if (likely(typemap->map[type->proto] == NULL))
		typemap->map[type->proto] = type;
	else
		err = -EEXIST;
	write_unlock(&typemap->lock);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_type);

int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct xfrm_type_map *typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	write_lock(&typemap->lock);
	if (unlikely(typemap->map[type->proto] != type))
		err = -ENOENT;
	else
		typemap->map[type->proto] = NULL;
	write_unlock(&typemap->lock);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_type);
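
/* Look up the transform type registered for @proto in @family and take a
 * module reference on it.  If the type is not present, try once to load it
 * via request_module("xfrm-type-<family>-<proto>") and retry the lookup.
 */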
struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_type_map *typemap;
	struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	read_lock(&typemap->lock);
	type = typemap->map[proto];
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	read_unlock(&typemap->lock);
	if (!type && !modload_attempted) {
		xfrm_policy_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d",
			       (int) family, (int) proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_policy_put_afinfo(afinfo);
	return type;
}

int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl,
		    unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	if (likely(afinfo->dst_lookup != NULL))
		err = afinfo->dst_lookup(dst, fl);
	else
		err = -EINVAL;
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_dst_lookup);

void xfrm_put_type(struct xfrm_type *type)
{
	module_put(type->owner);
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
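
/* Per-policy expiry timer.  Compares the policy's hard/soft add and use
 * lifetimes against the current time: a soft expiry notifies key managers
 * via km_policy_expired(), a hard expiry deletes the policy, and otherwise
 * the timer is re-armed for the nearest remaining deadline.
 */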
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy*)data;
	unsigned long now = (unsigned long)xtime.tv_sec;
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (xp->dead)
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1);
	xfrm_pol_put(xp);
}

/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */
struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kmalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		memset(policy, 0, sizeof(struct xfrm_policy));
		atomic_set(&policy->refcnt, 1);
		rwlock_init(&policy->lock);
		init_timer(&policy->timer);
		policy->timer.data = (unsigned long)policy;
		policy->timer.function = xfrm_policy_timer;
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy xfrm_policy: descendant resources must already have been released. */
void __xfrm_policy_destroy(struct xfrm_policy *policy)
{
	if (!policy->dead)
		BUG();

	if (policy->bundles)
		BUG();

	if (del_timer(&policy->timer))
		BUG();

	kfree(policy);
}
EXPORT_SYMBOL(__xfrm_policy_destroy);
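
/* Deferred destruction: xfrm_policy_kill() marks a policy dead, queues it on
 * xfrm_policy_gc_list and schedules xfrm_policy_gc_work.  The work function
 * drains that list from process context, freeing cached bundles, stopping the
 * timer and dropping the final reference.
 */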
static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
	struct dst_entry *dst;

	while ((dst = policy->bundles) != NULL) {
		policy->bundles = dst->next;
		dst_free(dst);
	}

	if (del_timer(&policy->timer))
		atomic_dec(&policy->refcnt);

	if (atomic_read(&policy->refcnt) > 1)
		flow_cache_flush();

	xfrm_pol_put(policy);
}

static void xfrm_policy_gc_task(void *data)
{
	struct xfrm_policy *policy;
	struct list_head *entry, *tmp;
	struct list_head gc_list = LIST_HEAD_INIT(gc_list);

	spin_lock_bh(&xfrm_policy_gc_lock);
	list_splice_init(&xfrm_policy_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	list_for_each_safe(entry, tmp, &gc_list) {
		policy = list_entry(entry, struct xfrm_policy, list);
		xfrm_policy_gc_kill(policy);
	}
}

/* Rule must be locked. Release descendant resources and announce the
 * entry dead. The rule must already have been unlinked from the lists.
 */
static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	int dead;

	write_lock_bh(&policy->lock);
	dead = policy->dead;
	policy->dead = 1;
	write_unlock_bh(&policy->lock);

	if (unlikely(dead)) {
		WARN_ON(1);
		return;
	}

	spin_lock(&xfrm_policy_gc_lock);
	list_add(&policy->list, &xfrm_policy_gc_list);
	spin_unlock(&xfrm_policy_gc_lock);

	schedule_work(&xfrm_policy_gc_work);
}

/* Generate a new index. KAME seems to generate indices ordered by cost,
 * at the price of making rule ordering absolutely unpredictable. That will
 * not do here.
 */
static u32 xfrm_gen_index(int dir)
{
	u32 idx;
	struct xfrm_policy *p;
	static u32 idx_generator;

	for (;;) {
		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		for (p = xfrm_policy_list[dir]; p; p = p->next) {
			if (p->index == idx)
				break;
		}
		if (!p)
			return idx;
	}
}
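
/* Insert a policy into the list for @dir, ordered by priority.  A policy
 * with an identical selector replaces the old one unless @excl is set, in
 * which case -EEXIST is returned.  Cached bundles of the policies following
 * the new entry are unlinked and freed once the locks are dropped.
 */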
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct xfrm_policy *pol, **p;
	struct xfrm_policy *delpol = NULL;
	struct xfrm_policy **newpos = NULL;
	struct dst_entry *gc_list;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL;) {
		if (!delpol && memcmp(&policy->selector, &pol->selector, sizeof(pol->selector)) == 0) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			*p = pol->next;
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			p = &pol->next;
			continue;
		}
		if (!newpos)
			newpos = p;
		if (delpol)
			break;
		p = &pol->next;
	}
	if (newpos)
		p = newpos;
	xfrm_pol_hold(policy);
	policy->next = *p;
	*p = policy;
	atomic_inc(&flow_cache_genid);
	policy->index = delpol ? delpol->index : xfrm_gen_index(dir);
	policy->curlft.add_time = (unsigned long)xtime.tv_sec;
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);

	read_lock_bh(&xfrm_policy_lock);
	gc_list = NULL;
	for (policy = policy->next; policy; policy = policy->next) {
		struct dst_entry *dst;

		write_lock(&policy->lock);
		dst = policy->bundles;
		if (dst) {
			struct dst_entry *tail = dst;
			while (tail->next)
				tail = tail->next;
			tail->next = gc_list;
			gc_list = dst;
			policy->bundles = NULL;
		}
		write_unlock(&policy->lock);
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;

		gc_list = dst->next;
		dst_free(dst);
	}

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);
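
/* Find a policy by its selector (xfrm_policy_bysel) or by its index
 * (xfrm_policy_byid).  A reference is taken on the returned policy; with
 * @delete set, the matching policy is also unlinked and killed.
 */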
struct xfrm_policy *xfrm_policy_bysel(int dir, struct xfrm_selector *sel,
				      int delete)
{
	struct xfrm_policy *pol, **p;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) {
		if (memcmp(sel, &pol->selector, sizeof(*sel)) == 0) {
			xfrm_pol_hold(pol);
			if (delete)
				*p = pol->next;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (pol && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
	}
	return pol;
}
EXPORT_SYMBOL(xfrm_policy_bysel);

struct xfrm_policy *xfrm_policy_byid(int dir, u32 id, int delete)
{
	struct xfrm_policy *pol, **p;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) {
		if (pol->index == id) {
			xfrm_pol_hold(pol);
			if (delete)
				*p = pol->next;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (pol && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
	}
	return pol;
}
EXPORT_SYMBOL(xfrm_policy_byid);
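
/* xfrm_policy_flush() removes and kills every policy in the main SPD;
 * xfrm_policy_walk() invokes @func on each policy, socket policies included,
 * passing the direction and a countdown of the remaining entries.
 */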
void xfrm_policy_flush(void)
{
	struct xfrm_policy *xp;
	int dir;

	write_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		while ((xp = xfrm_policy_list[dir]) != NULL) {
			xfrm_policy_list[dir] = xp->next;
			write_unlock_bh(&xfrm_policy_lock);

			xfrm_policy_kill(xp);

			write_lock_bh(&xfrm_policy_lock);
		}
	}
	atomic_inc(&flow_cache_genid);
	write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *xp;
	int dir;
	int count = 0;
	int error = 0;

	read_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		for (xp = xfrm_policy_list[dir]; xp; xp = xp->next)
			count++;
	}

	if (count == 0) {
		error = -ENOENT;
		goto out;
	}

	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		for (xp = xfrm_policy_list[dir]; xp; xp = xp->next) {
			error = func(xp, dir%XFRM_POLICY_MAX, --count, data);
			if (error)
				goto out;
		}
	}

out:
	read_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

/* Find policy to apply to this flow. */
static void xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
			       void **objp, atomic_t **obj_refp)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	for (pol = xfrm_policy_list[dir]; pol; pol = pol->next) {
		struct xfrm_selector *sel = &pol->selector;
		int match;

		if (pol->family != family)
			continue;

		match = xfrm_selector_match(sel, fl, family);
		if (match) {
			xfrm_pol_hold(pol);
			break;
		}
	}
	read_unlock_bh(&xfrm_policy_lock);
	if ((*objp = (void *) pol) != NULL)
		*obj_refp = &pol->refcnt;
}
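
/* Check a socket's own policy for @dir against the flow; callers fall back
 * to the flow-cache/main-SPD lookup above when this returns NULL.
 */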
static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		int match = xfrm_selector_match(&pol->selector, fl,
						sk->sk_family);
		if (match)
			xfrm_pol_hold(pol);
		else
			pol = NULL;
	}
	read_unlock_bh(&xfrm_policy_lock);
	return pol;
}

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	pol->next = xfrm_policy_list[dir];
	xfrm_policy_list[dir] = pol;
	xfrm_pol_hold(pol);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct xfrm_policy **polp;

	for (polp = &xfrm_policy_list[dir];
	     *polp != NULL; polp = &(*polp)->next) {
		if (*polp == pol) {
			*polp = pol->next;
			return pol;
		}
	}
	return NULL;
}
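
/* Unlink @pol from the list for @dir and kill it.  flow_cache_genid is
 * bumped only for the main-SPD directions, since per-socket policies do not
 * go through the flow cache.
 */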
int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		if (dir < XFRM_POLICY_MAX)
			atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct xfrm_policy *old_pol;

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = (unsigned long)xtime.tv_sec;
		pol->index = xfrm_gen_index(XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}

static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}

/* Resolve list of templates for the flow, given policy. */
static int
xfrm_tmpl_resolve(struct xfrm_policy *policy, struct flowi *fl,
		  struct xfrm_state **xfrm,
		  unsigned short family)
{
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);

	for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx>=0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */
static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
{
	struct dst_entry *x;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EINVAL);
	x = afinfo->find_bundle(fl, policy);
	xfrm_policy_put_afinfo(afinfo);
	return x;
}

/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... In short, bundle a bundle.
 */
static int
xfrm_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
		   struct flowi *fl, struct dst_entry **dst_p,
		   unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->bundle_create(policy, xfrm, nx, fl, dst_p);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
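
/* Map an XFRM_POLICY_* direction to the corresponding FLOW_DIR_* value.
 * When the two enumerations already coincide the comparison is a
 * compile-time constant and the switch can be folded away.
 */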
static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	};
}

static int stale_bundle(struct dst_entry *dst);

/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
{
	struct xfrm_policy *policy;
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst, *dst_orig = *dst_p;
	int nx = 0;
	int err;
	u32 genid;
	u16 family = dst_orig->ops->family;

restart:
	genid = atomic_read(&flow_cache_genid);
	policy = NULL;
	if (sk && sk->sk_policy[1])
		policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);

	if (!policy) {
		/* To accelerate a bit... */
		if ((dst_orig->flags & DST_NOXFRM) || !xfrm_policy_list[XFRM_POLICY_OUT])
			return 0;

		policy = flow_cache_lookup(fl, family,
					   policy_to_flow_dir(XFRM_POLICY_OUT),
					   xfrm_policy_lookup);
	}

	if (!policy)
		return 0;

	policy->curlft.use_time = (unsigned long)xtime.tv_sec;

	switch (policy->action) {
	case XFRM_POLICY_BLOCK:
		/* Prohibit the flow */
		err = -EPERM;
		goto error;

	case XFRM_POLICY_ALLOW:
		if (policy->xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pol_put(policy);
			return 0;
		}

		/* Try to find matching bundle.
		 *
		 * LATER: help from flow cache. It is optional, this
		 * is required only for output policy.
		 */
		dst = xfrm_find_bundle(fl, policy, family);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			goto error;
		}

		if (dst)
			break;

		nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

		if (unlikely(nx<0)) {
			err = nx;
			if (err == -EAGAIN && flags) {
				DECLARE_WAITQUEUE(wait, current);

				add_wait_queue(&km_waitq, &wait);
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&km_waitq, &wait);

				nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

				if (nx == -EAGAIN && signal_pending(current)) {
					err = -ERESTART;
					goto error;
				}
				if (nx == -EAGAIN ||
				    genid != atomic_read(&flow_cache_genid)) {
					xfrm_pol_put(policy);
					goto restart;
				}
				err = nx;
			}
			if (err < 0)
				goto error;
		}
		if (nx == 0) {
			/* Flow passes not transformed. */
			xfrm_pol_put(policy);
			return 0;
		}

		dst = dst_orig;
		err = xfrm_bundle_create(policy, xfrm, nx, fl, &dst, family);

		if (unlikely(err)) {
			int i;
			for (i=0; i<nx; i++)
				xfrm_state_put(xfrm[i]);
			goto error;
		}

		write_lock_bh(&policy->lock);
		if (unlikely(policy->dead || stale_bundle(dst))) {
			/* Wow! While we worked on resolving, this
			 * policy has gone. Retry. It is not paranoia,
			 * we just cannot enlist new bundle to dead object.
			 * We can't enlist stale bundles either.
			 */
			write_unlock_bh(&policy->lock);
			xfrm_pol_put(policy);
			if (dst)
				dst_free(dst);
			goto restart;
		}
		dst->next = policy->bundles;
		policy->bundles = dst;
		dst_hold(dst);
		write_unlock_bh(&policy->lock);
	}
	*dst_p = dst;
	dst_release(dst_orig);
	xfrm_pol_put(policy);
	return 0;

error:
	dst_release(dst_orig);
	xfrm_pol_put(policy);
	*dst_p = NULL;
	return err;
}
EXPORT_SYMBOL(xfrm_lookup);

/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we do this in a maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have policy cached at them.
 */
static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->aalgos & (1<<x->props.aalgo)) &&
		!(x->props.mode && xfrm_state_addr_cmp(tmpl, x, family));
}

static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (!tmpl->mode)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->x[idx].xvec, family))
			return ++idx;
		if (sp->x[idx].xvec->props.mode)
			break;
	}
	return start;
}

static int
_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl);
	xfrm_policy_put_afinfo(afinfo);
	return 0;
}

static inline int secpath_has_tunnel(struct sec_path *sp, int k)
{
	for (; k < sp->len; k++) {
		if (sp->x[k].xvec->props.mode)
			return 1;
	}

	return 0;
}
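
/* Inbound/forward policy check: decode the flow from the skb, verify the SAs
 * recorded in skb->sp against their selectors (running any post_input
 * processors), then match the applicable policy's templates against the
 * security path in order.
 */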
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct xfrm_policy *pol;
	struct flowi fl;

	if (_decode_session(skb, &fl, family) < 0)
		return 0;

	/* First, check used SA against their selectors. */
	if (skb->sp) {
		int i;

		for (i=skb->sp->len-1; i>=0; i--) {
			struct sec_decap_state *xvec = &(skb->sp->x[i]);
			if (!xfrm_selector_match(&xvec->xvec->sel, &fl, family))
				return 0;

			/* If there is a post_input processor, try running it */
			if (xvec->xvec->type->post_input &&
			    (xvec->xvec->type->post_input)(xvec->xvec,
							   &(xvec->decap),
							   skb) != 0)
				return 0;
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir])
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);

	if (!pol)
		pol = flow_cache_lookup(&fl, family,
					policy_to_flow_dir(dir),
					xfrm_policy_lookup);

	if (!pol)
		return !skb->sp || !secpath_has_tunnel(skb->sp, 0);

	pol->curlft.use_time = (unsigned long)xtime.tv_sec;

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = pol->xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(pol->xfrm_vec+i, sp, k, family);
			if (k < 0)
				goto reject;
		}

		if (secpath_has_tunnel(sp, k))
			goto reject;

		xfrm_pol_put(pol);
		return 1;
	}

reject:
	xfrm_pol_put(pol);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);

int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct flowi fl;

	if (_decode_session(skb, &fl, family) < 0)
		return 0;

	return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
}
EXPORT_SYMBOL(__xfrm_route_forward);

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* If it is marked obsolete, which is how we even get here,
	 * then we have purged it from the policy bundle list and we
	 * did that for a good reason.
	 */
	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst, NULL, AF_UNSPEC);
}

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = &loopback_dev;
		dev_hold(&loopback_dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such a dst must be popped before it reaches the point of failure. */
	return;
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}
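
/* Walk every policy's bundle list, unlink the bundles for which @func
 * returns true and free them outside the policy locks.  Used below for
 * garbage collection of unused bundles, flushing stale bundles, and
 * flushing everything.
 */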
static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
{
	int i;
	struct xfrm_policy *pol;
	struct dst_entry *dst, **dstp, *gc_list = NULL;

	read_lock_bh(&xfrm_policy_lock);
	for (i=0; i<2*XFRM_POLICY_MAX; i++) {
		for (pol = xfrm_policy_list[i]; pol; pol = pol->next) {
			write_lock(&pol->lock);
			dstp = &pol->bundles;
			while ((dst=*dstp) != NULL) {
				if (func(dst)) {
					*dstp = dst->next;
					dst->next = gc_list;
					gc_list = dst;
				} else {
					dstp = &dst->next;
				}
			}
			write_unlock(&pol->lock);
		}
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		dst = gc_list;
		gc_list = dst->next;
		dst_free(dst);
	}
}

static int unused_bundle(struct dst_entry *dst)
{
	return !atomic_read(&dst->__refcnt);
}

static void __xfrm_garbage_collect(void)
{
	xfrm_prune_bundles(unused_bundle);
}

int xfrm_flush_bundles(void)
{
	xfrm_prune_bundles(stale_bundle);
	return 0;
}

static int always_true(struct dst_entry *dst)
{
	return 1;
}

void xfrm_flush_all_bundles(void)
{
	xfrm_prune_bundles(always_true);
}
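
/* Walk a freshly built bundle and, at each level, set the path MTU to the
 * smaller of the child MTU adjusted for transform overhead (via
 * xfrm_state_mtu) and the cached MTU of the underlying route.
 */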
void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst->metrics[RTAX_MTU-1] = pmtu;
	} while ((dst = dst->next));
}
EXPORT_SYMBOL(xfrm_init_pmtu);

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */
int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
			return 0;
		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst->metrics[RTAX_MTU-1] = mtu;

		if (last == first)
			break;

		last = last->u.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}
EXPORT_SYMBOL(xfrm_bundle_ok);
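
/* Per-family glue: an address family registers its xfrm_policy_afinfo
 * (dst_ops, bundle construction, session decoding) here, and any hooks it
 * leaves NULL are filled in with the generic xfrm versions above.
 */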
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = __xfrm_garbage_collect;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;
			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (likely(afinfo != NULL))
		read_lock(&afinfo->lock);
	read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	if (unlikely(afinfo == NULL))
		return;
	read_unlock(&afinfo->lock);
}
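
/* Flush stale bundles when a network device goes down, so cached bundles
 * that reference the device are not used afterwards.
 */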
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_DOWN:
		xfrm_flush_bundles();
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	xfrm_dev_event,
	NULL,
	0
};

static void __init xfrm_policy_init(void)
{
	xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL, NULL);
	if (!xfrm_dst_cache)
		panic("XFRM: failed to allocate xfrm_dst_cache\n");

	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
	register_netdevice_notifier(&xfrm_dev_notifier);
}

void __init xfrm_init(void)
{
	xfrm_state_init();
	xfrm_policy_init();
	xfrm_input_init();
}