xfrm_policy.c

  1. /*
  2. * xfrm_policy.c
  3. *
  4. * Changes:
  5. * Mitsuru KANDA @USAGI
  6. * Kazunori MIYAZAWA @USAGI
  7. * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
  8. * IPv6 support
  9. * Kazunori MIYAZAWA @USAGI
  10. * YOSHIFUJI Hideaki
  11. * Split up af-specific portion
  12. * Derek Atkins <derek@ihtfp.com> Add the post_input processor
  13. *
  14. */
  15. #include <linux/slab.h>
  16. #include <linux/kmod.h>
  17. #include <linux/list.h>
  18. #include <linux/spinlock.h>
  19. #include <linux/workqueue.h>
  20. #include <linux/notifier.h>
  21. #include <linux/netdevice.h>
  22. #include <linux/netfilter.h>
  23. #include <linux/module.h>
  24. #include <net/xfrm.h>
  25. #include <net/ip.h>
  26. DEFINE_MUTEX(xfrm_cfg_mutex);
  27. EXPORT_SYMBOL(xfrm_cfg_mutex);
  28. static DEFINE_RWLOCK(xfrm_policy_lock);
  29. struct xfrm_policy *xfrm_policy_list[XFRM_POLICY_MAX*2];
  30. EXPORT_SYMBOL(xfrm_policy_list);
  31. #ifdef CONFIG_XFRM_SUB_POLICY
  32. struct xfrm_policy *xfrm_policy_list_sub[XFRM_POLICY_MAX*2];
  33. EXPORT_SYMBOL(xfrm_policy_list_sub);
  34. #define XFRM_POLICY_LISTS(type) \
  35. ((type == XFRM_POLICY_TYPE_SUB) ? xfrm_policy_list_sub : \
  36. xfrm_policy_list)
  37. #define XFRM_POLICY_LISTHEAD(type, dir) \
  38. ((type == XFRM_POLICY_TYPE_SUB) ? xfrm_policy_list_sub[dir] : \
  39. xfrm_policy_list[dir])
  40. #define XFRM_POLICY_LISTHEADP(type, dir) \
  41. ((type == XFRM_POLICY_TYPE_SUB) ? &xfrm_policy_list_sub[dir] : \
  42. &xfrm_policy_list[dir])
  43. #else
  44. #define XFRM_POLICY_LISTS(type) xfrm_policy_list
  45. #define XFRM_POLICY_LISTHEAD(type, dir) xfrm_policy_list[dir]
  46. #define XFRM_POLICY_LISTHEADP(type, dir) &xfrm_policy_list[dir]
  47. #endif
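/*
 * Policies live in simple per-direction singly linked lists (chained
 * through pol->next).  Slots 0..XFRM_POLICY_MAX-1 hold the system-wide
 * policies for in/out/fwd; slots XFRM_POLICY_MAX+dir hold per-socket
 * policies (see the __xfrm_policy_link() callers below).  With
 * CONFIG_XFRM_SUB_POLICY a second array carries the "sub" type
 * policies, and the macros above select the array or list head by type.
 */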
  48. static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
  49. static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
  50. static kmem_cache_t *xfrm_dst_cache __read_mostly;
  51. static struct work_struct xfrm_policy_gc_work;
  52. static struct list_head xfrm_policy_gc_list =
  53. LIST_HEAD_INIT(xfrm_policy_gc_list);
  54. static DEFINE_SPINLOCK(xfrm_policy_gc_lock);
  55. static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
  56. static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
  57. static struct xfrm_policy_afinfo *xfrm_policy_lock_afinfo(unsigned int family);
  58. static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo);
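/*
 * Protocol type handlers (AH, ESP, IPcomp, ...) register themselves per
 * address family: each family's xfrm_policy_afinfo carries a type_map[]
 * indexed by the IPsec protocol number, updated below under the afinfo
 * write lock.  As an illustrative sketch (fields beyond .owner/.proto
 * depend on the full xfrm_type definition), an ESP module would register
 * roughly like this at init time:
 *
 *	static struct xfrm_type esp_type = {
 *		.owner	= THIS_MODULE,
 *		.proto	= IPPROTO_ESP,
 *		...
 *	};
 *	xfrm_register_type(&esp_type, AF_INET);
 */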
  59. int xfrm_register_type(struct xfrm_type *type, unsigned short family)
  60. {
  61. struct xfrm_policy_afinfo *afinfo = xfrm_policy_lock_afinfo(family);
  62. struct xfrm_type **typemap;
  63. int err = 0;
  64. if (unlikely(afinfo == NULL))
  65. return -EAFNOSUPPORT;
  66. typemap = afinfo->type_map;
  67. if (likely(typemap[type->proto] == NULL))
  68. typemap[type->proto] = type;
  69. else
  70. err = -EEXIST;
  71. xfrm_policy_unlock_afinfo(afinfo);
  72. return err;
  73. }
  74. EXPORT_SYMBOL(xfrm_register_type);
  75. int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
  76. {
  77. struct xfrm_policy_afinfo *afinfo = xfrm_policy_lock_afinfo(family);
  78. struct xfrm_type **typemap;
  79. int err = 0;
  80. if (unlikely(afinfo == NULL))
  81. return -EAFNOSUPPORT;
  82. typemap = afinfo->type_map;
  83. if (unlikely(typemap[type->proto] != type))
  84. err = -ENOENT;
  85. else
  86. typemap[type->proto] = NULL;
  87. xfrm_policy_unlock_afinfo(afinfo);
  88. return err;
  89. }
  90. EXPORT_SYMBOL(xfrm_unregister_type);
  91. struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
  92. {
  93. struct xfrm_policy_afinfo *afinfo;
  94. struct xfrm_type **typemap;
  95. struct xfrm_type *type;
  96. int modload_attempted = 0;
  97. retry:
  98. afinfo = xfrm_policy_get_afinfo(family);
  99. if (unlikely(afinfo == NULL))
  100. return NULL;
  101. typemap = afinfo->type_map;
  102. type = typemap[proto];
  103. if (unlikely(type && !try_module_get(type->owner)))
  104. type = NULL;
  105. if (!type && !modload_attempted) {
  106. xfrm_policy_put_afinfo(afinfo);
  107. request_module("xfrm-type-%d-%d",
  108. (int) family, (int) proto);
  109. modload_attempted = 1;
  110. goto retry;
  111. }
  112. xfrm_policy_put_afinfo(afinfo);
  113. return type;
  114. }
  115. int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl,
  116. unsigned short family)
  117. {
  118. struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  119. int err = 0;
  120. if (unlikely(afinfo == NULL))
  121. return -EAFNOSUPPORT;
  122. if (likely(afinfo->dst_lookup != NULL))
  123. err = afinfo->dst_lookup(dst, fl);
  124. else
  125. err = -EINVAL;
  126. xfrm_policy_put_afinfo(afinfo);
  127. return err;
  128. }
  129. EXPORT_SYMBOL(xfrm_dst_lookup);
  130. void xfrm_put_type(struct xfrm_type *type)
  131. {
  132. module_put(type->owner);
  133. }
  134. int xfrm_register_mode(struct xfrm_mode *mode, int family)
  135. {
  136. struct xfrm_policy_afinfo *afinfo;
  137. struct xfrm_mode **modemap;
  138. int err;
  139. if (unlikely(mode->encap >= XFRM_MODE_MAX))
  140. return -EINVAL;
  141. afinfo = xfrm_policy_lock_afinfo(family);
  142. if (unlikely(afinfo == NULL))
  143. return -EAFNOSUPPORT;
  144. err = -EEXIST;
  145. modemap = afinfo->mode_map;
  146. if (likely(modemap[mode->encap] == NULL)) {
  147. modemap[mode->encap] = mode;
  148. err = 0;
  149. }
  150. xfrm_policy_unlock_afinfo(afinfo);
  151. return err;
  152. }
  153. EXPORT_SYMBOL(xfrm_register_mode);
  154. int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
  155. {
  156. struct xfrm_policy_afinfo *afinfo;
  157. struct xfrm_mode **modemap;
  158. int err;
  159. if (unlikely(mode->encap >= XFRM_MODE_MAX))
  160. return -EINVAL;
  161. afinfo = xfrm_policy_lock_afinfo(family);
  162. if (unlikely(afinfo == NULL))
  163. return -EAFNOSUPPORT;
  164. err = -ENOENT;
  165. modemap = afinfo->mode_map;
  166. if (likely(modemap[mode->encap] == mode)) {
  167. modemap[mode->encap] = NULL;
  168. err = 0;
  169. }
  170. xfrm_policy_unlock_afinfo(afinfo);
  171. return err;
  172. }
  173. EXPORT_SYMBOL(xfrm_unregister_mode);
  174. struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
  175. {
  176. struct xfrm_policy_afinfo *afinfo;
  177. struct xfrm_mode *mode;
  178. int modload_attempted = 0;
  179. if (unlikely(encap >= XFRM_MODE_MAX))
  180. return NULL;
  181. retry:
  182. afinfo = xfrm_policy_get_afinfo(family);
  183. if (unlikely(afinfo == NULL))
  184. return NULL;
  185. mode = afinfo->mode_map[encap];
  186. if (unlikely(mode && !try_module_get(mode->owner)))
  187. mode = NULL;
  188. if (!mode && !modload_attempted) {
  189. xfrm_policy_put_afinfo(afinfo);
  190. request_module("xfrm-mode-%d-%d", family, encap);
  191. modload_attempted = 1;
  192. goto retry;
  193. }
  194. xfrm_policy_put_afinfo(afinfo);
  195. return mode;
  196. }
  197. void xfrm_put_mode(struct xfrm_mode *mode)
  198. {
  199. module_put(mode->owner);
  200. }
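/*
 * Each policy carries a timer that enforces its lifetime limits: soft
 * add/use expiry only notifies the key managers via
 * km_policy_expired(..., hard=0) and re-arms the timer, while hard
 * expiry unlinks the policy and, if the delete succeeded, reports
 * km_policy_expired(..., hard=1).  A reference is taken whenever the
 * timer is successfully re-armed.
 */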
  201. static inline unsigned long make_jiffies(long secs)
  202. {
  203. if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
  204. return MAX_SCHEDULE_TIMEOUT-1;
  205. else
  206. return secs*HZ;
  207. }
  208. static void xfrm_policy_timer(unsigned long data)
  209. {
  210. struct xfrm_policy *xp = (struct xfrm_policy*)data;
  211. unsigned long now = (unsigned long)xtime.tv_sec;
  212. long next = LONG_MAX;
  213. int warn = 0;
  214. int dir;
  215. read_lock(&xp->lock);
  216. if (xp->dead)
  217. goto out;
  218. dir = xfrm_policy_id2dir(xp->index);
  219. if (xp->lft.hard_add_expires_seconds) {
  220. long tmo = xp->lft.hard_add_expires_seconds +
  221. xp->curlft.add_time - now;
  222. if (tmo <= 0)
  223. goto expired;
  224. if (tmo < next)
  225. next = tmo;
  226. }
  227. if (xp->lft.hard_use_expires_seconds) {
  228. long tmo = xp->lft.hard_use_expires_seconds +
  229. (xp->curlft.use_time ? : xp->curlft.add_time) - now;
  230. if (tmo <= 0)
  231. goto expired;
  232. if (tmo < next)
  233. next = tmo;
  234. }
  235. if (xp->lft.soft_add_expires_seconds) {
  236. long tmo = xp->lft.soft_add_expires_seconds +
  237. xp->curlft.add_time - now;
  238. if (tmo <= 0) {
  239. warn = 1;
  240. tmo = XFRM_KM_TIMEOUT;
  241. }
  242. if (tmo < next)
  243. next = tmo;
  244. }
  245. if (xp->lft.soft_use_expires_seconds) {
  246. long tmo = xp->lft.soft_use_expires_seconds +
  247. (xp->curlft.use_time ? : xp->curlft.add_time) - now;
  248. if (tmo <= 0) {
  249. warn = 1;
  250. tmo = XFRM_KM_TIMEOUT;
  251. }
  252. if (tmo < next)
  253. next = tmo;
  254. }
  255. if (warn)
  256. km_policy_expired(xp, dir, 0, 0);
  257. if (next != LONG_MAX &&
  258. !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
  259. xfrm_pol_hold(xp);
  260. out:
  261. read_unlock(&xp->lock);
  262. xfrm_pol_put(xp);
  263. return;
  264. expired:
  265. read_unlock(&xp->lock);
  266. if (!xfrm_policy_delete(xp, dir))
  267. km_policy_expired(xp, dir, 1, 0);
  268. xfrm_pol_put(xp);
  269. }
  270. /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
  271. * SPD calls.
  272. */
  273. struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
  274. {
  275. struct xfrm_policy *policy;
  276. policy = kzalloc(sizeof(struct xfrm_policy), gfp);
  277. if (policy) {
  278. atomic_set(&policy->refcnt, 1);
  279. rwlock_init(&policy->lock);
  280. init_timer(&policy->timer);
  281. policy->timer.data = (unsigned long)policy;
  282. policy->timer.function = xfrm_policy_timer;
  283. }
  284. return policy;
  285. }
  286. EXPORT_SYMBOL(xfrm_policy_alloc);
  287. /* Destroy xfrm_policy: descendant resources must have been released by this moment. */
  288. void __xfrm_policy_destroy(struct xfrm_policy *policy)
  289. {
  290. BUG_ON(!policy->dead);
  291. BUG_ON(policy->bundles);
  292. if (del_timer(&policy->timer))
  293. BUG();
  294. security_xfrm_policy_free(policy);
  295. kfree(policy);
  296. }
  297. EXPORT_SYMBOL(__xfrm_policy_destroy);
  298. static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
  299. {
  300. struct dst_entry *dst;
  301. while ((dst = policy->bundles) != NULL) {
  302. policy->bundles = dst->next;
  303. dst_free(dst);
  304. }
  305. if (del_timer(&policy->timer))
  306. atomic_dec(&policy->refcnt);
  307. if (atomic_read(&policy->refcnt) > 1)
  308. flow_cache_flush();
  309. xfrm_pol_put(policy);
  310. }
  311. static void xfrm_policy_gc_task(void *data)
  312. {
  313. struct xfrm_policy *policy;
  314. struct list_head *entry, *tmp;
  315. struct list_head gc_list = LIST_HEAD_INIT(gc_list);
  316. spin_lock_bh(&xfrm_policy_gc_lock);
  317. list_splice_init(&xfrm_policy_gc_list, &gc_list);
  318. spin_unlock_bh(&xfrm_policy_gc_lock);
  319. list_for_each_safe(entry, tmp, &gc_list) {
  320. policy = list_entry(entry, struct xfrm_policy, list);
  321. xfrm_policy_gc_kill(policy);
  322. }
  323. }
  324. /* Rule must be locked. Release descendant resources, announce
  325. * entry dead. The rule must already be unlinked from the lists.
  326. */
  327. static void xfrm_policy_kill(struct xfrm_policy *policy)
  328. {
  329. int dead;
  330. write_lock_bh(&policy->lock);
  331. dead = policy->dead;
  332. policy->dead = 1;
  333. write_unlock_bh(&policy->lock);
  334. if (unlikely(dead)) {
  335. WARN_ON(1);
  336. return;
  337. }
  338. spin_lock(&xfrm_policy_gc_lock);
  339. list_add(&policy->list, &xfrm_policy_gc_list);
  340. spin_unlock(&xfrm_policy_gc_lock);
  341. schedule_work(&xfrm_policy_gc_work);
  342. }
  343. /* Generate a new index... KAME seems to generate them ordered by cost,
  344. * at the price of absolute unpredictability of rule ordering. That will not do here. */
  345. static u32 xfrm_gen_index(u8 type, int dir)
  346. {
  347. u32 idx;
  348. struct xfrm_policy *p;
  349. static u32 idx_generator;
  350. for (;;) {
  351. idx = (idx_generator | dir);
  352. idx_generator += 8;
  353. if (idx == 0)
  354. idx = 8;
  355. for (p = XFRM_POLICY_LISTHEAD(type, dir); p; p = p->next) {
  356. if (p->index == idx)
  357. break;
  358. }
  359. if (!p)
  360. return idx;
  361. }
  362. }
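/*
 * Policy indices encode the direction in their low bits: the generator
 * above advances in steps of 8 and ORs in "dir", so xfrm_policy_id2dir()
 * can recover the direction from an index, and the list scan guarantees
 * the index is unique within that direction.
 */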
  363. int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
  364. {
  365. struct xfrm_policy *pol, **p;
  366. struct xfrm_policy *delpol = NULL;
  367. struct xfrm_policy **newpos = NULL;
  368. struct dst_entry *gc_list;
  369. write_lock_bh(&xfrm_policy_lock);
  370. for (p = XFRM_POLICY_LISTHEADP(policy->type, dir); (pol=*p)!=NULL;) {
  371. if (!delpol && memcmp(&policy->selector, &pol->selector, sizeof(pol->selector)) == 0 &&
  372. xfrm_sec_ctx_match(pol->security, policy->security)) {
  373. if (excl) {
  374. write_unlock_bh(&xfrm_policy_lock);
  375. return -EEXIST;
  376. }
  377. *p = pol->next;
  378. delpol = pol;
  379. if (policy->priority > pol->priority)
  380. continue;
  381. } else if (policy->priority >= pol->priority) {
  382. p = &pol->next;
  383. continue;
  384. }
  385. if (!newpos)
  386. newpos = p;
  387. if (delpol)
  388. break;
  389. p = &pol->next;
  390. }
  391. if (newpos)
  392. p = newpos;
  393. xfrm_pol_hold(policy);
  394. policy->next = *p;
  395. *p = policy;
  396. atomic_inc(&flow_cache_genid);
  397. policy->index = delpol ? delpol->index : xfrm_gen_index(policy->type, dir);
  398. policy->curlft.add_time = (unsigned long)xtime.tv_sec;
  399. policy->curlft.use_time = 0;
  400. if (!mod_timer(&policy->timer, jiffies + HZ))
  401. xfrm_pol_hold(policy);
  402. write_unlock_bh(&xfrm_policy_lock);
  403. if (delpol)
  404. xfrm_policy_kill(delpol);
  405. read_lock_bh(&xfrm_policy_lock);
  406. gc_list = NULL;
  407. for (policy = policy->next; policy; policy = policy->next) {
  408. struct dst_entry *dst;
  409. write_lock(&policy->lock);
  410. dst = policy->bundles;
  411. if (dst) {
  412. struct dst_entry *tail = dst;
  413. while (tail->next)
  414. tail = tail->next;
  415. tail->next = gc_list;
  416. gc_list = dst;
  417. policy->bundles = NULL;
  418. }
  419. write_unlock(&policy->lock);
  420. }
  421. read_unlock_bh(&xfrm_policy_lock);
  422. while (gc_list) {
  423. struct dst_entry *dst = gc_list;
  424. gc_list = dst->next;
  425. dst_free(dst);
  426. }
  427. return 0;
  428. }
  429. EXPORT_SYMBOL(xfrm_policy_insert);
  430. struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
  431. struct xfrm_selector *sel,
  432. struct xfrm_sec_ctx *ctx, int delete)
  433. {
  434. struct xfrm_policy *pol, **p;
  435. write_lock_bh(&xfrm_policy_lock);
  436. for (p = XFRM_POLICY_LISTHEADP(type, dir); (pol=*p)!=NULL; p = &pol->next) {
  437. if ((memcmp(sel, &pol->selector, sizeof(*sel)) == 0) &&
  438. (xfrm_sec_ctx_match(ctx, pol->security))) {
  439. xfrm_pol_hold(pol);
  440. if (delete)
  441. *p = pol->next;
  442. break;
  443. }
  444. }
  445. write_unlock_bh(&xfrm_policy_lock);
  446. if (pol && delete) {
  447. atomic_inc(&flow_cache_genid);
  448. xfrm_policy_kill(pol);
  449. }
  450. return pol;
  451. }
  452. EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
  453. struct xfrm_policy *xfrm_policy_byid(u8 type, int dir, u32 id, int delete)
  454. {
  455. struct xfrm_policy *pol, **p;
  456. write_lock_bh(&xfrm_policy_lock);
  457. for (p = XFRM_POLICY_LISTHEADP(type, dir); (pol=*p)!=NULL; p = &pol->next) {
  458. if (pol->index == id) {
  459. xfrm_pol_hold(pol);
  460. if (delete)
  461. *p = pol->next;
  462. break;
  463. }
  464. }
  465. write_unlock_bh(&xfrm_policy_lock);
  466. if (pol && delete) {
  467. atomic_inc(&flow_cache_genid);
  468. xfrm_policy_kill(pol);
  469. }
  470. return pol;
  471. }
  472. EXPORT_SYMBOL(xfrm_policy_byid);
  473. void xfrm_policy_flush(u8 type)
  474. {
  475. struct xfrm_policy *xp;
  476. struct xfrm_policy **p_list = XFRM_POLICY_LISTS(type);
  477. int dir;
  478. write_lock_bh(&xfrm_policy_lock);
  479. for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
  480. while ((xp = p_list[dir]) != NULL) {
  481. p_list[dir] = xp->next;
  482. write_unlock_bh(&xfrm_policy_lock);
  483. xfrm_policy_kill(xp);
  484. write_lock_bh(&xfrm_policy_lock);
  485. }
  486. }
  487. atomic_inc(&flow_cache_genid);
  488. write_unlock_bh(&xfrm_policy_lock);
  489. }
  490. EXPORT_SYMBOL(xfrm_policy_flush);
  491. int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*),
  492. void *data)
  493. {
  494. struct xfrm_policy *xp;
  495. int dir;
  496. int count = 0;
  497. int error = 0;
  498. read_lock_bh(&xfrm_policy_lock);
  499. for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
  500. for (xp = XFRM_POLICY_LISTHEAD(type, dir); xp; xp = xp->next)
  501. count++;
  502. }
  503. if (count == 0) {
  504. error = -ENOENT;
  505. goto out;
  506. }
  507. for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
  508. for (xp = XFRM_POLICY_LISTHEAD(type, dir); xp; xp = xp->next) {
  509. error = func(xp, dir%XFRM_POLICY_MAX, --count, data);
  510. if (error)
  511. goto out;
  512. }
  513. }
  514. out:
  515. read_unlock_bh(&xfrm_policy_lock);
  516. return error;
  517. }
  518. EXPORT_SYMBOL(xfrm_policy_walk);
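/*
 * Resolver used by the flow cache: with CONFIG_XFRM_SUB_POLICY enabled,
 * "sub" policies are consulted first and the "main" list is searched
 * only when no sub policy matches; the matching policy is returned with
 * a reference held.
 */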
  519. /* Find policy to apply to this flow. */
  520. static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
  521. u16 family, u8 dir)
  522. {
  523. struct xfrm_policy *pol;
  524. read_lock_bh(&xfrm_policy_lock);
  525. for (pol = XFRM_POLICY_LISTHEAD(type, dir); pol; pol = pol->next) {
  526. struct xfrm_selector *sel = &pol->selector;
  527. int match;
  528. if (pol->family != family)
  529. continue;
  530. match = xfrm_selector_match(sel, fl, family);
  531. if (match) {
  532. if (!security_xfrm_policy_lookup(pol, fl->secid, dir)) {
  533. xfrm_pol_hold(pol);
  534. break;
  535. }
  536. }
  537. }
  538. read_unlock_bh(&xfrm_policy_lock);
  539. return pol;
  540. }
  541. static void xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
  542. void **objp, atomic_t **obj_refp)
  543. {
  544. struct xfrm_policy *pol;
  545. #ifdef CONFIG_XFRM_SUB_POLICY
  546. pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_SUB, fl, family, dir);
  547. if (pol)
  548. goto end;
  549. #endif
  550. pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN, fl, family, dir);
  551. #ifdef CONFIG_XFRM_SUB_POLICY
  552. end:
  553. #endif
  554. if ((*objp = (void *) pol) != NULL)
  555. *obj_refp = &pol->refcnt;
  556. }
  557. static inline int policy_to_flow_dir(int dir)
  558. {
  559. if (XFRM_POLICY_IN == FLOW_DIR_IN &&
  560. XFRM_POLICY_OUT == FLOW_DIR_OUT &&
  561. XFRM_POLICY_FWD == FLOW_DIR_FWD)
  562. return dir;
  563. switch (dir) {
  564. default:
  565. case XFRM_POLICY_IN:
  566. return FLOW_DIR_IN;
  567. case XFRM_POLICY_OUT:
  568. return FLOW_DIR_OUT;
  569. case XFRM_POLICY_FWD:
  570. return FLOW_DIR_FWD;
  571. };
  572. }
  573. static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
  574. {
  575. struct xfrm_policy *pol;
  576. read_lock_bh(&xfrm_policy_lock);
  577. if ((pol = sk->sk_policy[dir]) != NULL) {
  578. int match = xfrm_selector_match(&pol->selector, fl,
  579. sk->sk_family);
  580. int err = 0;
  581. if (match)
  582. err = security_xfrm_policy_lookup(pol, fl->secid, policy_to_flow_dir(dir));
  583. if (match && !err)
  584. xfrm_pol_hold(pol);
  585. else
  586. pol = NULL;
  587. }
  588. read_unlock_bh(&xfrm_policy_lock);
  589. return pol;
  590. }
  591. static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
  592. {
  593. struct xfrm_policy **p_list = XFRM_POLICY_LISTS(pol->type);
  594. pol->next = p_list[dir];
  595. p_list[dir] = pol;
  596. xfrm_pol_hold(pol);
  597. }
  598. static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
  599. int dir)
  600. {
  601. struct xfrm_policy **polp;
  602. for (polp = XFRM_POLICY_LISTHEADP(pol->type, dir);
  603. *polp != NULL; polp = &(*polp)->next) {
  604. if (*polp == pol) {
  605. *polp = pol->next;
  606. return pol;
  607. }
  608. }
  609. return NULL;
  610. }
  611. int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
  612. {
  613. write_lock_bh(&xfrm_policy_lock);
  614. pol = __xfrm_policy_unlink(pol, dir);
  615. write_unlock_bh(&xfrm_policy_lock);
  616. if (pol) {
  617. if (dir < XFRM_POLICY_MAX)
  618. atomic_inc(&flow_cache_genid);
  619. xfrm_policy_kill(pol);
  620. return 0;
  621. }
  622. return -ENOENT;
  623. }
  624. EXPORT_SYMBOL(xfrm_policy_delete);
  625. int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
  626. {
  627. struct xfrm_policy *old_pol;
  628. #ifdef CONFIG_XFRM_SUB_POLICY
  629. if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
  630. return -EINVAL;
  631. #endif
  632. write_lock_bh(&xfrm_policy_lock);
  633. old_pol = sk->sk_policy[dir];
  634. sk->sk_policy[dir] = pol;
  635. if (pol) {
  636. pol->curlft.add_time = (unsigned long)xtime.tv_sec;
  637. pol->index = xfrm_gen_index(pol->type, XFRM_POLICY_MAX+dir);
  638. __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
  639. }
  640. if (old_pol)
  641. __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
  642. write_unlock_bh(&xfrm_policy_lock);
  643. if (old_pol) {
  644. xfrm_policy_kill(old_pol);
  645. }
  646. return 0;
  647. }
  648. static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
  649. {
  650. struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);
  651. if (newp) {
  652. newp->selector = old->selector;
  653. if (security_xfrm_policy_clone(old, newp)) {
  654. kfree(newp);
  655. return NULL; /* ENOMEM */
  656. }
  657. newp->lft = old->lft;
  658. newp->curlft = old->curlft;
  659. newp->action = old->action;
  660. newp->flags = old->flags;
  661. newp->xfrm_nr = old->xfrm_nr;
  662. newp->index = old->index;
  663. newp->type = old->type;
  664. memcpy(newp->xfrm_vec, old->xfrm_vec,
  665. newp->xfrm_nr*sizeof(struct xfrm_tmpl));
  666. write_lock_bh(&xfrm_policy_lock);
  667. __xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
  668. write_unlock_bh(&xfrm_policy_lock);
  669. xfrm_pol_put(newp);
  670. }
  671. return newp;
  672. }
  673. int __xfrm_sk_clone_policy(struct sock *sk)
  674. {
  675. struct xfrm_policy *p0 = sk->sk_policy[0],
  676. *p1 = sk->sk_policy[1];
  677. sk->sk_policy[0] = sk->sk_policy[1] = NULL;
  678. if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
  679. return -ENOMEM;
  680. if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
  681. return -ENOMEM;
  682. return 0;
  683. }
  684. /* Resolve list of templates for the flow, given policy. */
  685. static int
  686. xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
  687. struct xfrm_state **xfrm,
  688. unsigned short family)
  689. {
  690. int nx;
  691. int i, error;
  692. xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
  693. xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
  694. for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
  695. struct xfrm_state *x;
  696. xfrm_address_t *remote = daddr;
  697. xfrm_address_t *local = saddr;
  698. struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
  699. if (tmpl->mode == XFRM_MODE_TUNNEL) {
  700. remote = &tmpl->id.daddr;
  701. local = &tmpl->saddr;
  702. }
  703. x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
  704. if (x && x->km.state == XFRM_STATE_VALID) {
  705. xfrm[nx++] = x;
  706. daddr = remote;
  707. saddr = local;
  708. continue;
  709. }
  710. if (x) {
  711. error = (x->km.state == XFRM_STATE_ERROR ?
  712. -EINVAL : -EAGAIN);
  713. xfrm_state_put(x);
  714. }
  715. if (!tmpl->optional)
  716. goto fail;
  717. }
  718. return nx;
  719. fail:
  720. for (nx--; nx>=0; nx--)
  721. xfrm_state_put(xfrm[nx]);
  722. return error;
  723. }
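/*
 * xfrm_tmpl_resolve() below resolves the templates of all matching
 * policies into xfrm_states.  The combined depth is capped at
 * XFRM_MAX_DEPTH (-ENOBUFS otherwise); when more than one policy is
 * involved, the states are first collected into a scratch array and
 * then ordered with xfrm_state_sort() for outbound processing.  On
 * failure, every state acquired so far is released again.
 */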
  724. static int
  725. xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
  726. struct xfrm_state **xfrm,
  727. unsigned short family)
  728. {
  729. struct xfrm_state *tp[XFRM_MAX_DEPTH];
  730. struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
  731. int cnx = 0;
  732. int error;
  733. int ret;
  734. int i;
  735. for (i = 0; i < npols; i++) {
  736. if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
  737. error = -ENOBUFS;
  738. goto fail;
  739. }
  740. ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
  741. if (ret < 0) {
  742. error = ret;
  743. goto fail;
  744. } else
  745. cnx += ret;
  746. }
  747. /* found states are sorted for outbound processing */
  748. if (npols > 1)
  749. xfrm_state_sort(xfrm, tpp, cnx, family);
  750. return cnx;
  751. fail:
  752. for (cnx--; cnx>=0; cnx--)
  753. xfrm_state_put(tpp[cnx]);
  754. return error;
  755. }
  756. /* Check that the bundle accepts the flow and its components are
  757. * still valid.
  758. */
  759. static struct dst_entry *
  760. xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
  761. {
  762. struct dst_entry *x;
  763. struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  764. if (unlikely(afinfo == NULL))
  765. return ERR_PTR(-EINVAL);
  766. x = afinfo->find_bundle(fl, policy);
  767. xfrm_policy_put_afinfo(afinfo);
  768. return x;
  769. }
  770. /* Allocate a chain of dst_entry's, attach known xfrm's, calculate
  771. * all the metrics... In short, bundle a bundle.
  772. */
  773. static int
  774. xfrm_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
  775. struct flowi *fl, struct dst_entry **dst_p,
  776. unsigned short family)
  777. {
  778. int err;
  779. struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  780. if (unlikely(afinfo == NULL))
  781. return -EINVAL;
  782. err = afinfo->bundle_create(policy, xfrm, nx, fl, dst_p);
  783. xfrm_policy_put_afinfo(afinfo);
  784. return err;
  785. }
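/*
 * Outline of xfrm_lookup(): find the policy (per-socket policy first,
 * then the flow cache), honour XFRM_POLICY_BLOCK with -EPERM, and pass
 * untransformed flows through on the original route.  Otherwise resolve
 * the templates into states (optionally sleeping on km_waitq when
 * resolution returns -EAGAIN and blocking is allowed), build the bundle
 * of xfrm_dst entries and, provided the policy is still alive and the
 * bundle is not stale, link it into policy->bundles for reuse.
 */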
  786. static int stale_bundle(struct dst_entry *dst);
  787. /* Main function: finds/creates a bundle for given flow.
  788. *
  789. * At the moment we eat a raw IP route. Mostly to speed up lookups
  790. * on interfaces with disabled IPsec.
  791. */
  792. int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
  793. struct sock *sk, int flags)
  794. {
  795. struct xfrm_policy *policy;
  796. struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
  797. int npols;
  798. int pol_dead;
  799. int xfrm_nr;
  800. int pi;
  801. struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
  802. struct dst_entry *dst, *dst_orig = *dst_p;
  803. int nx = 0;
  804. int err;
  805. u32 genid;
  806. u16 family;
  807. u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
  808. restart:
  809. genid = atomic_read(&flow_cache_genid);
  810. policy = NULL;
  811. for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
  812. pols[pi] = NULL;
  813. npols = 0;
  814. pol_dead = 0;
  815. xfrm_nr = 0;
  816. if (sk && sk->sk_policy[1])
  817. policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
  818. if (!policy) {
  819. /* To accelerate a bit... */
  820. if ((dst_orig->flags & DST_NOXFRM) || xfrm_policy_lists_empty(XFRM_POLICY_OUT))
  821. return 0;
  822. policy = flow_cache_lookup(fl, dst_orig->ops->family,
  823. dir, xfrm_policy_lookup);
  824. }
  825. if (!policy)
  826. return 0;
  827. family = dst_orig->ops->family;
  828. policy->curlft.use_time = (unsigned long)xtime.tv_sec;
  829. pols[0] = policy;
  830. npols ++;
  831. xfrm_nr += pols[0]->xfrm_nr;
  832. switch (policy->action) {
  833. case XFRM_POLICY_BLOCK:
  834. /* Prohibit the flow */
  835. err = -EPERM;
  836. goto error;
  837. case XFRM_POLICY_ALLOW:
  838. #ifndef CONFIG_XFRM_SUB_POLICY
  839. if (policy->xfrm_nr == 0) {
  840. /* Flow passes not transformed. */
  841. xfrm_pol_put(policy);
  842. return 0;
  843. }
  844. #endif
  845. /* Try to find matching bundle.
  846. *
  847. * LATER: help from flow cache. It is optional, this
  848. * is required only for output policy.
  849. */
  850. dst = xfrm_find_bundle(fl, policy, family);
  851. if (IS_ERR(dst)) {
  852. err = PTR_ERR(dst);
  853. goto error;
  854. }
  855. if (dst)
  856. break;
  857. #ifdef CONFIG_XFRM_SUB_POLICY
  858. if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
  859. pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
  860. fl, family,
  861. XFRM_POLICY_OUT);
  862. if (pols[1]) {
  863. if (pols[1]->action == XFRM_POLICY_BLOCK) {
  864. err = -EPERM;
  865. goto error;
  866. }
  867. npols ++;
  868. xfrm_nr += pols[1]->xfrm_nr;
  869. }
  870. }
  871. /*
  872. * Neither the flowi nor the bundle information knows about the
  873. * transformation template size, so with more than one policy in
  874. * use we can only tell whether all of them are bypass after they
  875. * have all been searched. Note that the not-transformed bypass
  876. * case above is likewise guarded by the non-sub-policy configuration.
  877. */
  878. if (xfrm_nr == 0) {
  879. /* Flow passes not transformed. */
  880. xfrm_pols_put(pols, npols);
  881. return 0;
  882. }
  883. #endif
  884. nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);
  885. if (unlikely(nx<0)) {
  886. err = nx;
  887. if (err == -EAGAIN && flags) {
  888. DECLARE_WAITQUEUE(wait, current);
  889. add_wait_queue(&km_waitq, &wait);
  890. set_current_state(TASK_INTERRUPTIBLE);
  891. schedule();
  892. set_current_state(TASK_RUNNING);
  893. remove_wait_queue(&km_waitq, &wait);
  894. nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);
  895. if (nx == -EAGAIN && signal_pending(current)) {
  896. err = -ERESTART;
  897. goto error;
  898. }
  899. if (nx == -EAGAIN ||
  900. genid != atomic_read(&flow_cache_genid)) {
  901. xfrm_pols_put(pols, npols);
  902. goto restart;
  903. }
  904. err = nx;
  905. }
  906. if (err < 0)
  907. goto error;
  908. }
  909. if (nx == 0) {
  910. /* Flow passes not transformed. */
  911. xfrm_pols_put(pols, npols);
  912. return 0;
  913. }
  914. dst = dst_orig;
  915. err = xfrm_bundle_create(policy, xfrm, nx, fl, &dst, family);
  916. if (unlikely(err)) {
  917. int i;
  918. for (i=0; i<nx; i++)
  919. xfrm_state_put(xfrm[i]);
  920. goto error;
  921. }
  922. for (pi = 0; pi < npols; pi++) {
  923. read_lock_bh(&pols[pi]->lock);
  924. pol_dead |= pols[pi]->dead;
  925. read_unlock_bh(&pols[pi]->lock);
  926. }
  927. write_lock_bh(&policy->lock);
  928. if (unlikely(pol_dead || stale_bundle(dst))) {
  929. /* Wow! While we worked on resolving, this
  930. * policy has gone. Retry. It is not paranoia,
  931. * we just cannot enlist a new bundle on a dead object.
  932. * We can't enlist stale bundles either.
  933. */
  934. write_unlock_bh(&policy->lock);
  935. if (dst)
  936. dst_free(dst);
  937. err = -EHOSTUNREACH;
  938. goto error;
  939. }
  940. dst->next = policy->bundles;
  941. policy->bundles = dst;
  942. dst_hold(dst);
  943. write_unlock_bh(&policy->lock);
  944. }
  945. *dst_p = dst;
  946. dst_release(dst_orig);
  947. xfrm_pols_put(pols, npols);
  948. return 0;
  949. error:
  950. dst_release(dst_orig);
  951. xfrm_pols_put(pols, npols);
  952. *dst_p = NULL;
  953. return err;
  954. }
  955. EXPORT_SYMBOL(xfrm_lookup);
  956. static inline int
  957. xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
  958. {
  959. struct xfrm_state *x;
  960. int err;
  961. if (!skb->sp || idx < 0 || idx >= skb->sp->len)
  962. return 0;
  963. x = skb->sp->xvec[idx];
  964. if (!x->type->reject)
  965. return 0;
  966. xfrm_state_hold(x);
  967. err = x->type->reject(x, skb, fl);
  968. xfrm_state_put(x);
  969. return err;
  970. }
  971. /* When skb is transformed back to its "native" form, we have to
  972. * check policy restrictions. At the moment we do this in a maximally
  973. * stupid way. Shame on me. :-) Of course, connected sockets must
  974. * have their policy cached on them.
  975. */
  976. static inline int
  977. xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
  978. unsigned short family)
  979. {
  980. if (xfrm_state_kern(x))
  981. return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, family);
  982. return x->id.proto == tmpl->id.proto &&
  983. (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
  984. (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
  985. x->props.mode == tmpl->mode &&
  986. ((tmpl->aalgos & (1<<x->props.aalgo)) ||
  987. !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
  988. !(x->props.mode != XFRM_MODE_TRANSPORT &&
  989. xfrm_state_addr_cmp(tmpl, x, family));
  990. }
  991. /*
  992. * Zero or a positive value is returned when validation succeeds (either a
  993. * bypass because of an optional transport-mode template, or the next index
  994. * of the secpath state matched against the template).
  995. * -1 is returned when no matching template is found.
  996. * Otherwise "-2 - errored_index" is returned.
  997. */
  998. static inline int
  999. xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
  1000. unsigned short family)
  1001. {
  1002. int idx = start;
  1003. if (tmpl->optional) {
  1004. if (tmpl->mode == XFRM_MODE_TRANSPORT)
  1005. return start;
  1006. } else
  1007. start = -1;
  1008. for (; idx < sp->len; idx++) {
  1009. if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
  1010. return ++idx;
  1011. if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
  1012. if (start == -1)
  1013. start = -2-idx;
  1014. break;
  1015. }
  1016. }
  1017. return start;
  1018. }
  1019. int
  1020. xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
  1021. {
  1022. struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  1023. int err;
  1024. if (unlikely(afinfo == NULL))
  1025. return -EAFNOSUPPORT;
  1026. afinfo->decode_session(skb, fl);
  1027. err = security_xfrm_decode_session(skb, &fl->secid);
  1028. xfrm_policy_put_afinfo(afinfo);
  1029. return err;
  1030. }
  1031. EXPORT_SYMBOL(xfrm_decode_session);
  1032. static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp)
  1033. {
  1034. for (; k < sp->len; k++) {
  1035. if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
  1036. if (idxp)
  1037. *idxp = k;
  1038. return 1;
  1039. }
  1040. }
  1041. return 0;
  1042. }
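/*
 * Inbound policy check: decode the flow from the skb, verify that every
 * SA in the secpath still matches its own selector, then look up the
 * policy (per-socket first, flow cache otherwise).  The templates of all
 * applicable policies are collected (and sorted when more than one
 * policy applies) and matched against the secpath with xfrm_policy_ok();
 * a mismatch is rejected via xfrm_secpath_reject().
 */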
  1043. int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
  1044. unsigned short family)
  1045. {
  1046. struct xfrm_policy *pol;
  1047. struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
  1048. int npols = 0;
  1049. int xfrm_nr;
  1050. int pi;
  1051. struct flowi fl;
  1052. u8 fl_dir = policy_to_flow_dir(dir);
  1053. int xerr_idx = -1;
  1054. int *xerr_idxp = &xerr_idx;
  1055. if (xfrm_decode_session(skb, &fl, family) < 0)
  1056. return 0;
  1057. nf_nat_decode_session(skb, &fl, family);
  1058. /* First, check the used SAs against their selectors. */
  1059. if (skb->sp) {
  1060. int i;
  1061. for (i=skb->sp->len-1; i>=0; i--) {
  1062. struct xfrm_state *x = skb->sp->xvec[i];
  1063. if (!xfrm_selector_match(&x->sel, &fl, family))
  1064. return 0;
  1065. }
  1066. }
  1067. pol = NULL;
  1068. if (sk && sk->sk_policy[dir])
  1069. pol = xfrm_sk_policy_lookup(sk, dir, &fl);
  1070. if (!pol)
  1071. pol = flow_cache_lookup(&fl, family, fl_dir,
  1072. xfrm_policy_lookup);
  1073. if (!pol) {
  1074. if (skb->sp && secpath_has_nontransport(skb->sp, 0, xerr_idxp)) {
  1075. xfrm_secpath_reject(xerr_idx, skb, &fl);
  1076. return 0;
  1077. }
  1078. return 1;
  1079. }
  1080. pol->curlft.use_time = (unsigned long)xtime.tv_sec;
  1081. pols[0] = pol;
  1082. npols ++;
  1083. #ifdef CONFIG_XFRM_SUB_POLICY
  1084. if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
  1085. pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
  1086. &fl, family,
  1087. XFRM_POLICY_IN);
  1088. if (pols[1]) {
  1089. pols[1]->curlft.use_time = (unsigned long)xtime.tv_sec;
  1090. npols ++;
  1091. }
  1092. }
  1093. #endif
  1094. if (pol->action == XFRM_POLICY_ALLOW) {
  1095. struct sec_path *sp;
  1096. static struct sec_path dummy;
  1097. struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
  1098. struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
  1099. struct xfrm_tmpl **tpp = tp;
  1100. int ti = 0;
  1101. int i, k;
  1102. if ((sp = skb->sp) == NULL)
  1103. sp = &dummy;
  1104. for (pi = 0; pi < npols; pi++) {
  1105. if (pols[pi] != pol &&
  1106. pols[pi]->action != XFRM_POLICY_ALLOW)
  1107. goto reject;
  1108. if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH)
  1109. goto reject_error;
  1110. for (i = 0; i < pols[pi]->xfrm_nr; i++)
  1111. tpp[ti++] = &pols[pi]->xfrm_vec[i];
  1112. }
  1113. xfrm_nr = ti;
  1114. if (npols > 1) {
  1115. xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
  1116. tpp = stp;
  1117. }
  1118. /* For each tunnel xfrm, find the first matching tmpl.
  1119. * For each tmpl before that, find corresponding xfrm.
  1120. * Order is _important_. Later we will implement
  1121. * some barriers, but at the moment barriers
  1122. * are implied between each two transformations.
  1123. */
  1124. for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
  1125. k = xfrm_policy_ok(tpp[i], sp, k, family);
  1126. if (k < 0) {
  1127. if (k < -1 && xerr_idxp)
  1128. *xerr_idxp = -(2+k);
  1129. goto reject;
  1130. }
  1131. }
  1132. if (secpath_has_nontransport(sp, k, xerr_idxp))
  1133. goto reject;
  1134. xfrm_pols_put(pols, npols);
  1135. return 1;
  1136. }
  1137. reject:
  1138. xfrm_secpath_reject(xerr_idx, skb, &fl);
  1139. reject_error:
  1140. xfrm_pols_put(pols, npols);
  1141. return 0;
  1142. }
  1143. EXPORT_SYMBOL(__xfrm_policy_check);
  1144. int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
  1145. {
  1146. struct flowi fl;
  1147. if (xfrm_decode_session(skb, &fl, family) < 0)
  1148. return 0;
  1149. return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
  1150. }
  1151. EXPORT_SYMBOL(__xfrm_route_forward);
  1152. /* Optimize later using cookies and generation ids. */
  1153. static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
  1154. {
  1155. /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
  1156. * to "-1" to force all XFRM destinations to get validated by
  1157. * dst_ops->check on every use. We do this because when a
  1158. * normal route referenced by an XFRM dst is obsoleted we do
  1159. * not go looking around for all parent referencing XFRM dsts
  1160. * so that we can invalidate them. It is just too much work.
  1161. * Instead we make the checks here on every use. For example:
  1162. *
  1163. * XFRM dst A --> IPv4 dst X
  1164. *
  1165. * X is the "xdst->route" of A (X is also the "dst->path" of A
  1166. * in this example). If X is marked obsolete, "A" will not
  1167. * notice. That's what we are validating here via the
  1168. * stale_bundle() check.
  1169. *
  1170. * When a policy's bundle is pruned, we dst_free() the XFRM
  1171. * dst which causes its ->obsolete field to be set to a
  1172. * positive non-zero integer. If an XFRM dst has been pruned
  1173. * like this, we want to force a new route lookup.
  1174. */
  1175. if (dst->obsolete < 0 && !stale_bundle(dst))
  1176. return dst;
  1177. return NULL;
  1178. }
  1179. static int stale_bundle(struct dst_entry *dst)
  1180. {
  1181. return !xfrm_bundle_ok((struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
  1182. }
  1183. void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
  1184. {
  1185. while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
  1186. dst->dev = &loopback_dev;
  1187. dev_hold(&loopback_dev);
  1188. dev_put(dev);
  1189. }
  1190. }
  1191. EXPORT_SYMBOL(xfrm_dst_ifdown);
  1192. static void xfrm_link_failure(struct sk_buff *skb)
  1193. {
  1194. /* Impossible. Such a dst must be popped before it reaches the point of failure. */
  1195. return;
  1196. }
  1197. static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
  1198. {
  1199. if (dst) {
  1200. if (dst->obsolete) {
  1201. dst_release(dst);
  1202. dst = NULL;
  1203. }
  1204. }
  1205. return dst;
  1206. }
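/*
 * xfrm_prune_bundles() walks every policy list and detaches the cached
 * bundles for which the predicate returns true, freeing them after the
 * locks are dropped.  The predicates used are stale_bundle() (device
 * went down), unused_bundle() (garbage collection of unreferenced
 * bundles) and always_true() (flush everything).
 */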
  1207. static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
  1208. {
  1209. int i;
  1210. struct xfrm_policy *pol;
  1211. struct dst_entry *dst, **dstp, *gc_list = NULL;
  1212. read_lock_bh(&xfrm_policy_lock);
  1213. for (i=0; i<2*XFRM_POLICY_MAX; i++) {
  1214. #ifdef CONFIG_XFRM_SUB_POLICY
  1215. for (pol = xfrm_policy_list_sub[i]; pol; pol = pol->next) {
  1216. write_lock(&pol->lock);
  1217. dstp = &pol->bundles;
  1218. while ((dst=*dstp) != NULL) {
  1219. if (func(dst)) {
  1220. *dstp = dst->next;
  1221. dst->next = gc_list;
  1222. gc_list = dst;
  1223. } else {
  1224. dstp = &dst->next;
  1225. }
  1226. }
  1227. write_unlock(&pol->lock);
  1228. }
  1229. #endif
  1230. for (pol = xfrm_policy_list[i]; pol; pol = pol->next) {
  1231. write_lock(&pol->lock);
  1232. dstp = &pol->bundles;
  1233. while ((dst=*dstp) != NULL) {
  1234. if (func(dst)) {
  1235. *dstp = dst->next;
  1236. dst->next = gc_list;
  1237. gc_list = dst;
  1238. } else {
  1239. dstp = &dst->next;
  1240. }
  1241. }
  1242. write_unlock(&pol->lock);
  1243. }
  1244. }
  1245. read_unlock_bh(&xfrm_policy_lock);
  1246. while (gc_list) {
  1247. dst = gc_list;
  1248. gc_list = dst->next;
  1249. dst_free(dst);
  1250. }
  1251. }
  1252. static int unused_bundle(struct dst_entry *dst)
  1253. {
  1254. return !atomic_read(&dst->__refcnt);
  1255. }
  1256. static void __xfrm_garbage_collect(void)
  1257. {
  1258. xfrm_prune_bundles(unused_bundle);
  1259. }
  1260. int xfrm_flush_bundles(void)
  1261. {
  1262. xfrm_prune_bundles(stale_bundle);
  1263. return 0;
  1264. }
  1265. static int always_true(struct dst_entry *dst)
  1266. {
  1267. return 1;
  1268. }
  1269. void xfrm_flush_all_bundles(void)
  1270. {
  1271. xfrm_prune_bundles(always_true);
  1272. }
  1273. void xfrm_init_pmtu(struct dst_entry *dst)
  1274. {
  1275. do {
  1276. struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
  1277. u32 pmtu, route_mtu_cached;
  1278. pmtu = dst_mtu(dst->child);
  1279. xdst->child_mtu_cached = pmtu;
  1280. pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
  1281. route_mtu_cached = dst_mtu(xdst->route);
  1282. xdst->route_mtu_cached = route_mtu_cached;
  1283. if (pmtu > route_mtu_cached)
  1284. pmtu = route_mtu_cached;
  1285. dst->metrics[RTAX_MTU-1] = pmtu;
  1286. } while ((dst = dst->next));
  1287. }
  1288. EXPORT_SYMBOL(xfrm_init_pmtu);
  1289. /* Check that the bundle accepts the flow and its components are
  1290. * still valid.
  1291. */
  1292. int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family, int strict)
  1293. {
  1294. struct dst_entry *dst = &first->u.dst;
  1295. struct xfrm_dst *last;
  1296. u32 mtu;
  1297. if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
  1298. (dst->dev && !netif_running(dst->dev)))
  1299. return 0;
  1300. last = NULL;
  1301. do {
  1302. struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
  1303. if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
  1304. return 0;
  1305. if (fl && !security_xfrm_flow_state_match(fl, dst->xfrm))
  1306. return 0;
  1307. if (dst->xfrm->km.state != XFRM_STATE_VALID)
  1308. return 0;
  1309. if (strict && fl && dst->xfrm->props.mode != XFRM_MODE_TUNNEL &&
  1310. !xfrm_state_addr_flow_check(dst->xfrm, fl, family))
  1311. return 0;
  1312. mtu = dst_mtu(dst->child);
  1313. if (xdst->child_mtu_cached != mtu) {
  1314. last = xdst;
  1315. xdst->child_mtu_cached = mtu;
  1316. }
  1317. if (!dst_check(xdst->route, xdst->route_cookie))
  1318. return 0;
  1319. mtu = dst_mtu(xdst->route);
  1320. if (xdst->route_mtu_cached != mtu) {
  1321. last = xdst;
  1322. xdst->route_mtu_cached = mtu;
  1323. }
  1324. dst = dst->child;
  1325. } while (dst->xfrm);
  1326. if (likely(!last))
  1327. return 1;
  1328. mtu = last->child_mtu_cached;
  1329. for (;;) {
  1330. dst = &last->u.dst;
  1331. mtu = xfrm_state_mtu(dst->xfrm, mtu);
  1332. if (mtu > last->route_mtu_cached)
  1333. mtu = last->route_mtu_cached;
  1334. dst->metrics[RTAX_MTU-1] = mtu;
  1335. if (last == first)
  1336. break;
  1337. last = last->u.next;
  1338. last->child_mtu_cached = mtu;
  1339. }
  1340. return 1;
  1341. }
  1342. EXPORT_SYMBOL(xfrm_bundle_ok);
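/*
 * Address-family modules hook in through xfrm_policy_register_afinfo():
 * any dst_ops callbacks they leave NULL are filled with the generic
 * xfrm implementations (dst cache, check, negative_advice, link_failure),
 * and a default garbage collector is installed, all under
 * xfrm_policy_afinfo_lock.
 */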
  1343. int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
  1344. {
  1345. int err = 0;
  1346. if (unlikely(afinfo == NULL))
  1347. return -EINVAL;
  1348. if (unlikely(afinfo->family >= NPROTO))
  1349. return -EAFNOSUPPORT;
  1350. write_lock_bh(&xfrm_policy_afinfo_lock);
  1351. if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
  1352. err = -ENOBUFS;
  1353. else {
  1354. struct dst_ops *dst_ops = afinfo->dst_ops;
  1355. if (likely(dst_ops->kmem_cachep == NULL))
  1356. dst_ops->kmem_cachep = xfrm_dst_cache;
  1357. if (likely(dst_ops->check == NULL))
  1358. dst_ops->check = xfrm_dst_check;
  1359. if (likely(dst_ops->negative_advice == NULL))
  1360. dst_ops->negative_advice = xfrm_negative_advice;
  1361. if (likely(dst_ops->link_failure == NULL))
  1362. dst_ops->link_failure = xfrm_link_failure;
  1363. if (likely(afinfo->garbage_collect == NULL))
  1364. afinfo->garbage_collect = __xfrm_garbage_collect;
  1365. xfrm_policy_afinfo[afinfo->family] = afinfo;
  1366. }
  1367. write_unlock_bh(&xfrm_policy_afinfo_lock);
  1368. return err;
  1369. }
  1370. EXPORT_SYMBOL(xfrm_policy_register_afinfo);
  1371. int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
  1372. {
  1373. int err = 0;
  1374. if (unlikely(afinfo == NULL))
  1375. return -EINVAL;
  1376. if (unlikely(afinfo->family >= NPROTO))
  1377. return -EAFNOSUPPORT;
  1378. write_lock_bh(&xfrm_policy_afinfo_lock);
  1379. if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
  1380. if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
  1381. err = -EINVAL;
  1382. else {
  1383. struct dst_ops *dst_ops = afinfo->dst_ops;
  1384. xfrm_policy_afinfo[afinfo->family] = NULL;
  1385. dst_ops->kmem_cachep = NULL;
  1386. dst_ops->check = NULL;
  1387. dst_ops->negative_advice = NULL;
  1388. dst_ops->link_failure = NULL;
  1389. afinfo->garbage_collect = NULL;
  1390. }
  1391. }
  1392. write_unlock_bh(&xfrm_policy_afinfo_lock);
  1393. return err;
  1394. }
  1395. EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
  1396. static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
  1397. {
  1398. struct xfrm_policy_afinfo *afinfo;
  1399. if (unlikely(family >= NPROTO))
  1400. return NULL;
  1401. read_lock(&xfrm_policy_afinfo_lock);
  1402. afinfo = xfrm_policy_afinfo[family];
  1403. if (unlikely(!afinfo))
  1404. read_unlock(&xfrm_policy_afinfo_lock);
  1405. return afinfo;
  1406. }
  1407. static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
  1408. {
  1409. read_unlock(&xfrm_policy_afinfo_lock);
  1410. }
  1411. static struct xfrm_policy_afinfo *xfrm_policy_lock_afinfo(unsigned int family)
  1412. {
  1413. struct xfrm_policy_afinfo *afinfo;
  1414. if (unlikely(family >= NPROTO))
  1415. return NULL;
  1416. write_lock_bh(&xfrm_policy_afinfo_lock);
  1417. afinfo = xfrm_policy_afinfo[family];
  1418. if (unlikely(!afinfo))
  1419. write_unlock_bh(&xfrm_policy_afinfo_lock);
  1420. return afinfo;
  1421. }
  1422. static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo)
  1423. {
  1424. write_unlock_bh(&xfrm_policy_afinfo_lock);
  1425. }
  1426. static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
  1427. {
  1428. switch (event) {
  1429. case NETDEV_DOWN:
  1430. xfrm_flush_bundles();
  1431. }
  1432. return NOTIFY_DONE;
  1433. }
  1434. static struct notifier_block xfrm_dev_notifier = {
  1435. xfrm_dev_event,
  1436. NULL,
  1437. 0
  1438. };
  1439. static void __init xfrm_policy_init(void)
  1440. {
  1441. xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
  1442. sizeof(struct xfrm_dst),
  1443. 0, SLAB_HWCACHE_ALIGN,
  1444. NULL, NULL);
  1445. if (!xfrm_dst_cache)
  1446. panic("XFRM: failed to allocate xfrm_dst_cache\n");
  1447. INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
  1448. register_netdevice_notifier(&xfrm_dev_notifier);
  1449. }
  1450. void __init xfrm_init(void)
  1451. {
  1452. xfrm_state_init();
  1453. xfrm_policy_init();
  1454. xfrm_input_init();
  1455. }