xfrm_policy.c 61 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
0552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248224922502251225222532254225522562257225822592260226122622263226422652266226722682269227022712272227322742275227622772278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644
  1. /*
  2. * xfrm_policy.c
  3. *
  4. * Changes:
  5. * Mitsuru KANDA @USAGI
  6. * Kazunori MIYAZAWA @USAGI
  7. * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
  8. * IPv6 support
  9. * Kazunori MIYAZAWA @USAGI
  10. * YOSHIFUJI Hideaki
  11. * Split up af-specific portion
  12. * Derek Atkins <derek@ihtfp.com> Add the post_input processor
  13. *
  14. */
  15. #include <linux/slab.h>
  16. #include <linux/kmod.h>
  17. #include <linux/list.h>
  18. #include <linux/spinlock.h>
  19. #include <linux/workqueue.h>
  20. #include <linux/notifier.h>
  21. #include <linux/netdevice.h>
  22. #include <linux/netfilter.h>
  23. #include <linux/module.h>
  24. #include <linux/cache.h>
  25. #include <linux/audit.h>
  26. #include <net/xfrm.h>
  27. #include <net/ip.h>
  28. #include "xfrm_hash.h"
  29. int sysctl_xfrm_larval_drop __read_mostly;
  30. DEFINE_MUTEX(xfrm_cfg_mutex);
  31. EXPORT_SYMBOL(xfrm_cfg_mutex);
  32. static DEFINE_RWLOCK(xfrm_policy_lock);
  33. unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2];
  34. EXPORT_SYMBOL(xfrm_policy_count);
  35. static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
  36. static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
  37. static struct kmem_cache *xfrm_dst_cache __read_mostly;
  38. static struct work_struct xfrm_policy_gc_work;
  39. static HLIST_HEAD(xfrm_policy_gc_list);
  40. static DEFINE_SPINLOCK(xfrm_policy_gc_lock);
  41. static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
  42. static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
  43. static struct xfrm_policy_afinfo *xfrm_policy_lock_afinfo(unsigned int family);
  44. static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo);
  45. static inline int
  46. __xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
  47. {
  48. return addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
  49. addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
  50. !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
  51. !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
  52. (fl->proto == sel->proto || !sel->proto) &&
  53. (fl->oif == sel->ifindex || !sel->ifindex);
  54. }
  55. static inline int
  56. __xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
  57. {
  58. return addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
  59. addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
  60. !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
  61. !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
  62. (fl->proto == sel->proto || !sel->proto) &&
  63. (fl->oif == sel->ifindex || !sel->ifindex);
  64. }
  65. int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
  66. unsigned short family)
  67. {
  68. switch (family) {
  69. case AF_INET:
  70. return __xfrm4_selector_match(sel, fl);
  71. case AF_INET6:
  72. return __xfrm6_selector_match(sel, fl);
  73. }
  74. return 0;
  75. }
  76. int xfrm_register_type(struct xfrm_type *type, unsigned short family)
  77. {
  78. struct xfrm_policy_afinfo *afinfo = xfrm_policy_lock_afinfo(family);
  79. struct xfrm_type **typemap;
  80. int err = 0;
  81. if (unlikely(afinfo == NULL))
  82. return -EAFNOSUPPORT;
  83. typemap = afinfo->type_map;
  84. if (likely(typemap[type->proto] == NULL))
  85. typemap[type->proto] = type;
  86. else
  87. err = -EEXIST;
  88. xfrm_policy_unlock_afinfo(afinfo);
  89. return err;
  90. }
  91. EXPORT_SYMBOL(xfrm_register_type);
  92. int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
  93. {
  94. struct xfrm_policy_afinfo *afinfo = xfrm_policy_lock_afinfo(family);
  95. struct xfrm_type **typemap;
  96. int err = 0;
  97. if (unlikely(afinfo == NULL))
  98. return -EAFNOSUPPORT;
  99. typemap = afinfo->type_map;
  100. if (unlikely(typemap[type->proto] != type))
  101. err = -ENOENT;
  102. else
  103. typemap[type->proto] = NULL;
  104. xfrm_policy_unlock_afinfo(afinfo);
  105. return err;
  106. }
  107. EXPORT_SYMBOL(xfrm_unregister_type);
  108. struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
  109. {
  110. struct xfrm_policy_afinfo *afinfo;
  111. struct xfrm_type **typemap;
  112. struct xfrm_type *type;
  113. int modload_attempted = 0;
  114. retry:
  115. afinfo = xfrm_policy_get_afinfo(family);
  116. if (unlikely(afinfo == NULL))
  117. return NULL;
  118. typemap = afinfo->type_map;
  119. type = typemap[proto];
  120. if (unlikely(type && !try_module_get(type->owner)))
  121. type = NULL;
  122. if (!type && !modload_attempted) {
  123. xfrm_policy_put_afinfo(afinfo);
  124. request_module("xfrm-type-%d-%d",
  125. (int) family, (int) proto);
  126. modload_attempted = 1;
  127. goto retry;
  128. }
  129. xfrm_policy_put_afinfo(afinfo);
  130. return type;
  131. }
  132. int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl,
  133. unsigned short family)
  134. {
  135. struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  136. int err = 0;
  137. if (unlikely(afinfo == NULL))
  138. return -EAFNOSUPPORT;
  139. if (likely(afinfo->dst_lookup != NULL))
  140. err = afinfo->dst_lookup(dst, fl);
  141. else
  142. err = -EINVAL;
  143. xfrm_policy_put_afinfo(afinfo);
  144. return err;
  145. }
  146. EXPORT_SYMBOL(xfrm_dst_lookup);
  147. void xfrm_put_type(struct xfrm_type *type)
  148. {
  149. module_put(type->owner);
  150. }
  151. int xfrm_register_mode(struct xfrm_mode *mode, int family)
  152. {
  153. struct xfrm_policy_afinfo *afinfo;
  154. struct xfrm_mode **modemap;
  155. int err;
  156. if (unlikely(mode->encap >= XFRM_MODE_MAX))
  157. return -EINVAL;
  158. afinfo = xfrm_policy_lock_afinfo(family);
  159. if (unlikely(afinfo == NULL))
  160. return -EAFNOSUPPORT;
  161. err = -EEXIST;
  162. modemap = afinfo->mode_map;
  163. if (likely(modemap[mode->encap] == NULL)) {
  164. modemap[mode->encap] = mode;
  165. err = 0;
  166. }
  167. xfrm_policy_unlock_afinfo(afinfo);
  168. return err;
  169. }
  170. EXPORT_SYMBOL(xfrm_register_mode);
  171. int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
  172. {
  173. struct xfrm_policy_afinfo *afinfo;
  174. struct xfrm_mode **modemap;
  175. int err;
  176. if (unlikely(mode->encap >= XFRM_MODE_MAX))
  177. return -EINVAL;
  178. afinfo = xfrm_policy_lock_afinfo(family);
  179. if (unlikely(afinfo == NULL))
  180. return -EAFNOSUPPORT;
  181. err = -ENOENT;
  182. modemap = afinfo->mode_map;
  183. if (likely(modemap[mode->encap] == mode)) {
  184. modemap[mode->encap] = NULL;
  185. err = 0;
  186. }
  187. xfrm_policy_unlock_afinfo(afinfo);
  188. return err;
  189. }
  190. EXPORT_SYMBOL(xfrm_unregister_mode);
  191. struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
  192. {
  193. struct xfrm_policy_afinfo *afinfo;
  194. struct xfrm_mode *mode;
  195. int modload_attempted = 0;
  196. if (unlikely(encap >= XFRM_MODE_MAX))
  197. return NULL;
  198. retry:
  199. afinfo = xfrm_policy_get_afinfo(family);
  200. if (unlikely(afinfo == NULL))
  201. return NULL;
  202. mode = afinfo->mode_map[encap];
  203. if (unlikely(mode && !try_module_get(mode->owner)))
  204. mode = NULL;
  205. if (!mode && !modload_attempted) {
  206. xfrm_policy_put_afinfo(afinfo);
  207. request_module("xfrm-mode-%d-%d", family, encap);
  208. modload_attempted = 1;
  209. goto retry;
  210. }
  211. xfrm_policy_put_afinfo(afinfo);
  212. return mode;
  213. }
  214. void xfrm_put_mode(struct xfrm_mode *mode)
  215. {
  216. module_put(mode->owner);
  217. }
  218. static inline unsigned long make_jiffies(long secs)
  219. {
  220. if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
  221. return MAX_SCHEDULE_TIMEOUT-1;
  222. else
  223. return secs*HZ;
  224. }
  225. static void xfrm_policy_timer(unsigned long data)
  226. {
  227. struct xfrm_policy *xp = (struct xfrm_policy*)data;
  228. unsigned long now = get_seconds();
  229. long next = LONG_MAX;
  230. int warn = 0;
  231. int dir;
  232. read_lock(&xp->lock);
  233. if (xp->dead)
  234. goto out;
  235. dir = xfrm_policy_id2dir(xp->index);
  236. if (xp->lft.hard_add_expires_seconds) {
  237. long tmo = xp->lft.hard_add_expires_seconds +
  238. xp->curlft.add_time - now;
  239. if (tmo <= 0)
  240. goto expired;
  241. if (tmo < next)
  242. next = tmo;
  243. }
  244. if (xp->lft.hard_use_expires_seconds) {
  245. long tmo = xp->lft.hard_use_expires_seconds +
  246. (xp->curlft.use_time ? : xp->curlft.add_time) - now;
  247. if (tmo <= 0)
  248. goto expired;
  249. if (tmo < next)
  250. next = tmo;
  251. }
  252. if (xp->lft.soft_add_expires_seconds) {
  253. long tmo = xp->lft.soft_add_expires_seconds +
  254. xp->curlft.add_time - now;
  255. if (tmo <= 0) {
  256. warn = 1;
  257. tmo = XFRM_KM_TIMEOUT;
  258. }
  259. if (tmo < next)
  260. next = tmo;
  261. }
  262. if (xp->lft.soft_use_expires_seconds) {
  263. long tmo = xp->lft.soft_use_expires_seconds +
  264. (xp->curlft.use_time ? : xp->curlft.add_time) - now;
  265. if (tmo <= 0) {
  266. warn = 1;
  267. tmo = XFRM_KM_TIMEOUT;
  268. }
  269. if (tmo < next)
  270. next = tmo;
  271. }
  272. if (warn)
  273. km_policy_expired(xp, dir, 0, 0);
  274. if (next != LONG_MAX &&
  275. !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
  276. xfrm_pol_hold(xp);
  277. out:
  278. read_unlock(&xp->lock);
  279. xfrm_pol_put(xp);
  280. return;
  281. expired:
  282. read_unlock(&xp->lock);
  283. if (!xfrm_policy_delete(xp, dir))
  284. km_policy_expired(xp, dir, 1, 0);
  285. xfrm_pol_put(xp);
  286. }
  287. /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
  288. * SPD calls.
  289. */
  290. struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
  291. {
  292. struct xfrm_policy *policy;
  293. policy = kzalloc(sizeof(struct xfrm_policy), gfp);
  294. if (policy) {
  295. INIT_HLIST_NODE(&policy->bydst);
  296. INIT_HLIST_NODE(&policy->byidx);
  297. rwlock_init(&policy->lock);
  298. atomic_set(&policy->refcnt, 1);
  299. init_timer(&policy->timer);
  300. policy->timer.data = (unsigned long)policy;
  301. policy->timer.function = xfrm_policy_timer;
  302. }
  303. return policy;
  304. }
  305. EXPORT_SYMBOL(xfrm_policy_alloc);
  306. /* Destroy xfrm_policy: descendant resources must be released to this moment. */
  307. void __xfrm_policy_destroy(struct xfrm_policy *policy)
  308. {
  309. BUG_ON(!policy->dead);
  310. BUG_ON(policy->bundles);
  311. if (del_timer(&policy->timer))
  312. BUG();
  313. security_xfrm_policy_free(policy);
  314. kfree(policy);
  315. }
  316. EXPORT_SYMBOL(__xfrm_policy_destroy);
  317. static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
  318. {
  319. struct dst_entry *dst;
  320. while ((dst = policy->bundles) != NULL) {
  321. policy->bundles = dst->next;
  322. dst_free(dst);
  323. }
  324. if (del_timer(&policy->timer))
  325. atomic_dec(&policy->refcnt);
  326. if (atomic_read(&policy->refcnt) > 1)
  327. flow_cache_flush();
  328. xfrm_pol_put(policy);
  329. }
  330. static void xfrm_policy_gc_task(struct work_struct *work)
  331. {
  332. struct xfrm_policy *policy;
  333. struct hlist_node *entry, *tmp;
  334. struct hlist_head gc_list;
  335. spin_lock_bh(&xfrm_policy_gc_lock);
  336. gc_list.first = xfrm_policy_gc_list.first;
  337. INIT_HLIST_HEAD(&xfrm_policy_gc_list);
  338. spin_unlock_bh(&xfrm_policy_gc_lock);
  339. hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst)
  340. xfrm_policy_gc_kill(policy);
  341. }
  342. /* Rule must be locked. Release descentant resources, announce
  343. * entry dead. The rule must be unlinked from lists to the moment.
  344. */
  345. static void xfrm_policy_kill(struct xfrm_policy *policy)
  346. {
  347. int dead;
  348. write_lock_bh(&policy->lock);
  349. dead = policy->dead;
  350. policy->dead = 1;
  351. write_unlock_bh(&policy->lock);
  352. if (unlikely(dead)) {
  353. WARN_ON(1);
  354. return;
  355. }
  356. spin_lock(&xfrm_policy_gc_lock);
  357. hlist_add_head(&policy->bydst, &xfrm_policy_gc_list);
  358. spin_unlock(&xfrm_policy_gc_lock);
  359. schedule_work(&xfrm_policy_gc_work);
  360. }
  361. struct xfrm_policy_hash {
  362. struct hlist_head *table;
  363. unsigned int hmask;
  364. };
  365. static struct hlist_head xfrm_policy_inexact[XFRM_POLICY_MAX*2];
  366. static struct xfrm_policy_hash xfrm_policy_bydst[XFRM_POLICY_MAX*2] __read_mostly;
  367. static struct hlist_head *xfrm_policy_byidx __read_mostly;
  368. static unsigned int xfrm_idx_hmask __read_mostly;
  369. static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
  370. static inline unsigned int idx_hash(u32 index)
  371. {
  372. return __idx_hash(index, xfrm_idx_hmask);
  373. }
  374. static struct hlist_head *policy_hash_bysel(struct xfrm_selector *sel, unsigned short family, int dir)
  375. {
  376. unsigned int hmask = xfrm_policy_bydst[dir].hmask;
  377. unsigned int hash = __sel_hash(sel, family, hmask);
  378. return (hash == hmask + 1 ?
  379. &xfrm_policy_inexact[dir] :
  380. xfrm_policy_bydst[dir].table + hash);
  381. }
  382. static struct hlist_head *policy_hash_direct(xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, int dir)
  383. {
  384. unsigned int hmask = xfrm_policy_bydst[dir].hmask;
  385. unsigned int hash = __addr_hash(daddr, saddr, family, hmask);
  386. return xfrm_policy_bydst[dir].table + hash;
  387. }
  388. static void xfrm_dst_hash_transfer(struct hlist_head *list,
  389. struct hlist_head *ndsttable,
  390. unsigned int nhashmask)
  391. {
  392. struct hlist_node *entry, *tmp;
  393. struct xfrm_policy *pol;
  394. hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
  395. unsigned int h;
  396. h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
  397. pol->family, nhashmask);
  398. hlist_add_head(&pol->bydst, ndsttable+h);
  399. }
  400. }
  401. static void xfrm_idx_hash_transfer(struct hlist_head *list,
  402. struct hlist_head *nidxtable,
  403. unsigned int nhashmask)
  404. {
  405. struct hlist_node *entry, *tmp;
  406. struct xfrm_policy *pol;
  407. hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
  408. unsigned int h;
  409. h = __idx_hash(pol->index, nhashmask);
  410. hlist_add_head(&pol->byidx, nidxtable+h);
  411. }
  412. }
  413. static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
  414. {
  415. return ((old_hmask + 1) << 1) - 1;
  416. }
  417. static void xfrm_bydst_resize(int dir)
  418. {
  419. unsigned int hmask = xfrm_policy_bydst[dir].hmask;
  420. unsigned int nhashmask = xfrm_new_hash_mask(hmask);
  421. unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
  422. struct hlist_head *odst = xfrm_policy_bydst[dir].table;
  423. struct hlist_head *ndst = xfrm_hash_alloc(nsize);
  424. int i;
  425. if (!ndst)
  426. return;
  427. write_lock_bh(&xfrm_policy_lock);
  428. for (i = hmask; i >= 0; i--)
  429. xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);
  430. xfrm_policy_bydst[dir].table = ndst;
  431. xfrm_policy_bydst[dir].hmask = nhashmask;
  432. write_unlock_bh(&xfrm_policy_lock);
  433. xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
  434. }
  435. static void xfrm_byidx_resize(int total)
  436. {
  437. unsigned int hmask = xfrm_idx_hmask;
  438. unsigned int nhashmask = xfrm_new_hash_mask(hmask);
  439. unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
  440. struct hlist_head *oidx = xfrm_policy_byidx;
  441. struct hlist_head *nidx = xfrm_hash_alloc(nsize);
  442. int i;
  443. if (!nidx)
  444. return;
  445. write_lock_bh(&xfrm_policy_lock);
  446. for (i = hmask; i >= 0; i--)
  447. xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
  448. xfrm_policy_byidx = nidx;
  449. xfrm_idx_hmask = nhashmask;
  450. write_unlock_bh(&xfrm_policy_lock);
  451. xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
  452. }
  453. static inline int xfrm_bydst_should_resize(int dir, int *total)
  454. {
  455. unsigned int cnt = xfrm_policy_count[dir];
  456. unsigned int hmask = xfrm_policy_bydst[dir].hmask;
  457. if (total)
  458. *total += cnt;
  459. if ((hmask + 1) < xfrm_policy_hashmax &&
  460. cnt > hmask)
  461. return 1;
  462. return 0;
  463. }
  464. static inline int xfrm_byidx_should_resize(int total)
  465. {
  466. unsigned int hmask = xfrm_idx_hmask;
  467. if ((hmask + 1) < xfrm_policy_hashmax &&
  468. total > hmask)
  469. return 1;
  470. return 0;
  471. }
  472. void xfrm_spd_getinfo(struct xfrmk_spdinfo *si)
  473. {
  474. read_lock_bh(&xfrm_policy_lock);
  475. si->incnt = xfrm_policy_count[XFRM_POLICY_IN];
  476. si->outcnt = xfrm_policy_count[XFRM_POLICY_OUT];
  477. si->fwdcnt = xfrm_policy_count[XFRM_POLICY_FWD];
  478. si->inscnt = xfrm_policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
  479. si->outscnt = xfrm_policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
  480. si->fwdscnt = xfrm_policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
  481. si->spdhcnt = xfrm_idx_hmask;
  482. si->spdhmcnt = xfrm_policy_hashmax;
  483. read_unlock_bh(&xfrm_policy_lock);
  484. }
  485. EXPORT_SYMBOL(xfrm_spd_getinfo);
  486. static DEFINE_MUTEX(hash_resize_mutex);
  487. static void xfrm_hash_resize(struct work_struct *__unused)
  488. {
  489. int dir, total;
  490. mutex_lock(&hash_resize_mutex);
  491. total = 0;
  492. for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
  493. if (xfrm_bydst_should_resize(dir, &total))
  494. xfrm_bydst_resize(dir);
  495. }
  496. if (xfrm_byidx_should_resize(total))
  497. xfrm_byidx_resize(total);
  498. mutex_unlock(&hash_resize_mutex);
  499. }
  500. static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
  501. /* Generate new index... KAME seems to generate them ordered by cost
  502. * of an absolute inpredictability of ordering of rules. This will not pass. */
  503. static u32 xfrm_gen_index(u8 type, int dir)
  504. {
  505. static u32 idx_generator;
  506. for (;;) {
  507. struct hlist_node *entry;
  508. struct hlist_head *list;
  509. struct xfrm_policy *p;
  510. u32 idx;
  511. int found;
  512. idx = (idx_generator | dir);
  513. idx_generator += 8;
  514. if (idx == 0)
  515. idx = 8;
  516. list = xfrm_policy_byidx + idx_hash(idx);
  517. found = 0;
  518. hlist_for_each_entry(p, entry, list, byidx) {
  519. if (p->index == idx) {
  520. found = 1;
  521. break;
  522. }
  523. }
  524. if (!found)
  525. return idx;
  526. }
  527. }
  528. static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
  529. {
  530. u32 *p1 = (u32 *) s1;
  531. u32 *p2 = (u32 *) s2;
  532. int len = sizeof(struct xfrm_selector) / sizeof(u32);
  533. int i;
  534. for (i = 0; i < len; i++) {
  535. if (p1[i] != p2[i])
  536. return 1;
  537. }
  538. return 0;
  539. }
  540. int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
  541. {
  542. struct xfrm_policy *pol;
  543. struct xfrm_policy *delpol;
  544. struct hlist_head *chain;
  545. struct hlist_node *entry, *newpos;
  546. struct dst_entry *gc_list;
  547. write_lock_bh(&xfrm_policy_lock);
  548. chain = policy_hash_bysel(&policy->selector, policy->family, dir);
  549. delpol = NULL;
  550. newpos = NULL;
  551. hlist_for_each_entry(pol, entry, chain, bydst) {
  552. if (pol->type == policy->type &&
  553. !selector_cmp(&pol->selector, &policy->selector) &&
  554. xfrm_sec_ctx_match(pol->security, policy->security) &&
  555. !WARN_ON(delpol)) {
  556. if (excl) {
  557. write_unlock_bh(&xfrm_policy_lock);
  558. return -EEXIST;
  559. }
  560. delpol = pol;
  561. if (policy->priority > pol->priority)
  562. continue;
  563. } else if (policy->priority >= pol->priority) {
  564. newpos = &pol->bydst;
  565. continue;
  566. }
  567. if (delpol)
  568. break;
  569. }
  570. if (newpos)
  571. hlist_add_after(newpos, &policy->bydst);
  572. else
  573. hlist_add_head(&policy->bydst, chain);
  574. xfrm_pol_hold(policy);
  575. xfrm_policy_count[dir]++;
  576. atomic_inc(&flow_cache_genid);
  577. if (delpol) {
  578. hlist_del(&delpol->bydst);
  579. hlist_del(&delpol->byidx);
  580. xfrm_policy_count[dir]--;
  581. }
  582. policy->index = delpol ? delpol->index : xfrm_gen_index(policy->type, dir);
  583. hlist_add_head(&policy->byidx, xfrm_policy_byidx+idx_hash(policy->index));
  584. policy->curlft.add_time = get_seconds();
  585. policy->curlft.use_time = 0;
  586. if (!mod_timer(&policy->timer, jiffies + HZ))
  587. xfrm_pol_hold(policy);
  588. write_unlock_bh(&xfrm_policy_lock);
  589. if (delpol)
  590. xfrm_policy_kill(delpol);
  591. else if (xfrm_bydst_should_resize(dir, NULL))
  592. schedule_work(&xfrm_hash_work);
  593. read_lock_bh(&xfrm_policy_lock);
  594. gc_list = NULL;
  595. entry = &policy->bydst;
  596. hlist_for_each_entry_continue(policy, entry, bydst) {
  597. struct dst_entry *dst;
  598. write_lock(&policy->lock);
  599. dst = policy->bundles;
  600. if (dst) {
  601. struct dst_entry *tail = dst;
  602. while (tail->next)
  603. tail = tail->next;
  604. tail->next = gc_list;
  605. gc_list = dst;
  606. policy->bundles = NULL;
  607. }
  608. write_unlock(&policy->lock);
  609. }
  610. read_unlock_bh(&xfrm_policy_lock);
  611. while (gc_list) {
  612. struct dst_entry *dst = gc_list;
  613. gc_list = dst->next;
  614. dst_free(dst);
  615. }
  616. return 0;
  617. }
  618. EXPORT_SYMBOL(xfrm_policy_insert);
  619. struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
  620. struct xfrm_selector *sel,
  621. struct xfrm_sec_ctx *ctx, int delete,
  622. int *err)
  623. {
  624. struct xfrm_policy *pol, *ret;
  625. struct hlist_head *chain;
  626. struct hlist_node *entry;
  627. *err = 0;
  628. write_lock_bh(&xfrm_policy_lock);
  629. chain = policy_hash_bysel(sel, sel->family, dir);
  630. ret = NULL;
  631. hlist_for_each_entry(pol, entry, chain, bydst) {
  632. if (pol->type == type &&
  633. !selector_cmp(sel, &pol->selector) &&
  634. xfrm_sec_ctx_match(ctx, pol->security)) {
  635. xfrm_pol_hold(pol);
  636. if (delete) {
  637. *err = security_xfrm_policy_delete(pol);
  638. if (*err) {
  639. write_unlock_bh(&xfrm_policy_lock);
  640. return pol;
  641. }
  642. hlist_del(&pol->bydst);
  643. hlist_del(&pol->byidx);
  644. xfrm_policy_count[dir]--;
  645. }
  646. ret = pol;
  647. break;
  648. }
  649. }
  650. write_unlock_bh(&xfrm_policy_lock);
  651. if (ret && delete) {
  652. atomic_inc(&flow_cache_genid);
  653. xfrm_policy_kill(ret);
  654. }
  655. return ret;
  656. }
  657. EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
  658. struct xfrm_policy *xfrm_policy_byid(u8 type, int dir, u32 id, int delete,
  659. int *err)
  660. {
  661. struct xfrm_policy *pol, *ret;
  662. struct hlist_head *chain;
  663. struct hlist_node *entry;
  664. *err = -ENOENT;
  665. if (xfrm_policy_id2dir(id) != dir)
  666. return NULL;
  667. *err = 0;
  668. write_lock_bh(&xfrm_policy_lock);
  669. chain = xfrm_policy_byidx + idx_hash(id);
  670. ret = NULL;
  671. hlist_for_each_entry(pol, entry, chain, byidx) {
  672. if (pol->type == type && pol->index == id) {
  673. xfrm_pol_hold(pol);
  674. if (delete) {
  675. *err = security_xfrm_policy_delete(pol);
  676. if (*err) {
  677. write_unlock_bh(&xfrm_policy_lock);
  678. return pol;
  679. }
  680. hlist_del(&pol->bydst);
  681. hlist_del(&pol->byidx);
  682. xfrm_policy_count[dir]--;
  683. }
  684. ret = pol;
  685. break;
  686. }
  687. }
  688. write_unlock_bh(&xfrm_policy_lock);
  689. if (ret && delete) {
  690. atomic_inc(&flow_cache_genid);
  691. xfrm_policy_kill(ret);
  692. }
  693. return ret;
  694. }
  695. EXPORT_SYMBOL(xfrm_policy_byid);
  696. #ifdef CONFIG_SECURITY_NETWORK_XFRM
  697. static inline int
  698. xfrm_policy_flush_secctx_check(u8 type, struct xfrm_audit *audit_info)
  699. {
  700. int dir, err = 0;
  701. for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
  702. struct xfrm_policy *pol;
  703. struct hlist_node *entry;
  704. int i;
  705. hlist_for_each_entry(pol, entry,
  706. &xfrm_policy_inexact[dir], bydst) {
  707. if (pol->type != type)
  708. continue;
  709. err = security_xfrm_policy_delete(pol);
  710. if (err) {
  711. xfrm_audit_log(audit_info->loginuid,
  712. audit_info->secid,
  713. AUDIT_MAC_IPSEC_DELSPD, 0,
  714. pol, NULL);
  715. return err;
  716. }
  717. }
  718. for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
  719. hlist_for_each_entry(pol, entry,
  720. xfrm_policy_bydst[dir].table + i,
  721. bydst) {
  722. if (pol->type != type)
  723. continue;
  724. err = security_xfrm_policy_delete(pol);
  725. if (err) {
  726. xfrm_audit_log(audit_info->loginuid,
  727. audit_info->secid,
  728. AUDIT_MAC_IPSEC_DELSPD,
  729. 0, pol, NULL);
  730. return err;
  731. }
  732. }
  733. }
  734. }
  735. return err;
  736. }
  737. #else
  738. static inline int
  739. xfrm_policy_flush_secctx_check(u8 type, struct xfrm_audit *audit_info)
  740. {
  741. return 0;
  742. }
  743. #endif
  744. int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info)
  745. {
  746. int dir, err = 0;
  747. write_lock_bh(&xfrm_policy_lock);
  748. err = xfrm_policy_flush_secctx_check(type, audit_info);
  749. if (err)
  750. goto out;
  751. for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
  752. struct xfrm_policy *pol;
  753. struct hlist_node *entry;
  754. int i, killed;
  755. killed = 0;
  756. again1:
  757. hlist_for_each_entry(pol, entry,
  758. &xfrm_policy_inexact[dir], bydst) {
  759. if (pol->type != type)
  760. continue;
  761. hlist_del(&pol->bydst);
  762. hlist_del(&pol->byidx);
  763. write_unlock_bh(&xfrm_policy_lock);
  764. xfrm_audit_log(audit_info->loginuid, audit_info->secid,
  765. AUDIT_MAC_IPSEC_DELSPD, 1, pol, NULL);
  766. xfrm_policy_kill(pol);
  767. killed++;
  768. write_lock_bh(&xfrm_policy_lock);
  769. goto again1;
  770. }
  771. for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
  772. again2:
  773. hlist_for_each_entry(pol, entry,
  774. xfrm_policy_bydst[dir].table + i,
  775. bydst) {
  776. if (pol->type != type)
  777. continue;
  778. hlist_del(&pol->bydst);
  779. hlist_del(&pol->byidx);
  780. write_unlock_bh(&xfrm_policy_lock);
  781. xfrm_audit_log(audit_info->loginuid,
  782. audit_info->secid,
  783. AUDIT_MAC_IPSEC_DELSPD, 1,
  784. pol, NULL);
  785. xfrm_policy_kill(pol);
  786. killed++;
  787. write_lock_bh(&xfrm_policy_lock);
  788. goto again2;
  789. }
  790. }
  791. xfrm_policy_count[dir] -= killed;
  792. }
  793. atomic_inc(&flow_cache_genid);
  794. out:
  795. write_unlock_bh(&xfrm_policy_lock);
  796. return err;
  797. }
  798. EXPORT_SYMBOL(xfrm_policy_flush);
  799. int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*),
  800. void *data)
  801. {
  802. struct xfrm_policy *pol, *last = NULL;
  803. struct hlist_node *entry;
  804. int dir, last_dir = 0, count, error;
  805. read_lock_bh(&xfrm_policy_lock);
  806. count = 0;
  807. for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
  808. struct hlist_head *table = xfrm_policy_bydst[dir].table;
  809. int i;
  810. hlist_for_each_entry(pol, entry,
  811. &xfrm_policy_inexact[dir], bydst) {
  812. if (pol->type != type)
  813. continue;
  814. if (last) {
  815. error = func(last, last_dir % XFRM_POLICY_MAX,
  816. count, data);
  817. if (error)
  818. goto out;
  819. }
  820. last = pol;
  821. last_dir = dir;
  822. count++;
  823. }
  824. for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
  825. hlist_for_each_entry(pol, entry, table + i, bydst) {
  826. if (pol->type != type)
  827. continue;
  828. if (last) {
  829. error = func(last, last_dir % XFRM_POLICY_MAX,
  830. count, data);
  831. if (error)
  832. goto out;
  833. }
  834. last = pol;
  835. last_dir = dir;
  836. count++;
  837. }
  838. }
  839. }
  840. if (count == 0) {
  841. error = -ENOENT;
  842. goto out;
  843. }
  844. error = func(last, last_dir % XFRM_POLICY_MAX, 0, data);
  845. out:
  846. read_unlock_bh(&xfrm_policy_lock);
  847. return error;
  848. }
  849. EXPORT_SYMBOL(xfrm_policy_walk);
  850. /*
  851. * Find policy to apply to this flow.
  852. *
  853. * Returns 0 if policy found, else an -errno.
  854. */
  855. static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
  856. u8 type, u16 family, int dir)
  857. {
  858. struct xfrm_selector *sel = &pol->selector;
  859. int match, ret = -ESRCH;
  860. if (pol->family != family ||
  861. pol->type != type)
  862. return ret;
  863. match = xfrm_selector_match(sel, fl, family);
  864. if (match)
  865. ret = security_xfrm_policy_lookup(pol, fl->secid, dir);
  866. return ret;
  867. }
  868. static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
  869. u16 family, u8 dir)
  870. {
  871. int err;
  872. struct xfrm_policy *pol, *ret;
  873. xfrm_address_t *daddr, *saddr;
  874. struct hlist_node *entry;
  875. struct hlist_head *chain;
  876. u32 priority = ~0U;
  877. daddr = xfrm_flowi_daddr(fl, family);
  878. saddr = xfrm_flowi_saddr(fl, family);
  879. if (unlikely(!daddr || !saddr))
  880. return NULL;
  881. read_lock_bh(&xfrm_policy_lock);
  882. chain = policy_hash_direct(daddr, saddr, family, dir);
  883. ret = NULL;
  884. hlist_for_each_entry(pol, entry, chain, bydst) {
  885. err = xfrm_policy_match(pol, fl, type, family, dir);
  886. if (err) {
  887. if (err == -ESRCH)
  888. continue;
  889. else {
  890. ret = ERR_PTR(err);
  891. goto fail;
  892. }
  893. } else {
  894. ret = pol;
  895. priority = ret->priority;
  896. break;
  897. }
  898. }
  899. chain = &xfrm_policy_inexact[dir];
  900. hlist_for_each_entry(pol, entry, chain, bydst) {
  901. err = xfrm_policy_match(pol, fl, type, family, dir);
  902. if (err) {
  903. if (err == -ESRCH)
  904. continue;
  905. else {
  906. ret = ERR_PTR(err);
  907. goto fail;
  908. }
  909. } else if (pol->priority < priority) {
  910. ret = pol;
  911. break;
  912. }
  913. }
  914. if (ret)
  915. xfrm_pol_hold(ret);
  916. fail:
  917. read_unlock_bh(&xfrm_policy_lock);
  918. return ret;
  919. }
  920. static int xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
  921. void **objp, atomic_t **obj_refp)
  922. {
  923. struct xfrm_policy *pol;
  924. int err = 0;
  925. #ifdef CONFIG_XFRM_SUB_POLICY
  926. pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_SUB, fl, family, dir);
  927. if (IS_ERR(pol)) {
  928. err = PTR_ERR(pol);
  929. pol = NULL;
  930. }
  931. if (pol || err)
  932. goto end;
  933. #endif
  934. pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN, fl, family, dir);
  935. if (IS_ERR(pol)) {
  936. err = PTR_ERR(pol);
  937. pol = NULL;
  938. }
  939. #ifdef CONFIG_XFRM_SUB_POLICY
  940. end:
  941. #endif
  942. if ((*objp = (void *) pol) != NULL)
  943. *obj_refp = &pol->refcnt;
  944. return err;
  945. }
  946. static inline int policy_to_flow_dir(int dir)
  947. {
  948. if (XFRM_POLICY_IN == FLOW_DIR_IN &&
  949. XFRM_POLICY_OUT == FLOW_DIR_OUT &&
  950. XFRM_POLICY_FWD == FLOW_DIR_FWD)
  951. return dir;
  952. switch (dir) {
  953. default:
  954. case XFRM_POLICY_IN:
  955. return FLOW_DIR_IN;
  956. case XFRM_POLICY_OUT:
  957. return FLOW_DIR_OUT;
  958. case XFRM_POLICY_FWD:
  959. return FLOW_DIR_FWD;
  960. }
  961. }
  962. static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
  963. {
  964. struct xfrm_policy *pol;
  965. read_lock_bh(&xfrm_policy_lock);
  966. if ((pol = sk->sk_policy[dir]) != NULL) {
  967. int match = xfrm_selector_match(&pol->selector, fl,
  968. sk->sk_family);
  969. int err = 0;
  970. if (match) {
  971. err = security_xfrm_policy_lookup(pol, fl->secid,
  972. policy_to_flow_dir(dir));
  973. if (!err)
  974. xfrm_pol_hold(pol);
  975. else if (err == -ESRCH)
  976. pol = NULL;
  977. else
  978. pol = ERR_PTR(err);
  979. } else
  980. pol = NULL;
  981. }
  982. read_unlock_bh(&xfrm_policy_lock);
  983. return pol;
  984. }
  985. static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
  986. {
  987. struct hlist_head *chain = policy_hash_bysel(&pol->selector,
  988. pol->family, dir);
  989. hlist_add_head(&pol->bydst, chain);
  990. hlist_add_head(&pol->byidx, xfrm_policy_byidx+idx_hash(pol->index));
  991. xfrm_policy_count[dir]++;
  992. xfrm_pol_hold(pol);
  993. if (xfrm_bydst_should_resize(dir, NULL))
  994. schedule_work(&xfrm_hash_work);
  995. }
  996. static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
  997. int dir)
  998. {
  999. if (hlist_unhashed(&pol->bydst))
  1000. return NULL;
  1001. hlist_del(&pol->bydst);
  1002. hlist_del(&pol->byidx);
  1003. xfrm_policy_count[dir]--;
  1004. return pol;
  1005. }
  1006. int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
  1007. {
  1008. write_lock_bh(&xfrm_policy_lock);
  1009. pol = __xfrm_policy_unlink(pol, dir);
  1010. write_unlock_bh(&xfrm_policy_lock);
  1011. if (pol) {
  1012. if (dir < XFRM_POLICY_MAX)
  1013. atomic_inc(&flow_cache_genid);
  1014. xfrm_policy_kill(pol);
  1015. return 0;
  1016. }
  1017. return -ENOENT;
  1018. }
  1019. EXPORT_SYMBOL(xfrm_policy_delete);
  1020. int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
  1021. {
  1022. struct xfrm_policy *old_pol;
  1023. #ifdef CONFIG_XFRM_SUB_POLICY
  1024. if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
  1025. return -EINVAL;
  1026. #endif
  1027. write_lock_bh(&xfrm_policy_lock);
  1028. old_pol = sk->sk_policy[dir];
  1029. sk->sk_policy[dir] = pol;
  1030. if (pol) {
  1031. pol->curlft.add_time = get_seconds();
  1032. pol->index = xfrm_gen_index(pol->type, XFRM_POLICY_MAX+dir);
  1033. __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
  1034. }
  1035. if (old_pol)
  1036. __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
  1037. write_unlock_bh(&xfrm_policy_lock);
  1038. if (old_pol) {
  1039. xfrm_policy_kill(old_pol);
  1040. }
  1041. return 0;
  1042. }
  1043. static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
  1044. {
  1045. struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);
  1046. if (newp) {
  1047. newp->selector = old->selector;
  1048. if (security_xfrm_policy_clone(old, newp)) {
  1049. kfree(newp);
  1050. return NULL; /* ENOMEM */
  1051. }
  1052. newp->lft = old->lft;
  1053. newp->curlft = old->curlft;
  1054. newp->action = old->action;
  1055. newp->flags = old->flags;
  1056. newp->xfrm_nr = old->xfrm_nr;
  1057. newp->index = old->index;
  1058. newp->type = old->type;
  1059. memcpy(newp->xfrm_vec, old->xfrm_vec,
  1060. newp->xfrm_nr*sizeof(struct xfrm_tmpl));
  1061. write_lock_bh(&xfrm_policy_lock);
  1062. __xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
  1063. write_unlock_bh(&xfrm_policy_lock);
  1064. xfrm_pol_put(newp);
  1065. }
  1066. return newp;
  1067. }
  1068. int __xfrm_sk_clone_policy(struct sock *sk)
  1069. {
  1070. struct xfrm_policy *p0 = sk->sk_policy[0],
  1071. *p1 = sk->sk_policy[1];
  1072. sk->sk_policy[0] = sk->sk_policy[1] = NULL;
  1073. if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
  1074. return -ENOMEM;
  1075. if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
  1076. return -ENOMEM;
  1077. return 0;
  1078. }
  1079. static int
  1080. xfrm_get_saddr(xfrm_address_t *local, xfrm_address_t *remote,
  1081. unsigned short family)
  1082. {
  1083. int err;
  1084. struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  1085. if (unlikely(afinfo == NULL))
  1086. return -EINVAL;
  1087. err = afinfo->get_saddr(local, remote);
  1088. xfrm_policy_put_afinfo(afinfo);
  1089. return err;
  1090. }
  1091. /* Resolve list of templates for the flow, given policy. */
  1092. static int
  1093. xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
  1094. struct xfrm_state **xfrm,
  1095. unsigned short family)
  1096. {
  1097. int nx;
  1098. int i, error;
  1099. xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
  1100. xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
  1101. xfrm_address_t tmp;
  1102. for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
  1103. struct xfrm_state *x;
  1104. xfrm_address_t *remote = daddr;
  1105. xfrm_address_t *local = saddr;
  1106. struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
  1107. if (tmpl->mode == XFRM_MODE_TUNNEL ||
  1108. tmpl->mode == XFRM_MODE_BEET) {
  1109. remote = &tmpl->id.daddr;
  1110. local = &tmpl->saddr;
  1111. family = tmpl->encap_family;
  1112. if (xfrm_addr_any(local, family)) {
  1113. error = xfrm_get_saddr(&tmp, remote, family);
  1114. if (error)
  1115. goto fail;
  1116. local = &tmp;
  1117. }
  1118. }
  1119. x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
  1120. if (x && x->km.state == XFRM_STATE_VALID) {
  1121. xfrm[nx++] = x;
  1122. daddr = remote;
  1123. saddr = local;
  1124. continue;
  1125. }
  1126. if (x) {
  1127. error = (x->km.state == XFRM_STATE_ERROR ?
  1128. -EINVAL : -EAGAIN);
  1129. xfrm_state_put(x);
  1130. }
  1131. if (!tmpl->optional)
  1132. goto fail;
  1133. }
  1134. return nx;
  1135. fail:
  1136. for (nx--; nx>=0; nx--)
  1137. xfrm_state_put(xfrm[nx]);
  1138. return error;
  1139. }
  1140. static int
  1141. xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
  1142. struct xfrm_state **xfrm,
  1143. unsigned short family)
  1144. {
  1145. struct xfrm_state *tp[XFRM_MAX_DEPTH];
  1146. struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
  1147. int cnx = 0;
  1148. int error;
  1149. int ret;
  1150. int i;
  1151. for (i = 0; i < npols; i++) {
  1152. if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
  1153. error = -ENOBUFS;
  1154. goto fail;
  1155. }
  1156. ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
  1157. if (ret < 0) {
  1158. error = ret;
  1159. goto fail;
  1160. } else
  1161. cnx += ret;
  1162. }
  1163. /* found states are sorted for outbound processing */
  1164. if (npols > 1)
  1165. xfrm_state_sort(xfrm, tpp, cnx, family);
  1166. return cnx;
  1167. fail:
  1168. for (cnx--; cnx>=0; cnx--)
  1169. xfrm_state_put(tpp[cnx]);
  1170. return error;
  1171. }
  1172. /* Check that the bundle accepts the flow and its components are
  1173. * still valid.
  1174. */
  1175. static struct dst_entry *
  1176. xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
  1177. {
  1178. struct dst_entry *x;
  1179. struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  1180. if (unlikely(afinfo == NULL))
  1181. return ERR_PTR(-EINVAL);
  1182. x = afinfo->find_bundle(fl, policy);
  1183. xfrm_policy_put_afinfo(afinfo);
  1184. return x;
  1185. }
  1186. /* Allocate chain of dst_entry's, attach known xfrm's, calculate
  1187. * all the metrics... Shortly, bundle a bundle.
  1188. */
  1189. static int
  1190. xfrm_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
  1191. struct flowi *fl, struct dst_entry **dst_p,
  1192. unsigned short family)
  1193. {
  1194. int err;
  1195. struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  1196. if (unlikely(afinfo == NULL))
  1197. return -EINVAL;
  1198. err = afinfo->bundle_create(policy, xfrm, nx, fl, dst_p);
  1199. xfrm_policy_put_afinfo(afinfo);
  1200. return err;
  1201. }
  1202. static int inline
  1203. xfrm_dst_alloc_copy(void **target, void *src, int size)
  1204. {
  1205. if (!*target) {
  1206. *target = kmalloc(size, GFP_ATOMIC);
  1207. if (!*target)
  1208. return -ENOMEM;
  1209. }
  1210. memcpy(*target, src, size);
  1211. return 0;
  1212. }
  1213. static int inline
  1214. xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
  1215. {
  1216. #ifdef CONFIG_XFRM_SUB_POLICY
  1217. struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
  1218. return xfrm_dst_alloc_copy((void **)&(xdst->partner),
  1219. sel, sizeof(*sel));
  1220. #else
  1221. return 0;
  1222. #endif
  1223. }
  1224. static int inline
  1225. xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
  1226. {
  1227. #ifdef CONFIG_XFRM_SUB_POLICY
  1228. struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
  1229. return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
  1230. #else
  1231. return 0;
  1232. #endif
  1233. }
  1234. static int stale_bundle(struct dst_entry *dst);
  1235. /* Main function: finds/creates a bundle for given flow.
  1236. *
  1237. * At the moment we eat a raw IP route. Mostly to speed up lookups
  1238. * on interfaces with disabled IPsec.
  1239. */
  1240. int __xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
  1241. struct sock *sk, int flags)
  1242. {
  1243. struct xfrm_policy *policy;
  1244. struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
  1245. int npols;
  1246. int pol_dead;
  1247. int xfrm_nr;
  1248. int pi;
  1249. struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
  1250. struct dst_entry *dst, *dst_orig = *dst_p;
  1251. int nx = 0;
  1252. int err;
  1253. u32 genid;
  1254. u16 family;
  1255. u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
  1256. restart:
  1257. genid = atomic_read(&flow_cache_genid);
  1258. policy = NULL;
  1259. for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
  1260. pols[pi] = NULL;
  1261. npols = 0;
  1262. pol_dead = 0;
  1263. xfrm_nr = 0;
  1264. if (sk && sk->sk_policy[1]) {
  1265. policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
  1266. if (IS_ERR(policy))
  1267. return PTR_ERR(policy);
  1268. }
  1269. if (!policy) {
  1270. /* To accelerate a bit... */
  1271. if ((dst_orig->flags & DST_NOXFRM) ||
  1272. !xfrm_policy_count[XFRM_POLICY_OUT])
  1273. return 0;
  1274. policy = flow_cache_lookup(fl, dst_orig->ops->family,
  1275. dir, xfrm_policy_lookup);
  1276. if (IS_ERR(policy))
  1277. return PTR_ERR(policy);
  1278. }
  1279. if (!policy)
  1280. return 0;
  1281. family = dst_orig->ops->family;
  1282. policy->curlft.use_time = get_seconds();
  1283. pols[0] = policy;
  1284. npols ++;
  1285. xfrm_nr += pols[0]->xfrm_nr;
  1286. switch (policy->action) {
  1287. case XFRM_POLICY_BLOCK:
  1288. /* Prohibit the flow */
  1289. err = -EPERM;
  1290. goto error;
  1291. case XFRM_POLICY_ALLOW:
  1292. #ifndef CONFIG_XFRM_SUB_POLICY
  1293. if (policy->xfrm_nr == 0) {
  1294. /* Flow passes not transformed. */
  1295. xfrm_pol_put(policy);
  1296. return 0;
  1297. }
  1298. #endif
  1299. /* Try to find matching bundle.
  1300. *
  1301. * LATER: help from flow cache. It is optional, this
  1302. * is required only for output policy.
  1303. */
  1304. dst = xfrm_find_bundle(fl, policy, family);
  1305. if (IS_ERR(dst)) {
  1306. err = PTR_ERR(dst);
  1307. goto error;
  1308. }
  1309. if (dst)
  1310. break;
  1311. #ifdef CONFIG_XFRM_SUB_POLICY
  1312. if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
  1313. pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
  1314. fl, family,
  1315. XFRM_POLICY_OUT);
  1316. if (pols[1]) {
  1317. if (IS_ERR(pols[1])) {
  1318. err = PTR_ERR(pols[1]);
  1319. goto error;
  1320. }
  1321. if (pols[1]->action == XFRM_POLICY_BLOCK) {
  1322. err = -EPERM;
  1323. goto error;
  1324. }
  1325. npols ++;
  1326. xfrm_nr += pols[1]->xfrm_nr;
  1327. }
  1328. }
  1329. /*
  1330. * Because neither flowi nor bundle information knows about
  1331. * transformation template size. On more than one policy usage
  1332. * we can realize whether all of them is bypass or not after
  1333. * they are searched. See above not-transformed bypass
  1334. * is surrounded by non-sub policy configuration, too.
  1335. */
  1336. if (xfrm_nr == 0) {
  1337. /* Flow passes not transformed. */
  1338. xfrm_pols_put(pols, npols);
  1339. return 0;
  1340. }
  1341. #endif
  1342. nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);
  1343. if (unlikely(nx<0)) {
  1344. err = nx;
  1345. if (err == -EAGAIN && sysctl_xfrm_larval_drop) {
  1346. /* EREMOTE tells the caller to generate
  1347. * a one-shot blackhole route.
  1348. */
  1349. xfrm_pol_put(policy);
  1350. return -EREMOTE;
  1351. }
  1352. if (err == -EAGAIN && flags) {
  1353. DECLARE_WAITQUEUE(wait, current);
  1354. add_wait_queue(&km_waitq, &wait);
  1355. set_current_state(TASK_INTERRUPTIBLE);
  1356. schedule();
  1357. set_current_state(TASK_RUNNING);
  1358. remove_wait_queue(&km_waitq, &wait);
  1359. nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);
  1360. if (nx == -EAGAIN && signal_pending(current)) {
  1361. err = -ERESTART;
  1362. goto error;
  1363. }
  1364. if (nx == -EAGAIN ||
  1365. genid != atomic_read(&flow_cache_genid)) {
  1366. xfrm_pols_put(pols, npols);
  1367. goto restart;
  1368. }
  1369. err = nx;
  1370. }
  1371. if (err < 0)
  1372. goto error;
  1373. }
  1374. if (nx == 0) {
  1375. /* Flow passes not transformed. */
  1376. xfrm_pols_put(pols, npols);
  1377. return 0;
  1378. }
  1379. dst = dst_orig;
  1380. err = xfrm_bundle_create(policy, xfrm, nx, fl, &dst, family);
  1381. if (unlikely(err)) {
  1382. int i;
  1383. for (i=0; i<nx; i++)
  1384. xfrm_state_put(xfrm[i]);
  1385. goto error;
  1386. }
  1387. for (pi = 0; pi < npols; pi++) {
  1388. read_lock_bh(&pols[pi]->lock);
  1389. pol_dead |= pols[pi]->dead;
  1390. read_unlock_bh(&pols[pi]->lock);
  1391. }
  1392. write_lock_bh(&policy->lock);
  1393. if (unlikely(pol_dead || stale_bundle(dst))) {
  1394. /* Wow! While we worked on resolving, this
  1395. * policy has gone. Retry. It is not paranoia,
  1396. * we just cannot enlist new bundle to dead object.
  1397. * We can't enlist stable bundles either.
  1398. */
  1399. write_unlock_bh(&policy->lock);
  1400. if (dst)
  1401. dst_free(dst);
  1402. err = -EHOSTUNREACH;
  1403. goto error;
  1404. }
  1405. if (npols > 1)
  1406. err = xfrm_dst_update_parent(dst, &pols[1]->selector);
  1407. else
  1408. err = xfrm_dst_update_origin(dst, fl);
  1409. if (unlikely(err)) {
  1410. write_unlock_bh(&policy->lock);
  1411. if (dst)
  1412. dst_free(dst);
  1413. goto error;
  1414. }
  1415. dst->next = policy->bundles;
  1416. policy->bundles = dst;
  1417. dst_hold(dst);
  1418. write_unlock_bh(&policy->lock);
  1419. }
  1420. *dst_p = dst;
  1421. dst_release(dst_orig);
  1422. xfrm_pols_put(pols, npols);
  1423. return 0;
  1424. error:
  1425. dst_release(dst_orig);
  1426. xfrm_pols_put(pols, npols);
  1427. *dst_p = NULL;
  1428. return err;
  1429. }
  1430. EXPORT_SYMBOL(__xfrm_lookup);
  1431. int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
  1432. struct sock *sk, int flags)
  1433. {
  1434. int err = __xfrm_lookup(dst_p, fl, sk, flags);
  1435. if (err == -EREMOTE) {
  1436. dst_release(*dst_p);
  1437. *dst_p = NULL;
  1438. err = -EAGAIN;
  1439. }
  1440. return err;
  1441. }
  1442. EXPORT_SYMBOL(xfrm_lookup);
static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
{
	struct xfrm_state *x;
	int err;

	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
		return 0;

	x = skb->sp->xvec[idx];
	if (!x->type->reject)
		return 0;

	xfrm_state_hold(x);
	err = x->type->reject(x, skb, fl);
	xfrm_state_put(x);

	return err;
}

/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we make this in maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have policy cached at them.
 */

static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		((tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}
/*
 * 0 or more than 0 is returned when validation succeeds (either bypass
 * because of optional transport mode, or the next index of the matched
 * secpath state with the template).
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-idx;
			break;
		}
	}
	return start;
}
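/*
 * Sketch of how the return convention above is consumed (this mirrors
 * the loop in __xfrm_policy_check() below; "k", "tpp", "sp" and
 * "xerr_idx" are that function's locals):
 */
#if 0
	k = xfrm_policy_ok(tpp[i], sp, k, family);
	if (k < 0) {
		if (k < -1)
			xerr_idx = -(2+k);	/* recover errored_index */
		goto reject;
	}
#endif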
int
xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl);
	err = security_xfrm_decode_session(skb, &fl->secid);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_decode_session);

static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	struct flowi fl;
	u8 fl_dir = policy_to_flow_dir(dir);
	int xerr_idx = -1;

	if (xfrm_decode_session(skb, &fl, family) < 0)
		return 0;
	nf_nat_decode_session(skb, &fl, family);

	/* First, check used SA against their selectors. */
	if (skb->sp) {
		int i;

		for (i=skb->sp->len-1; i>=0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family))
				return 0;
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
		if (IS_ERR(pol))
			return 0;
	}

	if (!pol)
		pol = flow_cache_lookup(&fl, family, fl_dir,
					xfrm_policy_lookup);

	if (IS_ERR(pol))
		return 0;

	if (!pol) {
		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = get_seconds();

	pols[0] = pol;
	npols ++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN);
		if (pols[1]) {
			if (IS_ERR(pols[1]))
				return 0;
			pols[1]->curlft.use_time = get_seconds();
			npols ++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW)
				goto reject;
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH)
				goto reject_error;
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx))
			goto reject;

		xfrm_pols_put(pols, npols);
		return 1;
	}

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct flowi fl;

	if (xfrm_decode_session(skb, &fl, family) < 0)
		return 0;

	return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
}
EXPORT_SYMBOL(__xfrm_route_forward);
/* Optimize later using cookies and generation ids. */
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to "-1" to force all XFRM destinations to get validated by
	 * dst_ops->check on every use. We do this because when a
	 * normal route referenced by an XFRM dst is obsoleted we do
	 * not go looking around for all parent referencing XFRM dsts
	 * so that we can invalidate them. It is just too much work.
	 * Instead we make the checks here on every use. For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example). If X is marked obsolete, "A" will not
	 * notice. That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a policy's bundle is pruned, we dst_free() the XFRM
	 * dst which causes its ->obsolete field to be set to a
	 * positive non-zero integer. If an XFRM dst has been pruned
	 * like this, we want to force a new route lookup.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}
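/*
 * Note: xfrm_policy_register_afinfo() below installs xfrm_dst_check()
 * as the per-family dst_ops->check hook (unless the family supplied its
 * own), so every dst_check() on an XFRM destination re-runs the
 * stale_bundle() validation described above.
 */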
static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
}

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = &loopback_dev;
		dev_hold(&loopback_dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before it reaches the point of failure. */
	return;
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}
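/*
 * Bundle pruning: prune_one_bundle() moves every bundle of one policy
 * that "func" selects onto a private gc list while holding pol->lock;
 * xfrm_prune_bundles() walks the inexact lists and the bydst hash
 * tables for all directions and frees the collected dsts only after
 * dropping xfrm_policy_lock.
 */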
static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_entry *), struct dst_entry **gc_list_p)
{
	struct dst_entry *dst, **dstp;

	write_lock(&pol->lock);
	dstp = &pol->bundles;
	while ((dst=*dstp) != NULL) {
		if (func(dst)) {
			*dstp = dst->next;
			dst->next = *gc_list_p;
			*gc_list_p = dst;
		} else {
			dstp = &dst->next;
		}
	}
	write_unlock(&pol->lock);
}

static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
{
	struct dst_entry *gc_list = NULL;
	int dir;

	read_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		struct hlist_head *table;
		int i;

		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst)
			prune_one_bundle(pol, func, &gc_list);

		table = xfrm_policy_bydst[dir].table;
		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry, table + i, bydst)
				prune_one_bundle(pol, func, &gc_list);
		}
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;
		gc_list = dst->next;
		dst_free(dst);
	}
}

static int unused_bundle(struct dst_entry *dst)
{
	return !atomic_read(&dst->__refcnt);
}

static void __xfrm_garbage_collect(void)
{
	xfrm_prune_bundles(unused_bundle);
}

static int xfrm_flush_bundles(void)
{
	xfrm_prune_bundles(stale_bundle);
	return 0;
}
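/*
 * Walk a freshly built bundle and prime the cached MTUs: each level's
 * path MTU is the transformed MTU of its child, clamped to the MTU of
 * the route the bundle was created over.
 */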
void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst->metrics[RTAX_MTU-1] = pmtu;
	} while ((dst = dst->next));
}
EXPORT_SYMBOL(xfrm_init_pmtu);
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
		   struct flowi *fl, int family, int strict)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (fl) {
		if (first->origin && !flow_cache_uli_match(first->origin, fl))
			return 0;
		if (first->partner &&
		    !xfrm_selector_match(first->partner, fl, family))
			return 0;
	}
#endif

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
			return 0;
		if (fl && pol &&
		    !security_xfrm_state_pol_flow_match(dst->xfrm, pol, fl))
			return 0;
		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->genid != dst->xfrm->genid)
			return 0;

		if (strict && fl && dst->xfrm->props.mode != XFRM_MODE_TUNNEL &&
		    !xfrm_state_addr_flow_check(dst->xfrm, fl, family))
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst->metrics[RTAX_MTU-1] = mtu;

		if (last == first)
			break;

		last = (struct xfrm_dst *)last->u.dst.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}
EXPORT_SYMBOL(xfrm_bundle_ok);
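/*
 * Note: stale_bundle() above calls this with a NULL policy, a NULL flow
 * and strict == 0, so in that case only the dst/route/state validity
 * checks and the MTU refresh apply; the selector and security matches
 * are skipped.
 */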
#ifdef CONFIG_AUDITSYSCALL
/* Audit addition and deletion of SAs and ipsec policy */
void xfrm_audit_log(uid_t auid, u32 sid, int type, int result,
		    struct xfrm_policy *xp, struct xfrm_state *x)
{
	char *secctx;
	u32 secctx_len;
	struct xfrm_sec_ctx *sctx = NULL;
	struct audit_buffer *audit_buf;
	int family;
	extern int audit_enabled;

	if (audit_enabled == 0)
		return;

	BUG_ON((type == AUDIT_MAC_IPSEC_ADDSA ||
		type == AUDIT_MAC_IPSEC_DELSA) && !x);
	BUG_ON((type == AUDIT_MAC_IPSEC_ADDSPD ||
		type == AUDIT_MAC_IPSEC_DELSPD) && !xp);

	audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC, type);
	if (audit_buf == NULL)
		return;

	switch(type) {
	case AUDIT_MAC_IPSEC_ADDSA:
		audit_log_format(audit_buf, "SAD add: auid=%u", auid);
		break;
	case AUDIT_MAC_IPSEC_DELSA:
		audit_log_format(audit_buf, "SAD delete: auid=%u", auid);
		break;
	case AUDIT_MAC_IPSEC_ADDSPD:
		audit_log_format(audit_buf, "SPD add: auid=%u", auid);
		break;
	case AUDIT_MAC_IPSEC_DELSPD:
		audit_log_format(audit_buf, "SPD delete: auid=%u", auid);
		break;
	default:
		return;
	}

	if (sid != 0 &&
	    security_secid_to_secctx(sid, &secctx, &secctx_len) == 0) {
		audit_log_format(audit_buf, " subj=%s", secctx);
		security_release_secctx(secctx, secctx_len);
	} else
		audit_log_task_context(audit_buf);

	if (xp) {
		family = xp->selector.family;
		if (xp->security)
			sctx = xp->security;
	} else {
		family = x->props.family;
		if (x->security)
			sctx = x->security;
	}

	if (sctx)
		audit_log_format(audit_buf,
				 " sec_alg=%u sec_doi=%u sec_obj=%s",
				 sctx->ctx_alg, sctx->ctx_doi, sctx->ctx_str);

	switch(family) {
	case AF_INET:
		{
			struct in_addr saddr, daddr;
			if (xp) {
				saddr.s_addr = xp->selector.saddr.a4;
				daddr.s_addr = xp->selector.daddr.a4;
			} else {
				saddr.s_addr = x->props.saddr.a4;
				daddr.s_addr = x->id.daddr.a4;
			}
			audit_log_format(audit_buf,
					 " src=%u.%u.%u.%u dst=%u.%u.%u.%u",
					 NIPQUAD(saddr), NIPQUAD(daddr));
		}
		break;
	case AF_INET6:
		{
			struct in6_addr saddr6, daddr6;
			if (xp) {
				memcpy(&saddr6, xp->selector.saddr.a6,
				       sizeof(struct in6_addr));
				memcpy(&daddr6, xp->selector.daddr.a6,
				       sizeof(struct in6_addr));
			} else {
				memcpy(&saddr6, x->props.saddr.a6,
				       sizeof(struct in6_addr));
				memcpy(&daddr6, x->id.daddr.a6,
				       sizeof(struct in6_addr));
			}
			audit_log_format(audit_buf,
					 " src=" NIP6_FMT " dst=" NIP6_FMT,
					 NIP6(saddr6), NIP6(daddr6));
		}
		break;
	}

	if (x)
		audit_log_format(audit_buf, " spi=%lu(0x%lx) protocol=%s",
				 (unsigned long)ntohl(x->id.spi),
				 (unsigned long)ntohl(x->id.spi),
				 x->id.proto == IPPROTO_AH ? "AH" :
				 (x->id.proto == IPPROTO_ESP ?
				  "ESP" : "IPCOMP"));

	audit_log_format(audit_buf, " res=%u", result);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL(xfrm_audit_log);
#endif /* CONFIG_AUDITSYSCALL */
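/*
 * Address-family glue: registration only fills in the dst_ops and
 * garbage_collect hooks that the family left NULL, and unregistration
 * clears them again, so a family may override any of the defaults
 * installed here.
 */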
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = __xfrm_garbage_collect;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;
			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	read_unlock(&xfrm_policy_afinfo_lock);
}

static struct xfrm_policy_afinfo *xfrm_policy_lock_afinfo(unsigned int family)
{
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		write_unlock_bh(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	write_unlock_bh(&xfrm_policy_afinfo_lock);
}
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_DOWN:
		xfrm_flush_bundles();
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	xfrm_dev_event,
	NULL,
	0
};

static void __init xfrm_policy_init(void)
{
	unsigned int hmask, sz;
	int dir;

	xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	xfrm_policy_byidx = xfrm_hash_alloc(sz);
	xfrm_idx_hmask = hmask;
	if (!xfrm_policy_byidx)
		panic("XFRM: failed to allocate byidx hash\n");

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		INIT_HLIST_HEAD(&xfrm_policy_inexact[dir]);

		htab = &xfrm_policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		htab->hmask = hmask;
		if (!htab->table)
			panic("XFRM: failed to allocate bydst hash\n");
	}

	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
	register_netdevice_notifier(&xfrm_dev_notifier);
}

void __init xfrm_init(void)
{
	xfrm_state_init();
	xfrm_policy_init();
	xfrm_input_init();
}
#ifdef CONFIG_XFRM_MIGRATE
static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
				       struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
				  sel_cmp->family) == 0 &&
		    xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
				  sel_cmp->family) == 0 &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return 1;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return 1;
		}
	}
	return 0;
}

static struct xfrm_policy * xfrm_migrate_policy_find(struct xfrm_selector *sel,
						     u8 dir, u8 type)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(&sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &xfrm_policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type &&
		    pol->priority < priority) {
			ret = pol;
			break;
		}
	}

	if (ret)
		xfrm_pol_hold(ret);

	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
					  m->old_family) == 0 &&
			    xfrm_addr_cmp(&t->saddr, &m->old_saddr,
					  m->old_family) == 0) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, template does not store
			   any IP addresses, hence we just compare mode and
			   protocol */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}
/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	struct dst_entry *dst;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			while ((dst = pol->bundles) != NULL) {
				pol->bundles = dst->next;
				dst_free(dst);
			}
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
				   m[i].old_family) == 0) &&
		    (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
				   m[i].old_family) == 0))
			return -EINVAL;
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}
int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			if ((xc = xfrm_state_migrate(x, mp))) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate);

	xfrm_pol_put(pol);

	return 0;

out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
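/*
 * Error handling note: the restore_state path above undoes a partial
 * migration by deleting any freshly created states (x_new) and dropping
 * the references taken on the originals (x_cur); the policy reference
 * is released on both the success and the failure path.
 */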
#endif