/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/ip.h>
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif

#include "xfrm_hash.h"

int sysctl_xfrm_larval_drop __read_mostly = 1;

#ifdef CONFIG_XFRM_STATISTICS
DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics) __read_mostly;
EXPORT_SYMBOL(xfrm_statistics);
#endif

DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_RWLOCK(xfrm_policy_lock);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static struct kmem_cache *xfrm_dst_cache __read_mostly;

static HLIST_HEAD(xfrm_policy_gc_list);
static DEFINE_SPINLOCK(xfrm_policy_gc_lock);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static void xfrm_init_pmtu(struct dst_entry *dst);
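
/*
 * Selector matching: a flow matches a selector when its addresses fall
 * inside the selector's source/destination prefixes, its ports agree
 * under the port masks, and the protocol and output interface either
 * match or are left as wildcards (zero) in the selector.
 */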
static inline int
__xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
	return  addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
		(fl->proto == sel->proto || !sel->proto) &&
		(fl->oif == sel->ifindex || !sel->ifindex);
}

static inline int
__xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
	return  addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
		(fl->proto == sel->proto || !sel->proto) &&
		(fl->oif == sel->ifindex || !sel->ifindex);
}

int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
			unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return 0;
}
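
/*
 * Route lookup through the address-family backend.  xfrm_dst_lookup()
 * additionally substitutes a state's care-of address when the transform
 * type uses a local or remote co-address, and copies the addresses it
 * actually used back to the caller on success.
 */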
static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
						  xfrm_address_t *saddr,
						  xfrm_address_t *daddr,
						  int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, saddr, daddr);

	xfrm_policy_put_afinfo(afinfo);

	return dst;
}

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
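
/*
 * Per-policy lifetime timer.  A hard add/use expiry deletes the policy
 * and notifies key managers with hard == 1; a soft expiry only warns
 * (km_policy_expired() with hard == 0) and retries XFRM_KM_TIMEOUT
 * seconds later.  Otherwise the timer is re-armed for whichever
 * lifetime limit comes due first.
 */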
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy*)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (xp->walk.dead)
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}

/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		atomic_set(&policy->refcnt, 1);
		setup_timer(&policy->timer, xfrm_policy_timer,
				(unsigned long)policy);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy xfrm_policy: descendant resources must be released to this moment. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	BUG_ON(policy->bundles);

	if (del_timer(&policy->timer))
		BUG();

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}
EXPORT_SYMBOL(xfrm_policy_destroy);
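
/*
 * Dead policies are not torn down inline.  xfrm_policy_kill() marks the
 * policy dead, parks it on xfrm_policy_gc_list and kicks a workqueue;
 * xfrm_policy_gc_task() then drops the cached bundles and the final
 * reference from process context, where dst_free() and
 * flow_cache_flush() are safe to run.
 */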
static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
	struct dst_entry *dst;

	while ((dst = policy->bundles) != NULL) {
		policy->bundles = dst->next;
		dst_free(dst);
	}

	if (del_timer(&policy->timer))
		atomic_dec(&policy->refcnt);

	if (atomic_read(&policy->refcnt) > 1)
		flow_cache_flush();

	xfrm_pol_put(policy);
}

static void xfrm_policy_gc_task(struct work_struct *work)
{
	struct xfrm_policy *policy;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_policy_gc_lock);
	gc_list.first = xfrm_policy_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_policy_gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst)
		xfrm_policy_gc_kill(policy);
}
static DECLARE_WORK(xfrm_policy_gc_work, xfrm_policy_gc_task);

/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must be unlinked from lists to the moment.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	int dead;

	write_lock_bh(&policy->lock);
	dead = policy->walk.dead;
	policy->walk.dead = 1;
	write_unlock_bh(&policy->lock);

	if (unlikely(dead)) {
		WARN_ON(1);
		return;
	}

	spin_lock_bh(&xfrm_policy_gc_lock);
	hlist_add_head(&policy->bydst, &xfrm_policy_gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	schedule_work(&xfrm_policy_gc_work);
}

static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

static struct hlist_head *policy_hash_bysel(struct net *net, struct xfrm_selector *sel, unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash = __sel_hash(sel, family, hmask);

	return (hash == hmask + 1 ?
		&net->xfrm.policy_inexact[dir] :
		net->xfrm.policy_bydst[dir].table + hash);
}

static struct hlist_head *policy_hash_direct(struct net *net, xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash = __addr_hash(daddr, saddr, family, hmask);

	return net->xfrm.policy_bydst[dir].table + hash;
}

static void xfrm_dst_hash_transfer(struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;

redo:
	hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
		unsigned int h;

		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask);
		if (!entry0) {
			hlist_del(entry);
			hlist_add_head(&pol->bydst, ndsttable+h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del(entry);
			hlist_add_after(entry0, &pol->bydst);
		}
		entry0 = entry;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}
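
/*
 * Hash table resizing.  Each resize doubles the bucket count (see
 * xfrm_new_hash_mask(): an hmask of 1023 becomes 2047), rehashes every
 * chain into the new table under xfrm_policy_lock, then frees the old
 * table.  Tables grow when the policy count exceeds the bucket count,
 * bounded by xfrm_policy_hashmax buckets.
 */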
static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	int i;

	if (!ndst)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);

	net->xfrm.policy_bydst[dir].table = ndst;
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct xfrmk_spdinfo *si)
{
	read_lock_bh(&xfrm_policy_lock);
	si->incnt = init_net.xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = init_net.xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = init_net.xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
	read_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_spd_getinfo);
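
/*
 * Deferred resize worker, scheduled via net->xfrm.policy_hash_work
 * whenever a by-destination table looks overloaded.  Both SPD and
 * per-socket directions (XFRM_POLICY_MAX * 2) are checked, and the
 * by-index table is resized against the total policy count.
 */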
static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net, total);

	mutex_unlock(&hash_resize_mutex);
}

/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(struct net *net, int dir)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_node *entry;
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, entry, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}
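
/*
 * Insert a policy into its per-direction hash chain, which is kept
 * sorted by ascending priority.  A policy with the same type, selector
 * and security context replaces the existing entry (or fails with
 * -EEXIST when 'excl' is set); the new policy takes over the replaced
 * entry's index, and the old one is killed.  Cached bundles of every
 * policy behind the insertion point are flushed, since the newcomer may
 * now shadow them.
 */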
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *pol;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct hlist_node *entry, *newpos;
	struct dst_entry *gc_list;

	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	delpol = NULL;
	newpos = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == policy->type &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst;
			continue;
		}
		if (delpol)
			break;
	}
	if (newpos)
		hlist_add_after(newpos, &policy->bydst);
	else
		hlist_add_head(&policy->bydst, chain);
	xfrm_pol_hold(policy);
	net->xfrm.policy_count[dir]++;
	atomic_inc(&flow_cache_genid);
	if (delpol) {
		hlist_del(&delpol->bydst);
		hlist_del(&delpol->byidx);
		list_del(&delpol->walk.all);
		net->xfrm.policy_count[dir]--;
	}
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = get_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	list_add(&policy->walk.all, &net->xfrm.policy_all);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	read_lock_bh(&xfrm_policy_lock);
	gc_list = NULL;
	entry = &policy->bydst;
	hlist_for_each_entry_continue(policy, entry, bydst) {
		struct dst_entry *dst;

		write_lock(&policy->lock);
		dst = policy->bundles;
		if (dst) {
			struct dst_entry *tail = dst;
			while (tail->next)
				tail = tail->next;
			tail->next = gc_list;
			gc_list = dst;

			policy->bundles = NULL;
		}
		write_unlock(&policy->lock);
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;

		gc_list = dst->next;
		dst_free(dst);
	}

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);
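
/*
 * Exact lookups for userspace get/delete requests: by (type, selector,
 * security context) or by policy index.  With 'delete' set the entry is
 * unlinked under the policy lock, subject to an LSM permission check,
 * and the caller receives the unlinked policy with a reference held.
 */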
struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir,
					  struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == type &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				list_del(&pol->walk.all);
				net->xfrm.policy_count[dir]--;
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(ret);
	}
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(struct net *net, u8 type, int dir, u32 id,
				     int delete, int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, byidx) {
		if (pol->type == type && pol->index == id) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				list_del(&pol->walk.all);
				net->xfrm.policy_count[dir]--;
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(ret);
	}
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i;

		hlist_for_each_entry(pol, entry,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			err = security_xfrm_policy_delete(pol->security);
			if (err) {
				xfrm_audit_policy_delete(pol, 0,
							 audit_info->loginuid,
							 audit_info->sessionid,
							 audit_info->secid);
				return err;
			}
		}
		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				err = security_xfrm_policy_delete(
								pol->security);
				if (err) {
					xfrm_audit_policy_delete(pol, 0,
							audit_info->loginuid,
							audit_info->sessionid,
							audit_info->secid);
					return err;
				}
			}
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif

int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0;

	write_lock_bh(&xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, audit_info);
	if (err)
		goto out;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i, killed;

		killed = 0;
	again1:
		hlist_for_each_entry(pol, entry,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			hlist_del(&pol->bydst);
			hlist_del(&pol->byidx);
			write_unlock_bh(&xfrm_policy_lock);

			xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
						 audit_info->sessionid,
						 audit_info->secid);

			xfrm_policy_kill(pol);
			killed++;

			write_lock_bh(&xfrm_policy_lock);
			goto again1;
		}

		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
	again2:
			hlist_for_each_entry(pol, entry,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				list_del(&pol->walk.all);
				write_unlock_bh(&xfrm_policy_lock);

				xfrm_audit_policy_delete(pol, 1,
							 audit_info->loginuid,
							 audit_info->sessionid,
							 audit_info->secid);
				xfrm_policy_kill(pol);
				killed++;

				write_lock_bh(&xfrm_policy_lock);
				goto again2;
			}
		}

		net->xfrm.policy_count[dir] -= killed;
	}
	atomic_inc(&flow_cache_genid);
out:
	write_unlock_bh(&xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);
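
/*
 * Resumable SPD dump.  The walker is itself a list entry on
 * net->xfrm.policy_all, so a dump interrupted by a full buffer can be
 * resumed: on error the walker is moved just behind the failing entry
 * and the next call continues from there.  Entries marked dead are
 * skipped.
 */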
int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	write_lock_bh(&xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	write_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	INIT_LIST_HEAD(&walk->walk.all);
	walk->walk.dead = 1;
	walk->type = type;
	walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
{
	if (list_empty(&walk->walk.all))
		return;

	write_lock_bh(&xfrm_policy_lock);
	list_del(&walk->walk.all);
	write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);

/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
			     u8 type, u16 family, int dir)
{
	struct xfrm_selector *sel = &pol->selector;
	int match, ret = -ESRCH;

	if (pol->family != family ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol->security, fl->secid,
						  dir);

	return ret;
}

static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
						     struct flowi *fl,
						     u16 family, u8 dir)
{
	int err;
	struct xfrm_policy *pol, *ret;
	xfrm_address_t *daddr, *saddr;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(net, daddr, saddr, family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else if (pol->priority < priority) {
			ret = pol;
			break;
		}
	}
	if (ret)
		xfrm_pol_hold(ret);
fail:
	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static int xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family,
			      u8 dir, void **objp, atomic_t **obj_refp)
{
	struct xfrm_policy *pol;
	int err = 0;

#ifdef CONFIG_XFRM_SUB_POLICY
	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
	if (IS_ERR(pol)) {
		err = PTR_ERR(pol);
		pol = NULL;
	}
	if (pol || err)
		goto end;
#endif
	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
	if (IS_ERR(pol)) {
		err = PTR_ERR(pol);
		pol = NULL;
	}
#ifdef CONFIG_XFRM_SUB_POLICY
end:
#endif
	if ((*objp = (void *) pol) != NULL)
		*obj_refp = &pol->refcnt;
	return err;
}

static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	}
}
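
/*
 * Per-socket policy lookup.  Policies attached via sk->sk_policy[] are
 * consulted before the SPD (see __xfrm_lookup()): the policy is
 * returned with a reference held if its selector matches the flow and
 * the LSM check passes; an LSM result of -ESRCH is treated as
 * "no policy".
 */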
static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		int match = xfrm_selector_match(&pol->selector, fl,
						sk->sk_family);
		int err = 0;

		if (match) {
			err = security_xfrm_policy_lookup(pol->security,
						      fl->secid,
						      policy_to_flow_dir(dir));
			if (!err)
				xfrm_pol_hold(pol);
			else if (err == -ESRCH)
				pol = NULL;
			else
				pol = ERR_PTR(err);
		} else
			pol = NULL;
	}
	read_unlock_bh(&xfrm_policy_lock);

	return pol;
}

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	struct net *net = xp_net(pol);
	struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
						     pol->family, dir);

	list_add(&pol->walk.all, &net->xfrm.policy_all);
	hlist_add_head(&pol->bydst, chain);
	hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
	net->xfrm.policy_count[dir]++;
	xfrm_pol_hold(pol);

	if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct net *net = xp_net(pol);

	if (hlist_unhashed(&pol->bydst))
		return NULL;

	hlist_del(&pol->bydst);
	hlist_del(&pol->byidx);
	list_del(&pol->walk.all);
	net->xfrm.policy_count[dir]--;

	return pol;
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		if (dir < XFRM_POLICY_MAX)
			atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct net *net = xp_net(pol);
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = get_seconds();
		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}
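
/*
 * Duplicate a socket policy; __xfrm_sk_clone_policy() runs this for
 * both directions when a socket is cloned.  The copy inherits selector,
 * lifetimes, templates, index and security context, and is linked into
 * the socket direction of the SPD.  A NULL return means the security
 * context could not be cloned (ENOMEM).
 */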
static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old->security,
					       &newp->security)) {
			kfree(newp);
			return NULL;  /* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}

static int
xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
	       unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->get_saddr(net, local, remote);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
		      struct xfrm_state **xfrm,
		      unsigned short family)
{
	struct net *net = xp_net(policy);
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL ||
		    tmpl->mode == XFRM_MODE_BEET) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			family = tmpl->encap_family;
			if (xfrm_addr_any(local, family)) {
				error = xfrm_get_saddr(net, &tmp, remote, family);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}
		else if (error == -ESRCH)
			error = -EAGAIN;

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx>=0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
		  struct xfrm_state **xfrm,
		  unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

fail:
	for (cnx--; cnx>=0; cnx--)
		xfrm_state_put(tpp[cnx]);
	return error;
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
{
	struct dst_entry *x;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EINVAL);
	x = afinfo->find_bundle(fl, policy);
	xfrm_policy_put_afinfo(afinfo);
	return x;
}

static inline int xfrm_get_tos(struct flowi *fl, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int tos;

	if (!afinfo)
		return -EINVAL;

	tos = afinfo->get_tos(fl);

	xfrm_policy_put_afinfo(afinfo);

	return tos;
}

static inline struct xfrm_dst *xfrm_alloc_dst(int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct xfrm_dst *xdst;

	if (!afinfo)
		return ERR_PTR(-EINVAL);

	xdst = dst_alloc(afinfo->dst_ops) ?: ERR_PTR(-ENOBUFS);

	xfrm_policy_put_afinfo(afinfo);

	return xdst;
}

static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
				 int nfheader_len)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(dst->ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->init_path(path, dst, nfheader_len);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->fill_dst(xdst, dev);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */

static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
					    struct xfrm_state **xfrm, int nx,
					    struct flowi *fl,
					    struct dst_entry *dst)
{
	unsigned long now = jiffies;
	struct net_device *dev;
	struct dst_entry *dst_prev = NULL;
	struct dst_entry *dst0 = NULL;
	int i = 0;
	int err;
	int header_len = 0;
	int nfheader_len = 0;
	int trailer_len = 0;
	int tos;
	int family = policy->selector.family;
	xfrm_address_t saddr, daddr;

	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);

	tos = xfrm_get_tos(fl, family);
	err = tos;
	if (tos < 0)
		goto put_states;

	dst_hold(dst);

	for (; i < nx; i++) {
		struct xfrm_dst *xdst = xfrm_alloc_dst(family);
		struct dst_entry *dst1 = &xdst->u.dst;

		err = PTR_ERR(xdst);
		if (IS_ERR(xdst)) {
			dst_release(dst);
			goto put_states;
		}

		if (!dst_prev)
			dst0 = dst1;
		else {
			dst_prev->child = dst_clone(dst1);
			dst1->flags |= DST_NOHASH;
		}

		xdst->route = dst;
		memcpy(&dst1->metrics, &dst->metrics, sizeof(dst->metrics));

		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
			family = xfrm[i]->props.family;
			dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
					      family);
			err = PTR_ERR(dst);
			if (IS_ERR(dst))
				goto put_states;
		} else
			dst_hold(dst);

		dst1->xfrm = xfrm[i];
		xdst->genid = xfrm[i]->genid;

		dst1->obsolete = -1;
		dst1->flags |= DST_HOST;
		dst1->lastuse = now;

		dst1->input = dst_discard;
		dst1->output = xfrm[i]->outer_mode->afinfo->output;

		dst1->next = dst_prev;
		dst_prev = dst1;

		header_len += xfrm[i]->props.header_len;
		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
			nfheader_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;
	}

	dst_prev->child = dst;
	dst0->path = dst;

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	/* Copy neighbour for reachability confirmation */
	dst0->neighbour = neigh_clone(dst->neighbour);

	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
	xfrm_init_pmtu(dst_prev);

	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;

		err = xfrm_fill_dst(xdst, dev);
		if (err)
			goto free_dst;

		dst_prev->header_len = header_len;
		dst_prev->trailer_len = trailer_len;
		header_len -= xdst->u.dst.xfrm->props.header_len;
		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
	}

out:
	return dst0;

put_states:
	for (; i < nx; i++)
		xfrm_state_put(xfrm[i]);
free_dst:
	if (dst0)
		dst_free(dst0);
	dst0 = ERR_PTR(err);
	goto out;
}

static int inline
xfrm_dst_alloc_copy(void **target, void *src, int size)
{
	if (!*target) {
		*target = kmalloc(size, GFP_ATOMIC);
		if (!*target)
			return -ENOMEM;
	}
	memcpy(*target, src, size);
	return 0;
}

static int inline
xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
				   sel, sizeof(*sel));
#else
	return 0;
#endif
}

static int inline
xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
#else
	return 0;
#endif
}
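
/*
 * With CONFIG_XFRM_SUB_POLICY the originating flow (and, for two-policy
 * bundles, the sub policy's selector) is cached in the xfrm_dst so a
 * later lookup can re-validate the bundle against both policies;
 * without sub-policy support these helpers are no-ops.
 */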

static int stale_bundle(struct dst_entry *dst);

/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
  1306. int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
  1307. struct sock *sk, int flags)
  1308. {
  1309. struct xfrm_policy *policy;
  1310. struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
  1311. int npols;
  1312. int pol_dead;
  1313. int xfrm_nr;
  1314. int pi;
  1315. struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
  1316. struct dst_entry *dst, *dst_orig = *dst_p;
  1317. int nx = 0;
  1318. int err;
  1319. u32 genid;
  1320. u16 family;
  1321. u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
  1322. restart:
  1323. genid = atomic_read(&flow_cache_genid);
  1324. policy = NULL;
  1325. for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
  1326. pols[pi] = NULL;
  1327. npols = 0;
  1328. pol_dead = 0;
  1329. xfrm_nr = 0;
  1330. if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
  1331. policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
  1332. err = PTR_ERR(policy);
  1333. if (IS_ERR(policy)) {
  1334. XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLERROR);
  1335. goto dropdst;
  1336. }
  1337. }
  1338. if (!policy) {
  1339. /* To accelerate a bit... */
  1340. if ((dst_orig->flags & DST_NOXFRM) ||
  1341. !net->xfrm.policy_count[XFRM_POLICY_OUT])
  1342. goto nopol;
  1343. policy = flow_cache_lookup(net, fl, dst_orig->ops->family,
  1344. dir, xfrm_policy_lookup);
  1345. err = PTR_ERR(policy);
  1346. if (IS_ERR(policy)) {
  1347. XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLERROR);
  1348. goto dropdst;
  1349. }
  1350. }
  1351. if (!policy)
  1352. goto nopol;
  1353. family = dst_orig->ops->family;
  1354. pols[0] = policy;
  1355. npols ++;
  1356. xfrm_nr += pols[0]->xfrm_nr;
  1357. err = -ENOENT;
  1358. if ((flags & XFRM_LOOKUP_ICMP) && !(policy->flags & XFRM_POLICY_ICMP))
  1359. goto error;
  1360. policy->curlft.use_time = get_seconds();
  1361. switch (policy->action) {
  1362. default:
  1363. case XFRM_POLICY_BLOCK:
  1364. /* Prohibit the flow */
  1365. XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLBLOCK);
  1366. err = -EPERM;
  1367. goto error;
  1368. case XFRM_POLICY_ALLOW:
  1369. #ifndef CONFIG_XFRM_SUB_POLICY
  1370. if (policy->xfrm_nr == 0) {
  1371. /* Flow passes not transformed. */
  1372. xfrm_pol_put(policy);
  1373. return 0;
  1374. }
  1375. #endif
  1376. /* Try to find matching bundle.
  1377. *
  1378. * LATER: help from flow cache. It is optional, this
  1379. * is required only for output policy.
  1380. */
  1381. dst = xfrm_find_bundle(fl, policy, family);
  1382. if (IS_ERR(dst)) {
  1383. XFRM_INC_STATS(LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
  1384. err = PTR_ERR(dst);
  1385. goto error;
  1386. }
  1387. if (dst)
  1388. break;
  1389. #ifdef CONFIG_XFRM_SUB_POLICY
  1390. if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
  1391. pols[1] = xfrm_policy_lookup_bytype(net,
  1392. XFRM_POLICY_TYPE_MAIN,
  1393. fl, family,
  1394. XFRM_POLICY_OUT);
  1395. if (pols[1]) {
  1396. if (IS_ERR(pols[1])) {
  1397. XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLERROR);
  1398. err = PTR_ERR(pols[1]);
  1399. goto error;
  1400. }
  1401. if (pols[1]->action == XFRM_POLICY_BLOCK) {
  1402. XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLBLOCK);
  1403. err = -EPERM;
  1404. goto error;
  1405. }
  1406. npols ++;
  1407. xfrm_nr += pols[1]->xfrm_nr;
  1408. }
  1409. }
		/*
		 * Neither the flowi nor the bundle carries the number of
		 * transformation templates, so when more than one policy
		 * is in use we can only tell whether all of them are
		 * bypass policies after every policy has been looked up.
		 * That is also why the not-transformed bypass above is
		 * limited to the non-sub-policy configuration.
		 */
		if (xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}

#endif
		nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

		if (unlikely(nx<0)) {
			err = nx;
			if (err == -EAGAIN && sysctl_xfrm_larval_drop) {
				/* EREMOTE tells the caller to generate
				 * a one-shot blackhole route.
				 */
				XFRM_INC_STATS(LINUX_MIB_XFRMOUTNOSTATES);
				xfrm_pol_put(policy);
				return -EREMOTE;
			}
			if (err == -EAGAIN && (flags & XFRM_LOOKUP_WAIT)) {
				DECLARE_WAITQUEUE(wait, current);

				add_wait_queue(&net->xfrm.km_waitq, &wait);
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&net->xfrm.km_waitq, &wait);

				nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

				if (nx == -EAGAIN && signal_pending(current)) {
					XFRM_INC_STATS(LINUX_MIB_XFRMOUTNOSTATES);
					err = -ERESTART;
					goto error;
				}
				if (nx == -EAGAIN ||
				    genid != atomic_read(&flow_cache_genid)) {
					xfrm_pols_put(pols, npols);
					goto restart;
				}
				err = nx;
			}
			if (err < 0) {
				XFRM_INC_STATS(LINUX_MIB_XFRMOUTNOSTATES);
				goto error;
			}
		}
		if (nx == 0) {
			/* Flow passes not transformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}

		dst = xfrm_bundle_create(policy, xfrm, nx, fl, dst_orig);
		err = PTR_ERR(dst);
		if (IS_ERR(dst)) {
			XFRM_INC_STATS(LINUX_MIB_XFRMOUTBUNDLEGENERROR);
			goto error;
		}

		for (pi = 0; pi < npols; pi++) {
			read_lock_bh(&pols[pi]->lock);
			pol_dead |= pols[pi]->walk.dead;
			read_unlock_bh(&pols[pi]->lock);
		}

		write_lock_bh(&policy->lock);
		if (unlikely(pol_dead || stale_bundle(dst))) {
			/* Wow! While we worked on resolving, this
			 * policy went away. Retry. It is not paranoia:
			 * we simply cannot attach a new bundle to a dead
			 * object, and we cannot attach stale bundles either.
			 */
			write_unlock_bh(&policy->lock);
			dst_free(dst);

			if (pol_dead)
				XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLDEAD);
			else
				XFRM_INC_STATS(LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
			err = -EHOSTUNREACH;
			goto error;
		}

		if (npols > 1)
			err = xfrm_dst_update_parent(dst, &pols[1]->selector);
		else
			err = xfrm_dst_update_origin(dst, fl);
		if (unlikely(err)) {
			write_unlock_bh(&policy->lock);
			dst_free(dst);
			XFRM_INC_STATS(LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
			goto error;
		}

		dst->next = policy->bundles;
		policy->bundles = dst;
		dst_hold(dst);
		write_unlock_bh(&policy->lock);
	}
	*dst_p = dst;
	dst_release(dst_orig);
	xfrm_pols_put(pols, npols);
	return 0;

error:
	xfrm_pols_put(pols, npols);
dropdst:
	dst_release(dst_orig);
	*dst_p = NULL;
	return err;

nopol:
	err = -ENOENT;
	if (flags & XFRM_LOOKUP_ICMP)
		goto dropdst;
	return 0;
}
EXPORT_SYMBOL(__xfrm_lookup);

int xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
{
	int err = __xfrm_lookup(net, dst_p, fl, sk, flags);

	if (err == -EREMOTE) {
		dst_release(*dst_p);
		*dst_p = NULL;
		err = -EAGAIN;
	}

	return err;
}
EXPORT_SYMBOL(xfrm_lookup);
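
/*
 * Illustrative sketch, not part of the original file: the usual calling
 * pattern for xfrm_lookup(), assuming the caller has already routed the
 * packet (so skb->dst holds a plain IP route) and filled in the flow key.
 * The helper name example_output_lookup() is made up for illustration;
 * __xfrm_route_forward() below is a real in-tree caller of this kind.
 */
#if 0
static int example_output_lookup(struct net *net, struct sk_buff *skb,
				 struct flowi *fl, struct sock *sk)
{
	/* On success skb->dst is either the original route (no policy
	 * applied) or the freshly resolved xfrm bundle; on error the
	 * original route has already been released and skb->dst zeroed.
	 */
	return xfrm_lookup(net, &skb->dst, fl, sk, XFRM_LOOKUP_WAIT);
}
#endif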

static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
{
	struct xfrm_state *x;

	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
		return 0;
	x = skb->sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	return x->type->reject(x, skb, fl);
}

/* When the skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we do this in a maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have their policy cached at them.
 */
static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}

/*
 * A value >= 0 is returned when validation succeeds: either the template
 * was bypassed (optional transport mode) or it is the next index after
 * the secpath state that matched the template.
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-idx;
			break;
		}
	}
	return start;
}
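
/*
 * Illustrative sketch, not part of the original file: how a caller
 * decodes the return-value convention above. "tmpl", "sp", "k",
 * "family" and "xerr_idx" are assumed to come from the surrounding
 * policy check, as in __xfrm_policy_check() below.
 */
#if 0
	k = xfrm_policy_ok(tmpl, sp, k, family);
	if (k >= 0) {
		/* matched (or optional bypass); continue from index k */
	} else if (k == -1) {
		/* no secpath state satisfies this template */
	} else {
		/* "-2 - errored_index": reject the state at this index */
		xerr_idx = -(2 + k);
	}
#endif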

int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl, reverse);
	err = security_xfrm_decode_session(skb, &fl->secid);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(__xfrm_decode_session);

static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}

int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	int reverse;
	struct flowi fl;
	u8 fl_dir;
	int xerr_idx = -1;

	reverse = dir & ~XFRM_POLICY_MASK;
	dir &= XFRM_POLICY_MASK;
	fl_dir = policy_to_flow_dir(dir);

	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
		XFRM_INC_STATS(LINUX_MIB_XFRMINHDRERROR);
		return 0;
	}

	nf_nat_decode_session(skb, &fl, family);

	/* First, check used SA against their selectors. */
	if (skb->sp) {
		int i;

		for (i=skb->sp->len-1; i>=0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family)) {
				XFRM_INC_STATS(LINUX_MIB_XFRMINSTATEMISMATCH);
				return 0;
			}
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
		if (IS_ERR(pol)) {
			XFRM_INC_STATS(LINUX_MIB_XFRMINPOLERROR);
			return 0;
		}
	}

	if (!pol)
		pol = flow_cache_lookup(net, &fl, family, fl_dir,
					xfrm_policy_lookup);

	if (IS_ERR(pol)) {
		XFRM_INC_STATS(LINUX_MIB_XFRMINPOLERROR);
		return 0;
	}

	if (!pol) {
		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			XFRM_INC_STATS(LINUX_MIB_XFRMINNOPOLS);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = get_seconds();

	pols[0] = pol;
	npols ++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				XFRM_INC_STATS(LINUX_MIB_XFRMINPOLERROR);
				return 0;
			}
			pols[1]->curlft.use_time = get_seconds();
			npols ++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW) {
				XFRM_INC_STATS(LINUX_MIB_XFRMINPOLBLOCK);
				goto reject;
			}
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
				XFRM_INC_STATS(LINUX_MIB_XFRMINBUFFERERROR);
				goto reject_error;
			}
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				XFRM_INC_STATS(LINUX_MIB_XFRMINTMPLMISMATCH);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
			XFRM_INC_STATS(LINUX_MIB_XFRMINTMPLMISMATCH);
			goto reject;
		}

		xfrm_pols_put(pols, npols);
		return 1;
	}
	XFRM_INC_STATS(LINUX_MIB_XFRMINPOLBLOCK);

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);

int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct flowi fl;

	if (xfrm_decode_session(skb, &fl, family) < 0) {
		/* XXX: we should have something like FWDHDRERROR here. */
		XFRM_INC_STATS(LINUX_MIB_XFRMINHDRERROR);
		return 0;
	}

	return xfrm_lookup(net, &skb->dst, &fl, NULL, 0) == 0;
}
EXPORT_SYMBOL(__xfrm_route_forward);

/* Optimize later using cookies and generation ids. */
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to "-1" to force all XFRM destinations to get validated by
	 * dst_ops->check on every use.  We do this because when a
	 * normal route referenced by an XFRM dst is obsoleted we do
	 * not go looking around for all parent referencing XFRM dsts
	 * so that we can invalidate them.  It is just too much work.
	 * Instead we make the checks here on every use.  For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example).  If X is marked obsolete, "A" will not
	 * notice.  That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a policy's bundle is pruned, we dst_free() the XFRM
	 * dst which causes its ->obsolete field to be set to a
	 * positive non-zero integer.  If an XFRM dst has been pruned
	 * like this, we want to force a new route lookup.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}
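
/*
 * Illustrative sketch, not part of the original file: because xfrm dsts
 * are created with ->obsolete set to -1, the generic dst_check() helper
 * (also used by xfrm_bundle_ok() below) always calls back into
 * xfrm_dst_check(), which re-validates the whole bundle.  "dst" here is
 * assumed to be a cached bundle the caller already holds a reference on.
 */
#if 0
	if (!dst_check(dst, 0)) {
		/* Bundle went stale or was pruned: drop the reference
		 * and redo xfrm_lookup() to build a fresh one.
		 */
		dst_release(dst);
		dst = NULL;
	}
#endif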

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
}

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = dev_net(dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such a dst must be popped before it reaches the
	 * point of failure.
	 */
	return;
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_entry *), struct dst_entry **gc_list_p)
{
	struct dst_entry *dst, **dstp;

	write_lock(&pol->lock);
	dstp = &pol->bundles;
	while ((dst=*dstp) != NULL) {
		if (func(dst)) {
			*dstp = dst->next;
			dst->next = *gc_list_p;
			*gc_list_p = dst;
		} else {
			dstp = &dst->next;
		}
	}
	write_unlock(&pol->lock);
}
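
/* Walk every policy (both the inexact lists and the per-direction hash
 * tables) and let prune_one_bundle() unlink the bundles selected by
 * "func" onto a private gc_list while the policy locks are held; the
 * unlinked bundles are then dst_free()d outside of xfrm_policy_lock.
 */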
static void xfrm_prune_bundles(struct net *net, int (*func)(struct dst_entry *))
{
	struct dst_entry *gc_list = NULL;
	int dir;

	read_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		struct hlist_head *table;
		int i;

		hlist_for_each_entry(pol, entry,
				     &net->xfrm.policy_inexact[dir], bydst)
			prune_one_bundle(pol, func, &gc_list);

		table = net->xfrm.policy_bydst[dir].table;
		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry, table + i, bydst)
				prune_one_bundle(pol, func, &gc_list);
		}
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;
		gc_list = dst->next;
		dst_free(dst);
	}
}

static int unused_bundle(struct dst_entry *dst)
{
	return !atomic_read(&dst->__refcnt);
}

static void __xfrm_garbage_collect(struct net *net)
{
	xfrm_prune_bundles(net, unused_bundle);
}

static int xfrm_flush_bundles(struct net *net)
{
	xfrm_prune_bundles(net, stale_bundle);
	return 0;
}

static void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst->metrics[RTAX_MTU-1] = pmtu;
	} while ((dst = dst->next));
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */
int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
		   struct flowi *fl, int family, int strict)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (fl) {
		if (first->origin && !flow_cache_uli_match(first->origin, fl))
			return 0;
		if (first->partner &&
		    !xfrm_selector_match(first->partner, fl, family))
			return 0;
	}
#endif

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
			return 0;
		if (fl && pol &&
		    !security_xfrm_state_pol_flow_match(dst->xfrm, pol, fl))
			return 0;
		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->genid != dst->xfrm->genid)
			return 0;

		if (strict && fl &&
		    !(dst->xfrm->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
		    !xfrm_state_addr_flow_check(dst->xfrm, fl, family))
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst->metrics[RTAX_MTU-1] = mtu;

		if (last == first)
			break;

		last = (struct xfrm_dst *)last->u.dst.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}
EXPORT_SYMBOL(xfrm_bundle_ok);

int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = __xfrm_garbage_collect;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);
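
/*
 * Illustrative sketch, not part of the original file: how an address
 * family module typically plugs into the policy layer.  Only the fields
 * touched by xfrm_policy_register_afinfo() above are shown; the names
 * example_dst_ops / example_policy_afinfo are made up, and a real afinfo
 * (e.g. the IPv4 one) supplies more hooks such as decode_session.
 */
#if 0
static struct dst_ops example_dst_ops = {
	.family	= AF_INET,
};

static struct xfrm_policy_afinfo example_policy_afinfo = {
	.family		= AF_INET,
	.dst_ops	= &example_dst_ops,
	/* .decode_session and the other AF hooks are supplied here */
};

static int __init example_policy_afinfo_init(void)
{
	/* kmem_cachep, check, negative_advice, link_failure and the
	 * garbage_collect hook are filled in by the register call.
	 */
	return xfrm_policy_register_afinfo(&example_policy_afinfo);
}
#endif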

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;
			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	read_unlock(&xfrm_policy_afinfo_lock);
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_DOWN:
		xfrm_flush_bundles(dev_net(dev));
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call = xfrm_dev_event,
};

#ifdef CONFIG_XFRM_STATISTICS
static int __init xfrm_statistics_init(void)
{
	if (snmp_mib_init((void **)xfrm_statistics,
			  sizeof(struct linux_xfrm_mib)) < 0)
		return -ENOMEM;
	return 0;
}
#endif

static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir;

	if (net_eq(net, &init_net))
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
	}

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	if (net_eq(net, &init_net))
		register_netdevice_notifier(&xfrm_dev_notifier);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}

static void xfrm_policy_fini(struct net *net)
{
	unsigned int sz;
	int dir;

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
}

static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	return 0;

out_policy:
	xfrm_state_fini(net);
out_state:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
#ifdef CONFIG_XFRM_STATISTICS
	xfrm_statistics_init();
#endif
	xfrm_input_init();
#ifdef CONFIG_XFRM_STATISTICS
	xfrm_proc_init();
#endif
}

#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch(sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
			   uid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      uid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif

#ifdef CONFIG_XFRM_MIGRATE
static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
				       struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
				  sel_cmp->family) == 0 &&
		    xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
				  sel_cmp->family) == 0 &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return 1;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return 1;
		}
	}
	return 0;
}

static struct xfrm_policy * xfrm_migrate_policy_find(struct xfrm_selector *sel,
						     u8 dir, u8 type)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &init_net.xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type &&
		    pol->priority < priority) {
			ret = pol;
			break;
		}
	}

	if (ret)
		xfrm_pol_hold(ret);

	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
					  m->old_family) == 0 &&
			    xfrm_addr_cmp(&t->saddr, &m->old_saddr,
					  m->old_family) == 0) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* In transport mode the template does not store
			 * any IP addresses, so we only compare mode and
			 * protocol.
			 */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	struct dst_entry *dst;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			while ((dst = pol->bundles) != NULL) {
				pol->bundles = dst->next;
				dst_free(dst);
			}
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
				   m[i].old_family) == 0) &&
		    (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
				   m[i].old_family) == 0))
			return -EINVAL;
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}

int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			if ((xc = xfrm_state_migrate(x, mp))) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
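
/*
 * Illustrative sketch, not part of the original file: a single-entry
 * migration request moving a tunnel-mode SA/policy pair to a new
 * endpoint.  The field names follow the checks above; "sel" and the
 * old/new addresses are assumed to be filled in by the caller (in-tree
 * this is driven from the PF_KEY/netlink MIGRATE messages).
 */
#if 0
	struct xfrm_migrate mig = {
		.proto		= IPPROTO_ESP,
		.mode		= XFRM_MODE_TUNNEL,
		.reqid		= 0,		/* 0 matches any reqid */
		.old_family	= AF_INET,
		.new_family	= AF_INET,
		/* old_daddr/old_saddr/new_daddr/new_saddr set by caller */
	};
	int err;

	err = xfrm_migrate(&sel, XFRM_POLICY_OUT, XFRM_POLICY_TYPE_MAIN,
			   &mig, 1, NULL);
#endif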
#endif