xfrm_policy.c

/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/xfrm.h>
#include <net/ip.h>
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif

#include "xfrm_hash.h"

DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
static struct dst_entry *xfrm_policy_sk_bundles;
static DEFINE_RWLOCK(xfrm_policy_lock);

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __read_mostly;

static void xfrm_init_pmtu(struct dst_entry *dst);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}

static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}

bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= NPROTO))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	rcu_read_unlock();
}

static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
						  const xfrm_address_t *saddr,
						  const xfrm_address_t *daddr,
						  int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, saddr, daddr);

	xfrm_policy_put_afinfo(afinfo);

	return dst;
}

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

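/* Per-policy lifetime timer: compute the nearest soft/hard add/use expiry,
 * warn key managers on soft expiry, delete the policy on hard expiry, and
 * re-arm the timer for the next deadline. */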
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy *)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}

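/* Flow cache callbacks for cached policy lookups: take a reference while a
 * live policy is handed out from the cache, report dead policies as stale,
 * and drop the cache's reference when an entry is evicted. */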
static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
{
	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);

	if (unlikely(pol->walk.dead))
		flo = NULL;
	else
		xfrm_pol_hold(pol);

	return flo;
}

static int xfrm_policy_flo_check(struct flow_cache_object *flo)
{
	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);

	return !pol->walk.dead;
}

static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
{
	xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
}

static const struct flow_cache_ops xfrm_policy_fc_ops = {
	.get = xfrm_policy_flo_get,
	.check = xfrm_policy_flo_check,
	.delete = xfrm_policy_flo_delete,
};

/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		atomic_set(&policy->refcnt, 1);
		setup_timer(&policy->timer, xfrm_policy_timer,
				(unsigned long)policy);
		policy->flo.ops = &xfrm_policy_fc_ops;
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy xfrm_policy: descendant resources must be released to this moment. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer))
		BUG();

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}
EXPORT_SYMBOL(xfrm_policy_destroy);

/* Rule must be locked. Release descendant resources, announce
 * the entry dead. The rule must already be unlinked from the lists.
 */
static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	policy->walk.dead = 1;

	atomic_inc(&policy->genid);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}

static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash = __sel_hash(sel, family, hmask);

	return (hash == hmask + 1 ?
		&net->xfrm.policy_inexact[dir] :
		net->xfrm.policy_bydst[dir].table + hash);
}

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash = __addr_hash(daddr, saddr, family, hmask);

	return net->xfrm.policy_bydst[dir].table + hash;
}

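/* Rehash every policy on one bydst chain into the new table, keeping the
 * relative order of entries that land in the same new bucket. */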
static void xfrm_dst_hash_transfer(struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;

redo:
	hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
		unsigned int h;

		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask);
		if (!entry0) {
			hlist_del(entry);
			hlist_add_head(&pol->bydst, ndsttable+h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del(entry);
			hlist_add_after(entry0, &pol->bydst);
		}
		entry0 = entry;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}

static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	int i;

	if (!ndst)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);

	net->xfrm.policy_bydst[dir].table = ndst;
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	read_lock_bh(&xfrm_policy_lock);
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
	read_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);

static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net, total);

	mutex_unlock(&hash_resize_mutex);
}

/* Generate a new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(struct net *net, int dir)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_node *entry;
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, entry, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}

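/* Word-by-word comparison of two selectors; returns 0 when they are identical. */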
static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}

int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *pol;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct hlist_node *entry, *newpos;
	u32 mark = policy->mark.v & policy->mark.m;

	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	delpol = NULL;
	newpos = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == policy->type &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    (mark & pol->mark.m) == pol->mark.v &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst;
			continue;
		}
		if (delpol)
			break;
	}
	if (newpos)
		hlist_add_after(newpos, &policy->bydst);
	else
		hlist_add_head(&policy->bydst, chain);
	xfrm_pol_hold(policy);
	net->xfrm.policy_count[dir]++;
	atomic_inc(&flow_cache_genid);
	rt_genid_bump(net);
	if (delpol)
		__xfrm_policy_unlink(delpol, dir);
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = get_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	list_add(&policy->walk.all, &net->xfrm.policy_all);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
					  int dir, struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == type &&
		    (mark & pol->mark.m) == pol->mark.v &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
				     int dir, u32 id, int delete, int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, byidx) {
		if (pol->type == type && pol->index == id &&
		    (mark & pol->mark.m) == pol->mark.v) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i;

		hlist_for_each_entry(pol, entry,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			err = security_xfrm_policy_delete(pol->security);
			if (err) {
				xfrm_audit_policy_delete(pol, 0,
							 audit_info->loginuid,
							 audit_info->sessionid,
							 audit_info->secid);
				return err;
			}
		}
		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				err = security_xfrm_policy_delete(
								pol->security);
				if (err) {
					xfrm_audit_policy_delete(pol, 0,
							audit_info->loginuid,
							audit_info->sessionid,
							audit_info->secid);
					return err;
				}
			}
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif

int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0, cnt = 0;

	write_lock_bh(&xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, audit_info);
	if (err)
		goto out;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i;

	again1:
		hlist_for_each_entry(pol, entry,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			__xfrm_policy_unlink(pol, dir);
			write_unlock_bh(&xfrm_policy_lock);
			cnt++;

			xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
						 audit_info->sessionid,
						 audit_info->secid);

			xfrm_policy_kill(pol);

			write_lock_bh(&xfrm_policy_lock);
			goto again1;
		}

		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
	again2:
			hlist_for_each_entry(pol, entry,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				__xfrm_policy_unlink(pol, dir);
				write_unlock_bh(&xfrm_policy_lock);
				cnt++;

				xfrm_audit_policy_delete(pol, 1,
							 audit_info->loginuid,
							 audit_info->sessionid,
							 audit_info->secid);
				xfrm_policy_kill(pol);

				write_lock_bh(&xfrm_policy_lock);
				goto again2;
			}
		}
	}
	if (!cnt)
		err = -ESRCH;
out:
	write_unlock_bh(&xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);

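/* Walk all policies of the requested type, calling 'func' for each one. The
 * walk can be interrupted and resumed: the walk entry is left in the global
 * list at the point where 'func' returned an error. */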
int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	write_lock_bh(&xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	write_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	INIT_LIST_HEAD(&walk->walk.all);
	walk->walk.dead = 1;
	walk->type = type;
	walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
{
	if (list_empty(&walk->walk.all))
		return;

	write_lock_bh(&xfrm_policy_lock);
	list_del(&walk->walk.all);
	write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);

/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(const struct xfrm_policy *pol,
			     const struct flowi *fl,
			     u8 type, u16 family, int dir)
{
	const struct xfrm_selector *sel = &pol->selector;
	int ret = -ESRCH;
	bool match;

	if (pol->family != family ||
	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
						  dir);

	return ret;
}

static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
						     const struct flowi *fl,
						     u16 family, u8 dir)
{
	int err;
	struct xfrm_policy *pol, *ret;
	const xfrm_address_t *daddr, *saddr;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(net, daddr, saddr, family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else if (pol->priority < priority) {
			ret = pol;
			break;
		}
	}
	if (ret)
		xfrm_pol_hold(ret);
fail:
	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static struct xfrm_policy *
__xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_policy *pol;

	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
	if (pol != NULL)
		return pol;
#endif
	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
}

static struct flow_cache_object *
xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
		   u8 dir, struct flow_cache_object *old_obj, void *ctx)
{
	struct xfrm_policy *pol;

	if (old_obj)
		xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));

	pol = __xfrm_policy_lookup(net, fl, family, dir);
	if (IS_ERR_OR_NULL(pol))
		return ERR_CAST(pol);

	/* Resolver returns two references:
	 * one for cache and one for caller of flow_cache_lookup() */
	xfrm_pol_hold(pol);

	return &pol->flo;
}

static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	}
}

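/* Look up the per-socket policy for this flow: check the selector, the socket
 * mark and the security context before returning a referenced policy. */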
static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
						 const struct flowi *fl)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		bool match = xfrm_selector_match(&pol->selector, fl,
						 sk->sk_family);
		int err = 0;

		if (match) {
			if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
				pol = NULL;
				goto out;
			}
			err = security_xfrm_policy_lookup(pol->security,
						      fl->flowi_secid,
						      policy_to_flow_dir(dir));
			if (!err)
				xfrm_pol_hold(pol);
			else if (err == -ESRCH)
				pol = NULL;
			else
				pol = ERR_PTR(err);
		} else
			pol = NULL;
	}
out:
	read_unlock_bh(&xfrm_policy_lock);
	return pol;
}

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	struct net *net = xp_net(pol);
	struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
						     pol->family, dir);

	list_add(&pol->walk.all, &net->xfrm.policy_all);
	hlist_add_head(&pol->bydst, chain);
	hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
	net->xfrm.policy_count[dir]++;
	xfrm_pol_hold(pol);

	if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct net *net = xp_net(pol);

	if (hlist_unhashed(&pol->bydst))
		return NULL;

	hlist_del(&pol->bydst);
	hlist_del(&pol->byidx);
	list_del(&pol->walk.all);
	net->xfrm.policy_count[dir]--;

	return pol;
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct net *net = xp_net(pol);
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = get_seconds();
		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		/* Unlinking succeeds always. This is the only function
		 * allowed to delete or replace socket policy.
		 */
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}

static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old->security,
					       &newp->security)) {
			kfree(newp);
			return NULL;  /* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->mark = old->mark;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}

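/* Ask the address family for a local source address to use toward 'remote'. */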
static int
xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
	       unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->get_saddr(net, local, remote);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
		      struct xfrm_state **xfrm, unsigned short family)
{
	struct net *net = xp_net(policy);
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL ||
		    tmpl->mode == XFRM_MODE_BEET) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			if (xfrm_addr_any(local, tmpl->encap_family)) {
				error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}
		else if (error == -ESRCH)
			error = -EAGAIN;

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx >= 0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
		  struct xfrm_state **xfrm, unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

fail:
	for (cnx--; cnx >= 0; cnx--)
		xfrm_state_put(tpp[cnx]);
	return error;
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static inline int xfrm_get_tos(const struct flowi *fl, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int tos;

	if (!afinfo)
		return -EINVAL;

	tos = afinfo->get_tos(fl);

	xfrm_policy_put_afinfo(afinfo);

	return tos;
}

static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	if (xdst->route == NULL) {
		/* Dummy bundle - if it has xfrms we were not
		 * able to build bundle as template resolution failed.
		 * It means we need to try again resolving. */
		if (xdst->num_xfrms > 0)
			return NULL;
	} else {
		/* Real bundle */
		if (stale_bundle(dst))
			return NULL;
	}

	dst_hold(dst);
	return flo;
}

static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	if (!xdst->route)
		return 0;
	if (stale_bundle(dst))
		return 0;

	return 1;
}

static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	dst_free(dst);
}

static const struct flow_cache_ops xfrm_bundle_fc_ops = {
	.get = xfrm_bundle_flo_get,
	.check = xfrm_bundle_flo_check,
	.delete = xfrm_bundle_flo_delete,
};

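/* Allocate one xfrm_dst from the per-family dst_ops and hook it up to the
 * bundle flow-cache callbacks. */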
static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_ops *dst_ops;
	struct xfrm_dst *xdst;

	if (!afinfo)
		return ERR_PTR(-EINVAL);

	switch (family) {
	case AF_INET:
		dst_ops = &net->xfrm.xfrm4_dst_ops;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		dst_ops = &net->xfrm.xfrm6_dst_ops;
		break;
#endif
	default:
		BUG();
	}
	xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0);

	if (likely(xdst)) {
		struct dst_entry *dst = &xdst->u.dst;

		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
		xdst->flo.ops = &xfrm_bundle_fc_ops;
		if (afinfo->init_dst)
			afinfo->init_dst(net, xdst);
	} else
		xdst = ERR_PTR(-ENOBUFS);

	xfrm_policy_put_afinfo(afinfo);

	return xdst;
}

static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
				 int nfheader_len)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(dst->ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->init_path(path, dst, nfheader_len);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
				const struct flowi *fl)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->fill_dst(xdst, dev, fl);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */

static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
					    struct xfrm_state **xfrm, int nx,
					    const struct flowi *fl,
					    struct dst_entry *dst)
{
	struct net *net = xp_net(policy);
	unsigned long now = jiffies;
	struct net_device *dev;
	struct xfrm_mode *inner_mode;
	struct dst_entry *dst_prev = NULL;
	struct dst_entry *dst0 = NULL;
	int i = 0;
	int err;
	int header_len = 0;
	int nfheader_len = 0;
	int trailer_len = 0;
	int tos;
	int family = policy->selector.family;
	xfrm_address_t saddr, daddr;

	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);

	tos = xfrm_get_tos(fl, family);
	err = tos;
	if (tos < 0)
		goto put_states;

	dst_hold(dst);

	for (; i < nx; i++) {
		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
		struct dst_entry *dst1 = &xdst->u.dst;

		err = PTR_ERR(xdst);
		if (IS_ERR(xdst)) {
			dst_release(dst);
			goto put_states;
		}

		if (xfrm[i]->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(xfrm[i],
							xfrm_af2proto(family));
			if (!inner_mode) {
				err = -EAFNOSUPPORT;
				dst_release(dst);
				goto put_states;
			}
		} else
			inner_mode = xfrm[i]->inner_mode;

		if (!dst_prev)
			dst0 = dst1;
		else {
			dst_prev->child = dst_clone(dst1);
			dst1->flags |= DST_NOHASH;
		}
		xdst->route = dst;
		dst_copy_metrics(dst1, dst);

		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
			family = xfrm[i]->props.family;
			dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
					      family);
			err = PTR_ERR(dst);
			if (IS_ERR(dst))
				goto put_states;
		} else
			dst_hold(dst);

		dst1->xfrm = xfrm[i];
		xdst->xfrm_genid = xfrm[i]->genid;

		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
		dst1->flags |= DST_HOST;
		dst1->lastuse = now;

		dst1->input = dst_discard;
		dst1->output = inner_mode->afinfo->output;

		dst1->next = dst_prev;
		dst_prev = dst1;

		header_len += xfrm[i]->props.header_len;
		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
			nfheader_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;
	}

	dst_prev->child = dst;
	dst0->path = dst;

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
	xfrm_init_pmtu(dst_prev);

	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;

		err = xfrm_fill_dst(xdst, dev, fl);
		if (err)
			goto free_dst;

		dst_prev->header_len = header_len;
		dst_prev->trailer_len = trailer_len;
		header_len -= xdst->u.dst.xfrm->props.header_len;
		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
	}

out:
	return dst0;

put_states:
	for (; i < nx; i++)
		xfrm_state_put(xfrm[i]);
free_dst:
	if (dst0)
		dst_free(dst0);
	dst0 = ERR_PTR(err);
	goto out;
}

static int inline
xfrm_dst_alloc_copy(void **target, const void *src, int size)
{
	if (!*target) {
		*target = kmalloc(size, GFP_ATOMIC);
		if (!*target)
			return -ENOMEM;
	}
	memcpy(*target, src, size);
	return 0;
}

static int inline
xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
				   sel, sizeof(*sel));
#else
	return 0;
#endif
}

static int inline
xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
#else
	return 0;
#endif
}

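/* Expand the looked-up policy set: for a sub policy, also look up the main
 * policy, accumulate the total number of templates, and report a blocking
 * policy by setting *num_xfrms to -1. */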
static int xfrm_expand_policies(const struct flowi *fl, u16 family,
				struct xfrm_policy **pols,
				int *num_pols, int *num_xfrms)
{
	int i;

	if (*num_pols == 0 || !pols[0]) {
		*num_pols = 0;
		*num_xfrms = 0;
		return 0;
	}
	if (IS_ERR(pols[0]))
		return PTR_ERR(pols[0]);

	*num_xfrms = pols[0]->xfrm_nr;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
						    XFRM_POLICY_TYPE_MAIN,
						    fl, family,
						    XFRM_POLICY_OUT);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				xfrm_pols_put(pols, *num_pols);
				return PTR_ERR(pols[1]);
			}
			(*num_pols)++;
			(*num_xfrms) += pols[1]->xfrm_nr;
		}
	}
#endif
	for (i = 0; i < *num_pols; i++) {
		if (pols[i]->action != XFRM_POLICY_ALLOW) {
			*num_xfrms = -1;
			break;
		}
	}

	return 0;
}
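
/* Resolve the templates of the given policies into xfrm_states and build a
 * bundle of xfrm_dst entries on top of dst_orig.  The policy pointers are
 * copied into the bundle, which is why callers treat them as "stolen" on
 * success.
 */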
static struct xfrm_dst *
xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
			       const struct flowi *fl, u16 family,
			       struct dst_entry *dst_orig)
{
	struct net *net = xp_net(pols[0]);
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst;
	struct xfrm_dst *xdst;
	int err;

	/* Try to instantiate a bundle */
	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
	if (err <= 0) {
		if (err != 0 && err != -EAGAIN)
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
		return ERR_PTR(err);
	}

	dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
	if (IS_ERR(dst)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
		return ERR_CAST(dst);
	}

	xdst = (struct xfrm_dst *)dst;
	xdst->num_xfrms = err;
	if (num_pols > 1)
		err = xfrm_dst_update_parent(dst, &pols[1]->selector);
	else
		err = xfrm_dst_update_origin(dst, fl);
	if (unlikely(err)) {
		dst_free(dst);
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
		return ERR_PTR(err);
	}

	xdst->num_pols = num_pols;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
	xdst->policy_genid = atomic_read(&pols[0]->genid);

	return xdst;
}
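
/* Flow cache resolver for output bundles.  Reuse the old bundle when its
 * policies are still alive; otherwise look the policies up again, try to
 * build a fresh bundle, and fall back to a policy-only ("dummy") bundle
 * when no xfrm_states are available yet.
 */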
static struct flow_cache_object *
xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
		   struct flow_cache_object *oldflo, void *ctx)
{
	struct dst_entry *dst_orig = (struct dst_entry *)ctx;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct xfrm_dst *xdst, *new_xdst;
	int num_pols = 0, num_xfrms = 0, i, err, pol_dead;

	/* Check if the policies from old bundle are usable */
	xdst = NULL;
	if (oldflo) {
		xdst = container_of(oldflo, struct xfrm_dst, flo);
		num_pols = xdst->num_pols;
		num_xfrms = xdst->num_xfrms;
		pol_dead = 0;
		for (i = 0; i < num_pols; i++) {
			pols[i] = xdst->pols[i];
			pol_dead |= pols[i]->walk.dead;
		}
		if (pol_dead) {
			dst_free(&xdst->u.dst);
			xdst = NULL;
			num_pols = 0;
			num_xfrms = 0;
			oldflo = NULL;
		}
	}

	/* Resolve policies to use if we couldn't get them from
	 * the previous cache entry */
	if (xdst == NULL) {
		num_pols = 1;
		pols[0] = __xfrm_policy_lookup(net, fl, family, dir);
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);
		if (err < 0)
			goto inc_error;
		if (num_pols == 0)
			return NULL;
		if (num_xfrms <= 0)
			goto make_dummy_bundle;
	}

	new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
	if (IS_ERR(new_xdst)) {
		err = PTR_ERR(new_xdst);
		if (err != -EAGAIN)
			goto error;
		if (oldflo == NULL)
			goto make_dummy_bundle;
		dst_hold(&xdst->u.dst);
		return oldflo;
	} else if (new_xdst == NULL) {
		num_xfrms = 0;
		if (oldflo == NULL)
			goto make_dummy_bundle;
		xdst->num_xfrms = 0;
		dst_hold(&xdst->u.dst);
		return oldflo;
	}

	/* Kill the previous bundle */
	if (xdst) {
		/* The policies were stolen for newly generated bundle */
		xdst->num_pols = 0;
		dst_free(&xdst->u.dst);
	}

	/* The flow cache does not hold a reference, it dst_free()s the bundle,
	 * but we do need to return one reference for the original caller */
	dst_hold(&new_xdst->u.dst);
	return &new_xdst->flo;

make_dummy_bundle:
	/* We found policies, but there are no bundles to instantiate:
	 * either because the policy blocks, has no transformations, or
	 * we could not build a template (no xfrm_states). */
	xdst = xfrm_alloc_dst(net, family);
	if (IS_ERR(xdst)) {
		xfrm_pols_put(pols, num_pols);
		return ERR_CAST(xdst);
	}
	xdst->num_pols = num_pols;
	xdst->num_xfrms = num_xfrms;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);

	dst_hold(&xdst->u.dst);
	return &xdst->flo;

inc_error:
	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
error:
	if (xdst != NULL)
		dst_free(&xdst->u.dst);
	else
		xfrm_pols_put(pols, num_pols);
	return ERR_PTR(err);
}
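
/* Build a per-family blackhole route used while larval SAs are being
 * negotiated.  The dst_orig reference is released on the error path;
 * otherwise dst_orig is handed to the per-family blackhole_route helper.
 */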
static struct dst_entry *make_blackhole(struct net *net, u16 family,
					struct dst_entry *dst_orig)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_entry *ret;

	if (!afinfo) {
		dst_release(dst_orig);
		return ERR_PTR(-EINVAL);
	} else {
		ret = afinfo->blackhole_route(net, dst_orig);
	}
	xfrm_policy_put_afinfo(afinfo);

	return ret;
}

/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl,
			      struct sock *sk, int flags)
{
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct flow_cache_object *flo;
	struct xfrm_dst *xdst;
	struct dst_entry *dst, *route;
	u16 family = dst_orig->ops->family;
	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;

restart:
	dst = NULL;
	xdst = NULL;
	route = NULL;

	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		num_pols = 1;
		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);
		if (err < 0)
			goto dropdst;

		if (num_pols) {
			if (num_xfrms <= 0) {
				drop_pols = num_pols;
				goto no_transform;
			}

			xdst = xfrm_resolve_and_create_bundle(
					pols, num_pols, fl,
					family, dst_orig);
			if (IS_ERR(xdst)) {
				xfrm_pols_put(pols, num_pols);
				err = PTR_ERR(xdst);
				goto dropdst;
			} else if (xdst == NULL) {
				num_xfrms = 0;
				drop_pols = num_pols;
				goto no_transform;
			}

			dst_hold(&xdst->u.dst);

			spin_lock_bh(&xfrm_policy_sk_bundle_lock);
			xdst->u.dst.next = xfrm_policy_sk_bundles;
			xfrm_policy_sk_bundles = &xdst->u.dst;
			spin_unlock_bh(&xfrm_policy_sk_bundle_lock);

			route = xdst->route;
		}
	}

	if (xdst == NULL) {
		/* To accelerate a bit... */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
			goto nopol;

		flo = flow_cache_lookup(net, fl, family, dir,
					xfrm_bundle_lookup, dst_orig);
		if (flo == NULL)
			goto nopol;
		if (IS_ERR(flo)) {
			err = PTR_ERR(flo);
			goto dropdst;
		}
		xdst = container_of(flo, struct xfrm_dst, flo);

		num_pols = xdst->num_pols;
		num_xfrms = xdst->num_xfrms;
		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
		route = xdst->route;
	}

	dst = &xdst->u.dst;
	if (route == NULL && num_xfrms > 0) {
		/* The only case in which xfrm_bundle_lookup() returns a
		 * bundle with a null route is when the template could
		 * not be resolved. It means policies are there, but the
		 * bundle could not be created, since we don't yet
		 * have the xfrm_states. We need to wait for the KM to
		 * negotiate new SAs or bail out with an error. */
		if (net->xfrm.sysctl_larval_drop) {
			/* EREMOTE tells the caller to generate
			 * a one-shot blackhole route. */
			dst_release(dst);
			xfrm_pols_put(pols, drop_pols);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);

			return make_blackhole(net, family, dst_orig);
		}
		if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) {
			DECLARE_WAITQUEUE(wait, current);

			add_wait_queue(&net->xfrm.km_waitq, &wait);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&net->xfrm.km_waitq, &wait);

			if (!signal_pending(current)) {
				dst_release(dst);
				goto restart;
			}

			err = -ERESTART;
		} else
			err = -EAGAIN;

		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
		goto error;
	}

no_transform:
	if (num_pols == 0)
		goto nopol;

	if ((flags & XFRM_LOOKUP_ICMP) &&
	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
		err = -ENOENT;
		goto error;
	}

	for (i = 0; i < num_pols; i++)
		pols[i]->curlft.use_time = get_seconds();

	if (num_xfrms < 0) {
		/* Prohibit the flow */
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
		err = -EPERM;
		goto error;
	} else if (num_xfrms > 0) {
		/* Flow transformed */
		dst_release(dst_orig);
	} else {
		/* Flow passes untransformed */
		dst_release(dst);
		dst = dst_orig;
	}
ok:
	xfrm_pols_put(pols, drop_pols);
	if (dst && dst->xfrm &&
	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
		dst->flags |= DST_XFRM_TUNNEL;
	return dst;

nopol:
	if (!(flags & XFRM_LOOKUP_ICMP)) {
		dst = dst_orig;
		goto ok;
	}
	err = -ENOENT;
error:
	dst_release(dst);
dropdst:
	dst_release(dst_orig);
	xfrm_pols_put(pols, drop_pols);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(xfrm_lookup);
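
/* Ask the xfrm type of the SA at secpath index 'idx' to reject this skb
 * (via the type's ->reject handler), provided the index is valid and a
 * handler exists.
 */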
static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
{
	struct xfrm_state *x;

	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
		return 0;
	x = skb->sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	return x->type->reject(x, skb, fl);
}

/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we do this in a maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have policy cached at them.
 */
static inline int
xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}

/*
 * A value of 0 or greater is returned when validation succeeds (either a
 * bypass because of an optional transport-mode template, or the next index
 * of the secpath state matched against the template).
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-idx;
			break;
		}
	}
	return start;
}

int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl, reverse);
	err = security_xfrm_decode_session(skb, &fl->flowi_secid);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(__xfrm_decode_session);
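
/* Return 1 and record the index in *idxp if any secpath entry from index
 * 'k' onwards is not in transport mode; return 0 otherwise.
 */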
static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}
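
/* Inbound policy check: decode the flow from the skb, verify the used SAs
 * against their selectors, look up the applicable (socket or global)
 * policies, and make sure the secpath satisfies their templates.  Returns
 * 1 if the packet is acceptable, 0 if it must be dropped.
 */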
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	int reverse;
	struct flowi fl;
	u8 fl_dir;
	int xerr_idx = -1;

	reverse = dir & ~XFRM_POLICY_MASK;
	dir &= XFRM_POLICY_MASK;
	fl_dir = policy_to_flow_dir(dir);

	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		return 0;
	}

	nf_nat_decode_session(skb, &fl, family);

	/* First, check used SAs against their selectors. */
	if (skb->sp) {
		int i;

		for (i = skb->sp->len - 1; i >= 0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
				return 0;
			}
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
		if (IS_ERR(pol)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
			return 0;
		}
	}

	if (!pol) {
		struct flow_cache_object *flo;

		flo = flow_cache_lookup(net, &fl, family, fl_dir,
					xfrm_policy_lookup, NULL);
		if (IS_ERR_OR_NULL(flo))
			pol = ERR_CAST(flo);
		else
			pol = container_of(flo, struct xfrm_policy, flo);
	}

	if (IS_ERR(pol)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
		return 0;
	}

	if (!pol) {
		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = get_seconds();

	pols[0] = pol;
	npols++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
				return 0;
			}
			pols[1]->curlft.use_time = get_seconds();
			npols++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
				goto reject;
			}
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto reject_error;
			}
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
			goto reject;
		}

		xfrm_pols_put(pols, npols);
		return 1;
	}
	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);

int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct flowi fl;
	struct dst_entry *dst;
	int res = 1;

	if (xfrm_decode_session(skb, &fl, family) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
		return 0;
	}

	skb_dst_force(skb);

	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
	if (IS_ERR(dst)) {
		res = 0;
		dst = NULL;
	}
	skb_dst_set(skb, dst);
	return res;
}
EXPORT_SYMBOL(__xfrm_route_forward);

/* Optimize later using cookies and generation ids. */
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
	 * get validated by dst_ops->check on every use.  We do this
	 * because when a normal route referenced by an XFRM dst is
	 * obsoleted we do not go looking around for all parent
	 * referencing XFRM dsts so that we can invalidate them.  It
	 * is just too much work.  Instead we make the checks here on
	 * every use.  For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example).  If X is marked obsolete, "A" will not
	 * notice.  That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a policy's bundle is pruned, we dst_free() the XFRM
	 * dst which causes its ->obsolete field to be set to
	 * DST_OBSOLETE_DEAD.  If an XFRM dst has been pruned like
	 * this, we want to force a new route lookup.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
}

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = dev_net(dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such a dst must be popped before it reaches the
	 * point of failure. */
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

static void __xfrm_garbage_collect(struct net *net)
{
	struct dst_entry *head, *next;

	spin_lock_bh(&xfrm_policy_sk_bundle_lock);
	head = xfrm_policy_sk_bundles;
	xfrm_policy_sk_bundles = NULL;
	spin_unlock_bh(&xfrm_policy_sk_bundle_lock);

	while (head) {
		next = head->next;
		dst_free(head);
		head = next;
	}
}

static void xfrm_garbage_collect(struct net *net)
{
	flow_cache_flush();
	__xfrm_garbage_collect(net);
}

static void xfrm_garbage_collect_deferred(struct net *net)
{
	flow_cache_flush_deferred();
	__xfrm_garbage_collect(net);
}
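
/* Initialise the cached child and route MTUs for every xfrm_dst in the
 * bundle and set each entry's RTAX_MTU to the smaller of the (state
 * adjusted) child MTU and the route MTU.
 */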
static void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst_metric_set(dst, RTAX_MTU, pmtu);
	} while ((dst = dst->next));
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */
static int xfrm_bundle_ok(struct xfrm_dst *first)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->xfrm_genid != dst->xfrm->genid)
			return 0;
		if (xdst->num_pols > 0 &&
		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst_metric_set(dst, RTAX_MTU, mtu);

		if (last == first)
			break;

		last = (struct xfrm_dst *)last->u.dst.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}

static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
{
	return dst_metric_advmss(dst->path);
}

static unsigned int xfrm_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst_mtu(dst->path);
}

static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	return dst->path->ops->neigh_lookup(dst, skb, daddr);
}

int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	struct net *net;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	spin_lock(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->default_advmss == NULL))
			dst_ops->default_advmss = xfrm_default_advmss;
		if (likely(dst_ops->mtu == NULL))
			dst_ops->mtu = xfrm_mtu;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(dst_ops->neigh_lookup == NULL))
			dst_ops->neigh_lookup = xfrm_neigh_lookup;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = xfrm_garbage_collect_deferred;
		rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
	}
	spin_unlock(&xfrm_policy_afinfo_lock);

	rtnl_lock();
	for_each_net(net) {
		struct dst_ops *xfrm_dst_ops;

		switch (afinfo->family) {
		case AF_INET:
			xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case AF_INET6:
			xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
			break;
#endif
		default:
			BUG();
		}
		*xfrm_dst_ops = *afinfo->dst_ops;
	}
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	spin_lock(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else
			RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family],
					 NULL);
	}
	spin_unlock(&xfrm_policy_afinfo_lock);
	if (!err) {
		struct dst_ops *dst_ops = afinfo->dst_ops;

		synchronize_rcu();

		dst_ops->kmem_cachep = NULL;
		dst_ops->check = NULL;
		dst_ops->negative_advice = NULL;
		dst_ops->link_failure = NULL;
		afinfo->garbage_collect = NULL;
	}
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
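
/* Copy the currently registered per-family dst_ops templates into this
 * netns, so its IPv4/IPv6 xfrm dst_ops start out matching the registered
 * afinfo.
 */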
static void __net_init xfrm_dst_ops_init(struct net *net)
{
	struct xfrm_policy_afinfo *afinfo;

	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]);
	if (afinfo)
		net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
#if IS_ENABLED(CONFIG_IPV6)
	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]);
	if (afinfo)
		net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
#endif
	rcu_read_unlock();
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_DOWN:
		xfrm_garbage_collect(dev_net(dev));
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call	= xfrm_dev_event,
};

#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
	int rv;

	if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
			  sizeof(struct linux_xfrm_mib),
			  __alignof__(struct linux_xfrm_mib)) < 0)
		return -ENOMEM;
	rv = xfrm_proc_init(net);
	if (rv < 0)
		snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
	return rv;
}

static void xfrm_statistics_fini(struct net *net)
{
	xfrm_proc_fini(net);
	snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif
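
/* Per-netns policy infrastructure setup: create the xfrm_dst slab cache
 * (init_net only), allocate the by-index and by-destination policy hash
 * tables, and register the netdevice notifier for init_net.
 */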
static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir;

	if (net_eq(net, &init_net))
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
	}

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	if (net_eq(net, &init_net))
		register_netdevice_notifier(&xfrm_dev_notifier);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}

static void xfrm_policy_fini(struct net *net)
{
	struct xfrm_audit audit_info;
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	audit_info.loginuid = INVALID_UID;
	audit_info.sessionid = -1;
	audit_info.secid = 0;
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
#endif
	audit_info.loginuid = INVALID_UID;
	audit_info.sessionid = -1;
	audit_info.secid = 0;
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
}

static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	xfrm_dst_ops_init(net);
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;
	return 0;

out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
	xfrm_input_init();
}

#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
			   kuid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      kuid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif

#ifdef CONFIG_XFRM_MIGRATE
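
/* Compare a migration selector against a policy selector.  A wildcard
 * upper-layer protocol (IPSEC_ULPROTO_ANY) only requires the addresses,
 * prefix lengths and family to match; otherwise the selectors must be
 * identical.
 */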
static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
					const struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
				  sel_cmp->family) == 0 &&
		    xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
				  sel_cmp->family) == 0 &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return true;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return true;
		}
	}
	return false;
}

static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						    u8 dir, u8 type)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &init_net.xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type &&
		    pol->priority < priority) {
			ret = pol;
			break;
		}
	}

	if (ret)
		xfrm_pol_hold(ret);

	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
					  m->old_family) == 0 &&
			    xfrm_addr_cmp(&t->saddr, &m->old_saddr,
					  m->old_family) == 0) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, template does not
			 * store any IP addresses, hence we just compare
			 * mode and protocol */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
				   m[i].old_family) == 0) &&
		    (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
				   m[i].old_family) == 0))
			return -EINVAL;
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}
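
/* Entry point for the MIGRATE operation: validate the request, find the
 * matching policy, migrate the affected states, rewrite the policy's
 * template endpoints, delete the old states and announce the change to the
 * key managers.  On failure, any newly created states are deleted again.
 */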
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			if ((xc = xfrm_state_migrate(x, mp))) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k);

	xfrm_pol_put(pol);

	return 0;

out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif