xfrm_policy.c

/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/xfrm.h>
#include <net/ip.h>
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif

#include "xfrm_hash.h"

DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
static struct dst_entry *xfrm_policy_sk_bundles;
static DEFINE_RWLOCK(xfrm_policy_lock);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static struct kmem_cache *xfrm_dst_cache __read_mostly;

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static void xfrm_init_pmtu(struct dst_entry *dst);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);
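
/*
 * A flow matches a selector when both addresses fall inside the selector
 * prefixes, the ports agree under the port masks, and the protocol and
 * output interface either match or are left as wildcards (zero).
 */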
static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}

static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}

bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}
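
/*
 * Route lookup through the per-family glue: resolve a dst_entry for the
 * outer addresses of a state. xfrm_dst_lookup() substitutes the state's
 * care-of address for either endpoint when the type requires it, and
 * copies the addresses actually used back to the caller.
 */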
static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
						  const xfrm_address_t *saddr,
						  const xfrm_address_t *daddr,
						  int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, saddr, daddr);

	xfrm_policy_put_afinfo(afinfo);

	return dst;
}

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}
static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
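
/*
 * Per-policy lifetime timer: a hard add/use expiry deletes the policy
 * and notifies key managers; a soft expiry only warns and re-arms the
 * timer for another XFRM_KM_TIMEOUT seconds.
 */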
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy *)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}
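
/*
 * Flow cache callbacks for cached policy objects: "get" takes a
 * reference unless the policy has died, "check" reports liveness,
 * and "delete" drops the cache's reference.
 */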
static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
{
	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);

	if (unlikely(pol->walk.dead))
		flo = NULL;
	else
		xfrm_pol_hold(pol);

	return flo;
}

static int xfrm_policy_flo_check(struct flow_cache_object *flo)
{
	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);

	return !pol->walk.dead;
}

static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
{
	xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
}

static const struct flow_cache_ops xfrm_policy_fc_ops = {
	.get = xfrm_policy_flo_get,
	.check = xfrm_policy_flo_check,
	.delete = xfrm_policy_flo_delete,
};

/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		atomic_set(&policy->refcnt, 1);
		setup_timer(&policy->timer, xfrm_policy_timer,
				(unsigned long)policy);
		policy->flo.ops = &xfrm_policy_fc_ops;
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy xfrm_policy: descendant resources must be released to this moment. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer))
		BUG();

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}
EXPORT_SYMBOL(xfrm_policy_destroy);

/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must already be unlinked from the lists.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	policy->walk.dead = 1;

	atomic_inc(&policy->genid);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}

static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash = __sel_hash(sel, family, hmask);

	return (hash == hmask + 1 ?
		&net->xfrm.policy_inexact[dir] :
		net->xfrm.policy_bydst[dir].table + hash);
}

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash = __addr_hash(daddr, saddr, family, hmask);

	return net->xfrm.policy_bydst[dir].table + hash;
}
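
/*
 * Rehash helpers: move every policy from an old hash chain into the new
 * table, preserving the relative order of entries that land in the same
 * new bucket (bydst chains are kept sorted by priority).
 */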
static void xfrm_dst_hash_transfer(struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;

redo:
	hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
		unsigned int h;

		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask);
		if (!entry0) {
			hlist_del(entry);
			hlist_add_head(&pol->bydst, ndsttable+h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del(entry);
			hlist_add_after(entry0, &pol->bydst);
		}
		entry0 = entry;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}

static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	int i;

	if (!ndst)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);

	net->xfrm.policy_bydst[dir].table = ndst;
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	read_lock_bh(&xfrm_policy_lock);
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
	read_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_spd_getinfo);
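
/*
 * Deferred resize: grow a per-direction hash table whenever its policy
 * count exceeds the current mask, and grow the by-index table once the
 * total across all directions does. Runs from the policy_hash_work
 * workqueue so insertion paths never block on a rehash.
 */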
static DEFINE_MUTEX(hash_resize_mutex);

static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net, total);

	mutex_unlock(&hash_resize_mutex);
}

/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(struct net *net, int dir)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_node *entry;
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, entry, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}
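
/*
 * Compare two selectors word by word; returns 0 on an exact match,
 * nonzero otherwise.
 */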
static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}
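
/*
 * Insert a policy into its bydst chain, keeping the chain sorted by
 * priority. An entry with an identical selector, mark and security
 * context replaces the old one (or fails with -EEXIST when exclusive
 * insertion was requested) and inherits its index.
 */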
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *pol;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct hlist_node *entry, *newpos;
	u32 mark = policy->mark.v & policy->mark.m;

	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	delpol = NULL;
	newpos = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == policy->type &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    (mark & pol->mark.m) == pol->mark.v &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst;
			continue;
		}
		if (delpol)
			break;
	}
	if (newpos)
		hlist_add_after(newpos, &policy->bydst);
	else
		hlist_add_head(&policy->bydst, chain);
	xfrm_pol_hold(policy);
	net->xfrm.policy_count[dir]++;
	atomic_inc(&flow_cache_genid);
	if (delpol)
		__xfrm_policy_unlink(delpol, dir);
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = get_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	list_add(&policy->walk.all, &net->xfrm.policy_all);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
					  int dir, struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == type &&
		    (mark & pol->mark.m) == pol->mark.v &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
				     int dir, u32 id, int delete, int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, byidx) {
		if (pol->type == type && pol->index == id &&
		    (mark & pol->mark.m) == pol->mark.v) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i;

		hlist_for_each_entry(pol, entry,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			err = security_xfrm_policy_delete(pol->security);
			if (err) {
				xfrm_audit_policy_delete(pol, 0,
							 audit_info->loginuid,
							 audit_info->sessionid,
							 audit_info->secid);
				return err;
			}
		}
		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				err = security_xfrm_policy_delete(
								pol->security);
				if (err) {
					xfrm_audit_policy_delete(pol, 0,
							audit_info->loginuid,
							audit_info->sessionid,
							audit_info->secid);
					return err;
				}
			}
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif
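
/*
 * Flush every policy of the given type. The write lock is dropped
 * while each entry is audited and killed, so both scans restart from
 * the chain head after every deletion.
 */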
int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0, cnt = 0;

	write_lock_bh(&xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, audit_info);
	if (err)
		goto out;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i;

	again1:
		hlist_for_each_entry(pol, entry,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			__xfrm_policy_unlink(pol, dir);
			write_unlock_bh(&xfrm_policy_lock);
			cnt++;

			xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
						 audit_info->sessionid,
						 audit_info->secid);

			xfrm_policy_kill(pol);

			write_lock_bh(&xfrm_policy_lock);
			goto again1;
		}

		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
	again2:
			hlist_for_each_entry(pol, entry,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				__xfrm_policy_unlink(pol, dir);
				write_unlock_bh(&xfrm_policy_lock);
				cnt++;

				xfrm_audit_policy_delete(pol, 1,
							 audit_info->loginuid,
							 audit_info->sessionid,
							 audit_info->secid);
				xfrm_policy_kill(pol);

				write_lock_bh(&xfrm_policy_lock);
				goto again2;
			}
		}

	}
	if (!cnt)
		err = -ESRCH;
out:
	write_unlock_bh(&xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);
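
/*
 * Iterate over all policies, invoking func for each one. The walker
 * keeps its position on net->xfrm.policy_all between calls, so a dump
 * can be resumed after the lock is dropped; dead entries are skipped.
 */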
int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	write_lock_bh(&xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	write_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	INIT_LIST_HEAD(&walk->walk.all);
	walk->walk.dead = 1;
	walk->type = type;
	walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
{
	if (list_empty(&walk->walk.all))
		return;

	write_lock_bh(&xfrm_policy_lock);
	list_del(&walk->walk.all);
	write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);

/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(const struct xfrm_policy *pol,
			     const struct flowi *fl,
			     u8 type, u16 family, int dir)
{
	const struct xfrm_selector *sel = &pol->selector;
	int ret = -ESRCH;
	bool match;

	if (pol->family != family ||
	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
						  dir);

	return ret;
}
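
/*
 * Two-stage lookup: scan the hashed chain for the flow's addresses
 * first, then fall back to the inexact list, keeping whichever match
 * has the numerically lowest (best) priority.
 */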
static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
						     const struct flowi *fl,
						     u16 family, u8 dir)
{
	int err;
	struct xfrm_policy *pol, *ret;
	const xfrm_address_t *daddr, *saddr;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(net, daddr, saddr, family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else if (pol->priority < priority) {
			ret = pol;
			break;
		}
	}
	if (ret)
		xfrm_pol_hold(ret);
fail:
	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static struct xfrm_policy *
__xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_policy *pol;

	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
	if (pol != NULL)
		return pol;
#endif
	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
}

static struct flow_cache_object *
xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
		   u8 dir, struct flow_cache_object *old_obj, void *ctx)
{
	struct xfrm_policy *pol;

	if (old_obj)
		xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));

	pol = __xfrm_policy_lookup(net, fl, family, dir);
	if (IS_ERR_OR_NULL(pol))
		return ERR_CAST(pol);

	/* Resolver returns two references:
	 * one for cache and one for caller of flow_cache_lookup() */
	xfrm_pol_hold(pol);
	return &pol->flo;
}

static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	}
}

static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
						 const struct flowi *fl)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		bool match = xfrm_selector_match(&pol->selector, fl,
						 sk->sk_family);
		int err = 0;

		if (match) {
			if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
				pol = NULL;
				goto out;
			}
			err = security_xfrm_policy_lookup(pol->security,
						      fl->flowi_secid,
						      policy_to_flow_dir(dir));
			if (!err)
				xfrm_pol_hold(pol);
			else if (err == -ESRCH)
				pol = NULL;
			else
				pol = ERR_PTR(err);
		} else
			pol = NULL;
	}
out:
	read_unlock_bh(&xfrm_policy_lock);
	return pol;
}

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	struct net *net = xp_net(pol);
	struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
						     pol->family, dir);

	list_add(&pol->walk.all, &net->xfrm.policy_all);
	hlist_add_head(&pol->bydst, chain);
	hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
	net->xfrm.policy_count[dir]++;
	xfrm_pol_hold(pol);

	if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct net *net = xp_net(pol);

	if (hlist_unhashed(&pol->bydst))
		return NULL;

	hlist_del(&pol->bydst);
	hlist_del(&pol->byidx);
	list_del(&pol->walk.all);
	net->xfrm.policy_count[dir]--;

	return pol;
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct net *net = xp_net(pol);
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = get_seconds();
		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		/* Unlinking succeeds always. This is the only function
		 * allowed to delete or replace socket policy.
		 */
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}
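
/*
 * Per-socket policies are duplicated when a socket is cloned (e.g. at
 * accept() time): each direction's policy gets a fresh refcounted copy
 * linked into the socket-policy lists.
 */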
static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old->security,
					       &newp->security)) {
			kfree(newp);
			return NULL;  /* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->mark = old->mark;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}

static int
xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
	       unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->get_saddr(net, local, remote);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
		      struct xfrm_state **xfrm, unsigned short family)
{
	struct net *net = xp_net(policy);
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL ||
		    tmpl->mode == XFRM_MODE_BEET) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			if (xfrm_addr_any(local, tmpl->encap_family)) {
				error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}
		else if (error == -ESRCH)
			error = -EAGAIN;

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx>=0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}
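
/*
 * Resolve the templates of every applicable policy (main plus an
 * optional sub-policy) into a single state array, failing with -ENOBUFS
 * if the combined chain would exceed XFRM_MAX_DEPTH; with more than one
 * policy the states are re-sorted for outbound processing.
 */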
static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
		  struct xfrm_state **xfrm, unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

fail:
	for (cnx--; cnx>=0; cnx--)
		xfrm_state_put(tpp[cnx]);
	return error;
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static inline int xfrm_get_tos(const struct flowi *fl, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int tos;

	if (!afinfo)
		return -EINVAL;

	tos = afinfo->get_tos(fl);

	xfrm_policy_put_afinfo(afinfo);

	return tos;
}

static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	if (xdst->route == NULL) {
		/* Dummy bundle - if it has xfrms we were not
		 * able to build bundle as template resolution failed.
		 * It means we need to try again resolving. */
		if (xdst->num_xfrms > 0)
			return NULL;
	} else {
		/* Real bundle */
		if (stale_bundle(dst))
			return NULL;
	}

	dst_hold(dst);
	return flo;
}

static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	if (!xdst->route)
		return 0;
	if (stale_bundle(dst))
		return 0;

	return 1;
}

static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	dst_free(dst);
}

static const struct flow_cache_ops xfrm_bundle_fc_ops = {
	.get = xfrm_bundle_flo_get,
	.check = xfrm_bundle_flo_check,
	.delete = xfrm_bundle_flo_delete,
};

static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_ops *dst_ops;
	struct xfrm_dst *xdst;

	if (!afinfo)
		return ERR_PTR(-EINVAL);

	switch (family) {
	case AF_INET:
		dst_ops = &net->xfrm.xfrm4_dst_ops;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		dst_ops = &net->xfrm.xfrm6_dst_ops;
		break;
#endif
	default:
		BUG();
	}
	xdst = dst_alloc(dst_ops, NULL, 0, 0, 0);

	if (likely(xdst)) {
		memset(&xdst->u.rt6.rt6i_table, 0,
		       sizeof(*xdst) - sizeof(struct dst_entry));
		xdst->flo.ops = &xfrm_bundle_fc_ops;
	} else
		xdst = ERR_PTR(-ENOBUFS);

	xfrm_policy_put_afinfo(afinfo);

	return xdst;
}

static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
				 int nfheader_len)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(dst->ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->init_path(path, dst, nfheader_len);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
				const struct flowi *fl)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->fill_dst(xdst, dev, fl);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */

static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
					    struct xfrm_state **xfrm, int nx,
					    const struct flowi *fl,
					    struct dst_entry *dst)
{
	struct net *net = xp_net(policy);
	unsigned long now = jiffies;
	struct net_device *dev;
	struct xfrm_mode *inner_mode;
	struct dst_entry *dst_prev = NULL;
	struct dst_entry *dst0 = NULL;
	int i = 0;
	int err;
	int header_len = 0;
	int nfheader_len = 0;
	int trailer_len = 0;
	int tos;
	int family = policy->selector.family;
	xfrm_address_t saddr, daddr;

	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);

	tos = xfrm_get_tos(fl, family);
	err = tos;
	if (tos < 0)
		goto put_states;

	dst_hold(dst);

	for (; i < nx; i++) {
		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
		struct dst_entry *dst1 = &xdst->u.dst;

		err = PTR_ERR(xdst);
		if (IS_ERR(xdst)) {
			dst_release(dst);
			goto put_states;
		}

		if (xfrm[i]->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(xfrm[i],
							xfrm_af2proto(family));
			if (!inner_mode) {
				err = -EAFNOSUPPORT;
				dst_release(dst);
				goto put_states;
			}
		} else
			inner_mode = xfrm[i]->inner_mode;

		if (!dst_prev)
			dst0 = dst1;
		else {
			dst_prev->child = dst_clone(dst1);
			dst1->flags |= DST_NOHASH;
		}
		xdst->route = dst;
		dst_copy_metrics(dst1, dst);

		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
			family = xfrm[i]->props.family;
			dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
					      family);
			err = PTR_ERR(dst);
			if (IS_ERR(dst))
				goto put_states;
		} else
			dst_hold(dst);

		dst1->xfrm = xfrm[i];
		xdst->xfrm_genid = xfrm[i]->genid;

		dst1->obsolete = -1;
		dst1->flags |= DST_HOST;
		dst1->lastuse = now;

		dst1->input = dst_discard;
		dst1->output = inner_mode->afinfo->output;

		dst1->next = dst_prev;
		dst_prev = dst1;

		header_len += xfrm[i]->props.header_len;
		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
			nfheader_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;
	}

	dst_prev->child = dst;
	dst0->path = dst;

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	/* Copy neighbour for reachability confirmation */
	dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour_noref(dst)));

	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
	xfrm_init_pmtu(dst_prev);

	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;

		err = xfrm_fill_dst(xdst, dev, fl);
		if (err)
			goto free_dst;

		dst_prev->header_len = header_len;
		dst_prev->trailer_len = trailer_len;
		header_len -= xdst->u.dst.xfrm->props.header_len;
		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
	}

out:
	return dst0;

put_states:
	for (; i < nx; i++)
		xfrm_state_put(xfrm[i]);
free_dst:
	if (dst0)
		dst_free(dst0);
	dst0 = ERR_PTR(err);
	goto out;
}

static inline int
xfrm_dst_alloc_copy(void **target, const void *src, int size)
{
	if (!*target) {
		*target = kmalloc(size, GFP_ATOMIC);
		if (!*target)
			return -ENOMEM;
	}
	memcpy(*target, src, size);
	return 0;
}

static inline int
xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
				   sel, sizeof(*sel));
#else
	return 0;
#endif
}

static inline int
xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
#else
	return 0;
#endif
}
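
/*
 * Expand the policy array in place: starting from the single policy in
 * pols[0], pull in the main policy when a sub-policy matched first, and
 * total up how many transforms (xfrm_nr) the bundle will need; a
 * blocking policy forces *num_xfrms to -1.
 */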
static int xfrm_expand_policies(const struct flowi *fl, u16 family,
				struct xfrm_policy **pols,
				int *num_pols, int *num_xfrms)
{
	int i;

	if (*num_pols == 0 || !pols[0]) {
		*num_pols = 0;
		*num_xfrms = 0;
		return 0;
	}
	if (IS_ERR(pols[0]))
		return PTR_ERR(pols[0]);

	*num_xfrms = pols[0]->xfrm_nr;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
						    XFRM_POLICY_TYPE_MAIN,
						    fl, family,
						    XFRM_POLICY_OUT);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				xfrm_pols_put(pols, *num_pols);
				return PTR_ERR(pols[1]);
			}
			(*num_pols) ++;
			(*num_xfrms) += pols[1]->xfrm_nr;
		}
	}
#endif
	for (i = 0; i < *num_pols; i++) {
		if (pols[i]->action != XFRM_POLICY_ALLOW) {
			*num_xfrms = -1;
			break;
		}
	}

	return 0;
}
static struct xfrm_dst *
xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
			       const struct flowi *fl, u16 family,
			       struct dst_entry *dst_orig)
{
	struct net *net = xp_net(pols[0]);
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst;
	struct xfrm_dst *xdst;
	int err;

	/* Try to instantiate a bundle */
	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
	if (err <= 0) {
		if (err != 0 && err != -EAGAIN)
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
		return ERR_PTR(err);
	}

	dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
	if (IS_ERR(dst)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
		return ERR_CAST(dst);
	}

	xdst = (struct xfrm_dst *)dst;
	xdst->num_xfrms = err;
	if (num_pols > 1)
		err = xfrm_dst_update_parent(dst, &pols[1]->selector);
	else
		err = xfrm_dst_update_origin(dst, fl);
	if (unlikely(err)) {
		dst_free(dst);
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
		return ERR_PTR(err);
	}

	xdst->num_pols = num_pols;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
	xdst->policy_genid = atomic_read(&pols[0]->genid);

	return xdst;
}
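
/* Flow cache resolver for output bundles.  Reuses the old cache entry
 * while its policies are still alive, otherwise looks the policies up
 * again and (re)builds the bundle.  A "dummy" bundle without a route
 * is cached while the key manager negotiates the missing states.
 */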
static struct flow_cache_object *
xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
		   struct flow_cache_object *oldflo, void *ctx)
{
	struct dst_entry *dst_orig = (struct dst_entry *)ctx;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct xfrm_dst *xdst, *new_xdst;
	int num_pols = 0, num_xfrms = 0, i, err, pol_dead;

	/* Check if the policies from old bundle are usable */
	xdst = NULL;
	if (oldflo) {
		xdst = container_of(oldflo, struct xfrm_dst, flo);
		num_pols = xdst->num_pols;
		num_xfrms = xdst->num_xfrms;
		pol_dead = 0;
		for (i = 0; i < num_pols; i++) {
			pols[i] = xdst->pols[i];
			pol_dead |= pols[i]->walk.dead;
		}
		if (pol_dead) {
			dst_free(&xdst->u.dst);
			xdst = NULL;
			num_pols = 0;
			num_xfrms = 0;
			oldflo = NULL;
		}
	}

	/* Resolve policies to use if we couldn't get them from
	 * previous cache entry */
	if (xdst == NULL) {
		num_pols = 1;
		pols[0] = __xfrm_policy_lookup(net, fl, family, dir);
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);
		if (err < 0)
			goto inc_error;
		if (num_pols == 0)
			return NULL;
		if (num_xfrms <= 0)
			goto make_dummy_bundle;
	}

	new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
	if (IS_ERR(new_xdst)) {
		err = PTR_ERR(new_xdst);
		if (err != -EAGAIN)
			goto error;
		if (oldflo == NULL)
			goto make_dummy_bundle;
		dst_hold(&xdst->u.dst);
		return oldflo;
	} else if (new_xdst == NULL) {
		num_xfrms = 0;
		if (oldflo == NULL)
			goto make_dummy_bundle;
		xdst->num_xfrms = 0;
		dst_hold(&xdst->u.dst);
		return oldflo;
	}

	/* Kill the previous bundle */
	if (xdst) {
		/* The policies were stolen for newly generated bundle */
		xdst->num_pols = 0;
		dst_free(&xdst->u.dst);
	}

	/* The flow cache does not hold a reference (it dst_free()'s),
	 * but we do need to return one reference for the original caller */
	dst_hold(&new_xdst->u.dst);
	return &new_xdst->flo;

make_dummy_bundle:
	/* We found policies, but there are no bundles to instantiate:
	 * either because the policy blocks, has no transformations, or
	 * we could not build a template (no xfrm_states). */
	xdst = xfrm_alloc_dst(net, family);
	if (IS_ERR(xdst)) {
		xfrm_pols_put(pols, num_pols);
		return ERR_CAST(xdst);
	}
	xdst->num_pols = num_pols;
	xdst->num_xfrms = num_xfrms;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);

	dst_hold(&xdst->u.dst);
	return &xdst->flo;

inc_error:
	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
error:
	if (xdst != NULL)
		dst_free(&xdst->u.dst);
	else
		xfrm_pols_put(pols, num_pols);
	return ERR_PTR(err);
}
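
/* Hand the flow a one-shot blackhole route via the per-family
 * blackhole_route() callback; used when larval states would otherwise
 * force the caller to sleep.
 */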
static struct dst_entry *make_blackhole(struct net *net, u16 family,
					struct dst_entry *dst_orig)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_entry *ret;

	if (!afinfo) {
		dst_release(dst_orig);
		return ERR_PTR(-EINVAL);
	}
	ret = afinfo->blackhole_route(net, dst_orig);
	xfrm_policy_put_afinfo(afinfo);

	return ret;
}

/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl,
			      struct sock *sk, int flags)
{
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct flow_cache_object *flo;
	struct xfrm_dst *xdst;
	struct dst_entry *dst, *route;
	u16 family = dst_orig->ops->family;
	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;

restart:
	dst = NULL;
	xdst = NULL;
	route = NULL;

	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		num_pols = 1;
		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);
		if (err < 0)
			goto dropdst;

		if (num_pols) {
			if (num_xfrms <= 0) {
				drop_pols = num_pols;
				goto no_transform;
			}

			xdst = xfrm_resolve_and_create_bundle(
					pols, num_pols, fl,
					family, dst_orig);
			if (IS_ERR(xdst)) {
				xfrm_pols_put(pols, num_pols);
				err = PTR_ERR(xdst);
				goto dropdst;
			} else if (xdst == NULL) {
				num_xfrms = 0;
				drop_pols = num_pols;
				goto no_transform;
			}

			dst_hold(&xdst->u.dst);

			spin_lock_bh(&xfrm_policy_sk_bundle_lock);
			xdst->u.dst.next = xfrm_policy_sk_bundles;
			xfrm_policy_sk_bundles = &xdst->u.dst;
			spin_unlock_bh(&xfrm_policy_sk_bundle_lock);

			route = xdst->route;
		}
	}

	if (xdst == NULL) {
		/* To accelerate a bit... */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
			goto nopol;

		flo = flow_cache_lookup(net, fl, family, dir,
					xfrm_bundle_lookup, dst_orig);
		if (flo == NULL)
			goto nopol;
		if (IS_ERR(flo)) {
			err = PTR_ERR(flo);
			goto dropdst;
		}
		xdst = container_of(flo, struct xfrm_dst, flo);

		num_pols = xdst->num_pols;
		num_xfrms = xdst->num_xfrms;
		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
		route = xdst->route;
	}

	dst = &xdst->u.dst;
	if (route == NULL && num_xfrms > 0) {
		/* The only case when xfrm_bundle_lookup() returns a
		 * bundle with a null route is when the template could
		 * not be resolved. It means policies are there, but
		 * the bundle could not be created, since we don't yet
		 * have the xfrm_states. We need to wait for KM to
		 * negotiate new SAs or bail out with an error. */
		if (net->xfrm.sysctl_larval_drop) {
			/* With the larval-drop sysctl set we do not
			 * sleep on SA negotiation; hand the caller a
			 * one-shot blackhole route instead. */
			dst_release(dst);
			xfrm_pols_put(pols, drop_pols);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);

			return make_blackhole(net, family, dst_orig);
		}
		if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) {
			DECLARE_WAITQUEUE(wait, current);

			add_wait_queue(&net->xfrm.km_waitq, &wait);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&net->xfrm.km_waitq, &wait);

			if (!signal_pending(current)) {
				dst_release(dst);
				goto restart;
			}

			err = -ERESTART;
		} else
			err = -EAGAIN;

		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
		goto error;
	}

no_transform:
	if (num_pols == 0)
		goto nopol;

	if ((flags & XFRM_LOOKUP_ICMP) &&
	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
		err = -ENOENT;
		goto error;
	}

	for (i = 0; i < num_pols; i++)
		pols[i]->curlft.use_time = get_seconds();

	if (num_xfrms < 0) {
		/* Prohibit the flow */
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
		err = -EPERM;
		goto error;
	} else if (num_xfrms > 0) {
		/* Flow transformed */
		dst_release(dst_orig);
	} else {
		/* Flow passes untransformed */
		dst_release(dst);
		dst = dst_orig;
	}
ok:
	xfrm_pols_put(pols, drop_pols);
	if (dst && dst->xfrm &&
	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
		dst->flags |= DST_XFRM_TUNNEL;
	return dst;

nopol:
	if (!(flags & XFRM_LOOKUP_ICMP)) {
		dst = dst_orig;
		goto ok;
	}
	err = -ENOENT;
error:
	dst_release(dst);
dropdst:
	dst_release(dst_orig);
	xfrm_pols_put(pols, drop_pols);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(xfrm_lookup);
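
/* Ask the state's type to emit a reject notification for the secpath
 * entry at @idx, if the type implements one.
 */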
static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
{
	struct xfrm_state *x;

	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
		return 0;
	x = skb->sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	return x->type->reject(x, skb, fl);
}

/* When the skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we do this in a maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have the policy cached at them.
 */
static inline int
xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}

/*
 * 0 or more than 0 is returned when validation succeeds (either a
 * bypass because of an optional transport-mode template, or the next
 * index of the secpath state matched against the template).
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-idx;
			break;
		}
	}
	return start;
}

int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl, reverse);
	err = security_xfrm_decode_session(skb, &fl->flowi_secid);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(__xfrm_decode_session);

static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}
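
/* Input-path policy check: decode the flow from the skb, verify the
 * SAs that were actually used against their selectors, then look up
 * the applicable policies and match their templates against the
 * secpath.  Returns 1 if the packet is acceptable, 0 otherwise.
 */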
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	int reverse;
	struct flowi fl;
	u8 fl_dir;
	int xerr_idx = -1;

	reverse = dir & ~XFRM_POLICY_MASK;
	dir &= XFRM_POLICY_MASK;
	fl_dir = policy_to_flow_dir(dir);

	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		return 0;
	}

	nf_nat_decode_session(skb, &fl, family);

	/* First, check used SA against their selectors. */
	if (skb->sp) {
		int i;

		for (i = skb->sp->len - 1; i >= 0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
				return 0;
			}
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
		if (IS_ERR(pol)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
			return 0;
		}
	}

	if (!pol) {
		struct flow_cache_object *flo;

		flo = flow_cache_lookup(net, &fl, family, fl_dir,
					xfrm_policy_lookup, NULL);
		if (IS_ERR_OR_NULL(flo))
			pol = ERR_CAST(flo);
		else
			pol = container_of(flo, struct xfrm_policy, flo);
	}

	if (IS_ERR(pol)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
		return 0;
	}

	if (!pol) {
		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = get_seconds();

	pols[0] = pol;
	npols++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
				return 0;
			}
			pols[1]->curlft.use_time = get_seconds();
			npols++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
				goto reject;
			}
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto reject_error;
			}
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find the corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = xfrm_nr - 1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2 + k);
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
			goto reject;
		}

		xfrm_pols_put(pols, npols);
		return 1;
	}
	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);

int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct flowi fl;
	struct dst_entry *dst;
	int res = 1;

	if (xfrm_decode_session(skb, &fl, family) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
		return 0;
	}

	skb_dst_force(skb);

	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
	if (IS_ERR(dst)) {
		res = 0;
		dst = NULL;
	}
	skb_dst_set(skb, dst);
	return res;
}
EXPORT_SYMBOL(__xfrm_route_forward);

/* Optimize later using cookies and generation ids. */
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to "-1" to force all XFRM destinations to get validated by
	 * dst_ops->check on every use. We do this because when a
	 * normal route referenced by an XFRM dst is obsoleted we do
	 * not go looking around for all parent referencing XFRM dsts
	 * so that we can invalidate them. It is just too much work.
	 * Instead we make the checks here on every use. For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example). If X is marked obsolete, "A" will not
	 * notice. That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a policy's bundle is pruned, we dst_free() the XFRM
	 * dst which causes its ->obsolete field to be set to a
	 * positive non-zero integer. If an XFRM dst has been pruned
	 * like this, we want to force a new route lookup.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
}
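
/* A device going down must not remain referenced through cached
 * bundles: repoint the chain's devices at the namespace loopback
 * device.
 */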
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = dev_net(dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such a dst must be popped before it reaches the
	 * point of failure. */
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}
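
/* Reap the per-socket bundles queued on xfrm_policy_sk_bundles; the
 * callers below additionally flush the flow cache, which owns the
 * ordinary (non-socket) bundles.
 */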
static void __xfrm_garbage_collect(struct net *net)
{
	struct dst_entry *head, *next;

	spin_lock_bh(&xfrm_policy_sk_bundle_lock);
	head = xfrm_policy_sk_bundles;
	xfrm_policy_sk_bundles = NULL;
	spin_unlock_bh(&xfrm_policy_sk_bundle_lock);

	while (head) {
		next = head->next;
		dst_free(head);
		head = next;
	}
}

static void xfrm_garbage_collect(struct net *net)
{
	flow_cache_flush();
	__xfrm_garbage_collect(net);
}

static void xfrm_garbage_collect_deferred(struct net *net)
{
	flow_cache_flush_deferred();
	__xfrm_garbage_collect(net);
}
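
/* Walk a freshly built bundle via the ->next links and set each
 * level's MTU from its child dst, capped by the cached route MTU.
 */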
static void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst_metric_set(dst, RTAX_MTU, pmtu);
	} while ((dst = dst->next));
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */
static int xfrm_bundle_ok(struct xfrm_dst *first)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->xfrm_genid != dst->xfrm->genid)
			return 0;
		if (xdst->num_pols > 0 &&
		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst_metric_set(dst, RTAX_MTU, mtu);

		if (last == first)
			break;

		last = (struct xfrm_dst *)last->u.dst.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}

static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
{
	return dst_metric_advmss(dst->path);
}

static unsigned int xfrm_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst_mtu(dst->path);
}

static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	return dst->path->ops->neigh_lookup(dst, skb, daddr);
}
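
/* Register the per-family policy hooks.  Any dst_ops callback the
 * family leaves NULL is filled in with the xfrm default above, and
 * every namespace's cached copy of the family's dst_ops is refreshed.
 */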
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	struct net *net;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->default_advmss == NULL))
			dst_ops->default_advmss = xfrm_default_advmss;
		if (likely(dst_ops->mtu == NULL))
			dst_ops->mtu = xfrm_mtu;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(dst_ops->neigh_lookup == NULL))
			dst_ops->neigh_lookup = xfrm_neigh_lookup;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = xfrm_garbage_collect_deferred;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);

	rtnl_lock();
	for_each_net(net) {
		struct dst_ops *xfrm_dst_ops;

		switch (afinfo->family) {
		case AF_INET:
			xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case AF_INET6:
			xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
			break;
#endif
		default:
			BUG();
		}
		*xfrm_dst_ops = *afinfo->dst_ops;
	}
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;
			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

static void __net_init xfrm_dst_ops_init(struct net *net)
{
	struct xfrm_policy_afinfo *afinfo;

	read_lock_bh(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[AF_INET];
	if (afinfo)
		net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
#if IS_ENABLED(CONFIG_IPV6)
	afinfo = xfrm_policy_afinfo[AF_INET6];
	if (afinfo)
		net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
#endif
	read_unlock_bh(&xfrm_policy_afinfo_lock);
}

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	read_unlock(&xfrm_policy_afinfo_lock);
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_DOWN:
		xfrm_garbage_collect(dev_net(dev));
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call	= xfrm_dev_event,
};

#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
	int rv;

	if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
			  sizeof(struct linux_xfrm_mib),
			  __alignof__(struct linux_xfrm_mib)) < 0)
		return -ENOMEM;
	rv = xfrm_proc_init(net);
	if (rv < 0)
		snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
	return rv;
}

static void xfrm_statistics_fini(struct net *net)
{
	xfrm_proc_fini(net);
	snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif
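
/* Per-namespace policy setup: the xfrm_dst slab (created once, for
 * init_net), the by-index hash table, one by-destination hash table
 * per direction, and the hash-resize worker.
 */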
static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir;

	if (net_eq(net, &init_net))
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
	}

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	if (net_eq(net, &init_net))
		register_netdevice_notifier(&xfrm_dev_notifier);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}

static void xfrm_policy_fini(struct net *net)
{
	struct xfrm_audit audit_info;
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	audit_info.loginuid = -1;
	audit_info.sessionid = -1;
	audit_info.secid = 0;
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
#endif
	audit_info.loginuid = -1;
	audit_info.sessionid = -1;
	audit_info.secid = 0;
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
}

static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	xfrm_dst_ops_init(net);
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;
	return 0;

out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
	xfrm_input_init();
}

#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
			   uid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      uid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif

#ifdef CONFIG_XFRM_MIGRATE
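/* Match a candidate policy selector against the migration selector.
 * With an ANY upper-layer protocol only the addresses and prefix
 * lengths are compared; otherwise the selectors must match exactly.
 */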
static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
					const struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
				  sel_cmp->family) == 0 &&
		    xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
				  sel_cmp->family) == 0 &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return true;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return true;
		}
	}
	return false;
}

static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						    u8 dir, u8 type)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &init_net.xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type &&
		    pol->priority < priority) {
			ret = pol;
			break;
		}
	}

	if (ret)
		xfrm_pol_hold(ret);

	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
					  m->old_family) == 0 &&
			    xfrm_addr_cmp(&t->saddr, &m->old_saddr,
					  m->old_family) == 0) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, the template does
			 * not store any IP addresses, hence we just
			 * compare mode and protocol */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
				   m[i].old_family) == 0) &&
		    (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
				   m[i].old_family) == 0))
			return -EINVAL;
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}
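
/* Entry point for a migration request: validate it, find the matching
 * policy, clone and re-address the affected states, rewrite the policy
 * templates, delete the old states, and finally announce the migration
 * to the registered key managers.
 */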
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			if ((xc = xfrm_state_migrate(x, mp))) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k);

	xfrm_pol_put(pol);

	return 0;

out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif