/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>	Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/ip.h>
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif

#include "xfrm_hash.h"

int sysctl_xfrm_larval_drop __read_mostly;

#ifdef CONFIG_XFRM_STATISTICS
DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics) __read_mostly;
EXPORT_SYMBOL(xfrm_statistics);
#endif

DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_RWLOCK(xfrm_policy_lock);

unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2];
EXPORT_SYMBOL(xfrm_policy_count);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static struct kmem_cache *xfrm_dst_cache __read_mostly;

static struct work_struct xfrm_policy_gc_work;
static HLIST_HEAD(xfrm_policy_gc_list);
static DEFINE_SPINLOCK(xfrm_policy_gc_lock);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static void xfrm_init_pmtu(struct dst_entry *dst);
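
/*
 * Selector matching: a flow matches a selector when its addresses fall
 * inside the selector's source/destination prefixes, its ports match
 * under the port masks, and the protocol and output interface either
 * match or are left as wildcards (zero) in the selector.
 */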
static inline int
__xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
	return  addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
		(fl->proto == sel->proto || !sel->proto) &&
		(fl->oif == sel->ifindex || !sel->ifindex);
}

static inline int
__xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
	return  addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
		(fl->proto == sel->proto || !sel->proto) &&
		(fl->oif == sel->ifindex || !sel->ifindex);
}

int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
			unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return 0;
}
static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
						int family)
{
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR)
		saddr = x->coaddr;
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR)
		daddr = x->coaddr;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(tos, saddr, daddr);
	xfrm_policy_put_afinfo(afinfo);
	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
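
/*
 * Per-policy lifetime timer.  Enforces the soft and hard add/use expiry
 * times in xp->lft: soft expiry notifies the key managers via
 * km_policy_expired() and re-arms the timer, hard expiry deletes the
 * policy outright.
 */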
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy*)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (xp->dead)
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}

/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		atomic_set(&policy->refcnt, 1);
		setup_timer(&policy->timer, xfrm_policy_timer,
				(unsigned long)policy);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy xfrm_policy: descendant resources must have been released by
 * this point. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->dead);

	BUG_ON(policy->bundles);

	if (del_timer(&policy->timer))
		BUG();

	security_xfrm_policy_free(policy);
	kfree(policy);
}
EXPORT_SYMBOL(xfrm_policy_destroy);

static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
	struct dst_entry *dst;

	while ((dst = policy->bundles) != NULL) {
		policy->bundles = dst->next;
		dst_free(dst);
	}

	if (del_timer(&policy->timer))
		atomic_dec(&policy->refcnt);

	if (atomic_read(&policy->refcnt) > 1)
		flow_cache_flush();

	xfrm_pol_put(policy);
}

static void xfrm_policy_gc_task(struct work_struct *work)
{
	struct xfrm_policy *policy;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_policy_gc_lock);
	gc_list.first = xfrm_policy_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_policy_gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst)
		xfrm_policy_gc_kill(policy);
}

/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must already be unlinked from the lists.
 */
static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	int dead;

	write_lock_bh(&policy->lock);
	dead = policy->dead;
	policy->dead = 1;
	write_unlock_bh(&policy->lock);

	if (unlikely(dead)) {
		WARN_ON(1);
		return;
	}

	spin_lock(&xfrm_policy_gc_lock);
	hlist_add_head(&policy->bydst, &xfrm_policy_gc_list);
	spin_unlock(&xfrm_policy_gc_lock);

	schedule_work(&xfrm_policy_gc_work);
}
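
/*
 * Policies are kept in three lookup structures: per-direction hash
 * tables keyed by the selector addresses (xfrm_policy_bydst), a
 * per-direction "inexact" list for selectors too wide to hash, and a
 * global table keyed by policy index (xfrm_policy_byidx).
 */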
struct xfrm_policy_hash {
	struct hlist_head	*table;
	unsigned int		hmask;
};

static struct hlist_head xfrm_policy_inexact[XFRM_POLICY_MAX*2];
static struct xfrm_policy_hash xfrm_policy_bydst[XFRM_POLICY_MAX*2] __read_mostly;
static struct hlist_head *xfrm_policy_byidx __read_mostly;
static unsigned int xfrm_idx_hmask __read_mostly;
static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(u32 index)
{
	return __idx_hash(index, xfrm_idx_hmask);
}

static struct hlist_head *policy_hash_bysel(struct xfrm_selector *sel, unsigned short family, int dir)
{
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
	unsigned int hash = __sel_hash(sel, family, hmask);

	return (hash == hmask + 1 ?
		&xfrm_policy_inexact[dir] :
		xfrm_policy_bydst[dir].table + hash);
}

static struct hlist_head *policy_hash_direct(xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, int dir)
{
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
	unsigned int hash = __addr_hash(daddr, saddr, family, hmask);

	return xfrm_policy_bydst[dir].table + hash;
}

static void xfrm_dst_hash_transfer(struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;

redo:
	hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
		unsigned int h;

		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask);
		if (!entry0) {
			hlist_del(entry);
			hlist_add_head(&pol->bydst, ndsttable+h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del(entry);
			hlist_add_after(entry0, &pol->bydst);
		}
		entry0 = entry;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}
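
/* Grow a policy hash table to twice its size and rehash every entry
 * under the policy lock; the old table is freed afterwards. */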
static void xfrm_bydst_resize(int dir)
{
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *odst = xfrm_policy_bydst[dir].table;
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	int i;

	if (!ndst)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);

	xfrm_policy_bydst[dir].table = ndst;
	xfrm_policy_bydst[dir].hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(int total)
{
	unsigned int hmask = xfrm_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = xfrm_policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	xfrm_policy_byidx = nidx;
	xfrm_idx_hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(int dir, int *total)
{
	unsigned int cnt = xfrm_policy_count[dir];
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(int total)
{
	unsigned int hmask = xfrm_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct xfrmk_spdinfo *si)
{
	read_lock_bh(&xfrm_policy_lock);
	si->incnt = xfrm_policy_count[XFRM_POLICY_IN];
	si->outcnt = xfrm_policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = xfrm_policy_count[XFRM_POLICY_FWD];
	si->inscnt = xfrm_policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = xfrm_policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = xfrm_policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = xfrm_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
	read_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *__unused)
{
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		if (xfrm_bydst_should_resize(dir, &total))
			xfrm_bydst_resize(dir);
	}
	if (xfrm_byidx_should_resize(total))
		xfrm_byidx_resize(total);

	mutex_unlock(&hash_resize_mutex);
}

static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);

/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(u8 type, int dir)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_node *entry;
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		list = xfrm_policy_byidx + idx_hash(idx);
		found = 0;
		hlist_for_each_entry(p, entry, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}
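
/*
 * Insert a policy into the SPD.  A policy with an identical selector and
 * security context replaces the existing one (or fails with -EEXIST when
 * excl is set); the bydst chain is kept sorted by ascending priority so
 * lookups can stop at the first match.  Cached bundles of the policies
 * following the new entry are flushed afterwards.
 */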
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct xfrm_policy *pol;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct hlist_node *entry, *newpos;
	struct dst_entry *gc_list;

	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(&policy->selector, policy->family, dir);
	delpol = NULL;
	newpos = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == policy->type &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst;
			continue;
		}
		if (delpol)
			break;
	}
	if (newpos)
		hlist_add_after(newpos, &policy->bydst);
	else
		hlist_add_head(&policy->bydst, chain);
	xfrm_pol_hold(policy);
	xfrm_policy_count[dir]++;
	atomic_inc(&flow_cache_genid);
	if (delpol) {
		hlist_del(&delpol->bydst);
		hlist_del(&delpol->byidx);
		xfrm_policy_count[dir]--;
	}
	policy->index = delpol ? delpol->index : xfrm_gen_index(policy->type, dir);
	hlist_add_head(&policy->byidx, xfrm_policy_byidx+idx_hash(policy->index));
	policy->curlft.add_time = get_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(dir, NULL))
		schedule_work(&xfrm_hash_work);

	read_lock_bh(&xfrm_policy_lock);
	gc_list = NULL;
	entry = &policy->bydst;
	hlist_for_each_entry_continue(policy, entry, bydst) {
		struct dst_entry *dst;

		write_lock(&policy->lock);
		dst = policy->bundles;
		if (dst) {
			struct dst_entry *tail = dst;
			while (tail->next)
				tail = tail->next;
			tail->next = gc_list;
			gc_list = dst;

			policy->bundles = NULL;
		}
		write_unlock(&policy->lock);
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;

		gc_list = dst->next;
		dst_free(dst);
	}

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
					  struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(sel, sel->family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == type &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(pol);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				xfrm_policy_count[dir]--;
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(ret);
	}
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(u8 type, int dir, u32 id, int delete,
				     int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = xfrm_policy_byidx + idx_hash(id);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, byidx) {
		if (pol->type == type && pol->index == id) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(pol);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				xfrm_policy_count[dir]--;
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(ret);
	}
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i;

		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			err = security_xfrm_policy_delete(pol);
			if (err) {
				xfrm_audit_policy_delete(pol, 0,
							 audit_info->loginuid,
							 audit_info->secid);
				return err;
			}
		}
		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry,
					     xfrm_policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				err = security_xfrm_policy_delete(pol);
				if (err) {
					xfrm_audit_policy_delete(pol, 0,
							audit_info->loginuid,
							audit_info->secid);
					return err;
				}
			}
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(u8 type, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif
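
/*
 * Delete every policy of the given type.  The policy lock is dropped
 * around each kill so that auditing and key-manager notification can
 * run, which is why the chain walks restart from the head after every
 * deletion.
 */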
int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0;

	write_lock_bh(&xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(type, audit_info);
	if (err)
		goto out;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i, killed;

		killed = 0;
	again1:
		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			hlist_del(&pol->bydst);
			hlist_del(&pol->byidx);
			write_unlock_bh(&xfrm_policy_lock);

			xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
						 audit_info->secid);

			xfrm_policy_kill(pol);
			killed++;

			write_lock_bh(&xfrm_policy_lock);
			goto again1;
		}

		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
	again2:
			hlist_for_each_entry(pol, entry,
					     xfrm_policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				write_unlock_bh(&xfrm_policy_lock);

				xfrm_audit_policy_delete(pol, 1,
							 audit_info->loginuid,
							 audit_info->secid);
				xfrm_policy_kill(pol);
				killed++;

				write_lock_bh(&xfrm_policy_lock);
				goto again2;
			}
		}

		xfrm_policy_count[dir] -= killed;
	}
	atomic_inc(&flow_cache_genid);
out:
	write_unlock_bh(&xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol, *last = NULL;
	struct hlist_node *entry;
	int dir, last_dir = 0, count, error;

	read_lock_bh(&xfrm_policy_lock);
	count = 0;

	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		struct hlist_head *table = xfrm_policy_bydst[dir].table;
		int i;

		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			if (last) {
				error = func(last, last_dir % XFRM_POLICY_MAX,
					     count, data);
				if (error)
					goto out;
			}
			last = pol;
			last_dir = dir;
			count++;
		}
		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry, table + i, bydst) {
				if (pol->type != type)
					continue;
				if (last) {
					error = func(last, last_dir % XFRM_POLICY_MAX,
						     count, data);
					if (error)
						goto out;
				}
				last = pol;
				last_dir = dir;
				count++;
			}
		}
	}
	if (count == 0) {
		error = -ENOENT;
		goto out;
	}
	error = func(last, last_dir % XFRM_POLICY_MAX, 0, data);
out:
	read_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
			     u8 type, u16 family, int dir)
{
	struct xfrm_selector *sel = &pol->selector;
	int match, ret = -ESRCH;

	if (pol->family != family ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol, fl->secid, dir);

	return ret;
}
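
/*
 * SPD lookup for a flow: scan the exact hash chain for the flow's
 * addresses first, then the inexact list, keeping whichever match has
 * the numerically lowest (i.e. most preferred) priority.
 */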
static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
						     u16 family, u8 dir)
{
	int err;
	struct xfrm_policy *pol, *ret;
	xfrm_address_t *daddr, *saddr;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(daddr, saddr, family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &xfrm_policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else if (pol->priority < priority) {
			ret = pol;
			break;
		}
	}
	if (ret)
		xfrm_pol_hold(ret);
fail:
	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static int xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
			      void **objp, atomic_t **obj_refp)
{
	struct xfrm_policy *pol;
	int err = 0;

#ifdef CONFIG_XFRM_SUB_POLICY
	pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_SUB, fl, family, dir);
	if (IS_ERR(pol)) {
		err = PTR_ERR(pol);
		pol = NULL;
	}
	if (pol || err)
		goto end;
#endif
	pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN, fl, family, dir);
	if (IS_ERR(pol)) {
		err = PTR_ERR(pol);
		pol = NULL;
	}
#ifdef CONFIG_XFRM_SUB_POLICY
end:
#endif
	if ((*objp = (void *) pol) != NULL)
		*obj_refp = &pol->refcnt;
	return err;
}

static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	}
}

static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		int match = xfrm_selector_match(&pol->selector, fl,
						sk->sk_family);
		int err = 0;

		if (match) {
			err = security_xfrm_policy_lookup(pol, fl->secid,
					policy_to_flow_dir(dir));
			if (!err)
				xfrm_pol_hold(pol);
			else if (err == -ESRCH)
				pol = NULL;
			else
				pol = ERR_PTR(err);
		} else
			pol = NULL;
	}
	read_unlock_bh(&xfrm_policy_lock);
	return pol;
}
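
/*
 * Link/unlink a policy into the SPD hash tables.  Per-socket policies
 * are linked with dir offset by XFRM_POLICY_MAX (see
 * xfrm_sk_policy_insert) so they never collide with the system-wide
 * directions.
 */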
static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	struct hlist_head *chain = policy_hash_bysel(&pol->selector,
						     pol->family, dir);

	hlist_add_head(&pol->bydst, chain);
	hlist_add_head(&pol->byidx, xfrm_policy_byidx+idx_hash(pol->index));
	xfrm_policy_count[dir]++;
	xfrm_pol_hold(pol);

	if (xfrm_bydst_should_resize(dir, NULL))
		schedule_work(&xfrm_hash_work);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	if (hlist_unhashed(&pol->bydst))
		return NULL;

	hlist_del(&pol->bydst);
	hlist_del(&pol->byidx);
	xfrm_policy_count[dir]--;

	return pol;
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		if (dir < XFRM_POLICY_MAX)
			atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = get_seconds();
		pol->index = xfrm_gen_index(pol->type, XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}
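
/* Duplicate a per-socket policy; __xfrm_sk_clone_policy() below uses
 * this when a socket, and thus its policy pointers, is cloned. */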
static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old, newp)) {
			kfree(newp);
			return NULL;  /* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}

static int
xfrm_get_saddr(xfrm_address_t *local, xfrm_address_t *remote,
	       unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->get_saddr(local, remote);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
		      struct xfrm_state **xfrm,
		      unsigned short family)
{
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL ||
		    tmpl->mode == XFRM_MODE_BEET) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			family = tmpl->encap_family;
			if (xfrm_addr_any(local, family)) {
				error = xfrm_get_saddr(&tmp, remote, family);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx >= 0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
		  struct xfrm_state **xfrm,
		  unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

fail:
	for (cnx--; cnx >= 0; cnx--)
		xfrm_state_put(tpp[cnx]);
	return error;
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
{
	struct dst_entry *x;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EINVAL);
	x = afinfo->find_bundle(fl, policy);
	xfrm_policy_put_afinfo(afinfo);
	return x;
}

static inline int xfrm_get_tos(struct flowi *fl, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int tos;

	if (!afinfo)
		return -EINVAL;

	tos = afinfo->get_tos(fl);

	xfrm_policy_put_afinfo(afinfo);

	return tos;
}

static inline struct xfrm_dst *xfrm_alloc_dst(int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct xfrm_dst *xdst;

	if (!afinfo)
		return ERR_PTR(-EINVAL);

	xdst = dst_alloc(afinfo->dst_ops) ?: ERR_PTR(-ENOBUFS);

	xfrm_policy_put_afinfo(afinfo);

	return xdst;
}

static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
				 int nfheader_len)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(dst->ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->init_path(path, dst, nfheader_len);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->fill_dst(xdst, dev);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... In short, build a bundle.
 */

static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
					    struct xfrm_state **xfrm, int nx,
					    struct flowi *fl,
					    struct dst_entry *dst)
{
	unsigned long now = jiffies;
	struct net_device *dev;
	struct dst_entry *dst_prev = NULL;
	struct dst_entry *dst0 = NULL;
	int i = 0;
	int err;
	int header_len = 0;
	int nfheader_len = 0;
	int trailer_len = 0;
	int tos;
	int family = policy->selector.family;

	tos = xfrm_get_tos(fl, family);
	err = tos;
	if (tos < 0)
		goto put_states;

	dst_hold(dst);

	for (; i < nx; i++) {
		struct xfrm_dst *xdst = xfrm_alloc_dst(family);
		struct dst_entry *dst1 = &xdst->u.dst;

		err = PTR_ERR(xdst);
		if (IS_ERR(xdst)) {
			dst_release(dst);
			goto put_states;
		}

		if (!dst_prev)
			dst0 = dst1;
		else {
			dst_prev->child = dst_clone(dst1);
			dst1->flags |= DST_NOHASH;
		}

		xdst->route = dst;
		memcpy(&dst1->metrics, &dst->metrics, sizeof(dst->metrics));

		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
			family = xfrm[i]->props.family;
			dst = xfrm_dst_lookup(xfrm[i], tos, family);
			err = PTR_ERR(dst);
			if (IS_ERR(dst))
				goto put_states;
		} else
			dst_hold(dst);

		dst1->xfrm = xfrm[i];
		xdst->genid = xfrm[i]->genid;

		dst1->obsolete = -1;
		dst1->flags |= DST_HOST;
		dst1->lastuse = now;

		dst1->input = dst_discard;
		dst1->output = xfrm[i]->outer_mode->afinfo->output;

		dst1->next = dst_prev;
		dst_prev = dst1;

		header_len += xfrm[i]->props.header_len;
		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
			nfheader_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;
	}

	dst_prev->child = dst;
	dst0->path = dst;

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	/* Copy neighbour for reachability confirmation */
	dst0->neighbour = neigh_clone(dst->neighbour);

	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
	xfrm_init_pmtu(dst_prev);

	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;

		err = xfrm_fill_dst(xdst, dev);
		if (err)
			goto free_dst;

		dst_prev->header_len = header_len;
		dst_prev->trailer_len = trailer_len;
		header_len -= xdst->u.dst.xfrm->props.header_len;
		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
	}

out:
	return dst0;

put_states:
	for (; i < nx; i++)
		xfrm_state_put(xfrm[i]);
free_dst:
	if (dst0)
		dst_free(dst0);
	dst0 = ERR_PTR(err);
	goto out;
}
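
/* Attach a copy of the originating selector/flowi to a bundle; the
 * xdst->partner and xdst->origin fields only exist with
 * CONFIG_XFRM_SUB_POLICY, where later bundle checks can compare a flow
 * against them. */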
static int inline
xfrm_dst_alloc_copy(void **target, void *src, int size)
{
	if (!*target) {
		*target = kmalloc(size, GFP_ATOMIC);
		if (!*target)
			return -ENOMEM;
	}
	memcpy(*target, src, size);
	return 0;
}

static int inline
xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
				   sel, sizeof(*sel));
#else
	return 0;
#endif
}

static int inline
xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
#else
	return 0;
#endif
}

static int stale_bundle(struct dst_entry *dst);

/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
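/*
 * Outline: find the applicable policy (socket policy first, then the
 * flow cache / SPD), try to reuse a cached bundle, and otherwise
 * resolve the policy's templates to xfrm_states and build a fresh
 * bundle.  With sysctl_xfrm_larval_drop set, -EREMOTE asks the caller
 * to install a one-shot blackhole route instead of sleeping on
 * km_waitq for states that are still being negotiated.
 */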
int __xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		  struct sock *sk, int flags)
{
	struct xfrm_policy *policy;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols;
	int pol_dead;
	int xfrm_nr;
	int pi;
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst, *dst_orig = *dst_p;
	int nx = 0;
	int err;
	u32 genid;
	u16 family;
	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);

restart:
	genid = atomic_read(&flow_cache_genid);
	policy = NULL;
	for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
		pols[pi] = NULL;
	npols = 0;
	pol_dead = 0;
	xfrm_nr = 0;

	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
		err = PTR_ERR(policy);
		if (IS_ERR(policy)) {
			XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLERROR);
			goto dropdst;
		}
	}

	if (!policy) {
		/* To accelerate a bit...  */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !xfrm_policy_count[XFRM_POLICY_OUT])
			goto nopol;

		policy = flow_cache_lookup(fl, dst_orig->ops->family,
					   dir, xfrm_policy_lookup);
		err = PTR_ERR(policy);
		if (IS_ERR(policy)) {
			XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLERROR);
			goto dropdst;
		}
	}

	if (!policy)
		goto nopol;

	family = dst_orig->ops->family;
	pols[0] = policy;
	npols++;
	xfrm_nr += pols[0]->xfrm_nr;

	err = -ENOENT;
	if ((flags & XFRM_LOOKUP_ICMP) && !(policy->flags & XFRM_POLICY_ICMP))
		goto error;

	policy->curlft.use_time = get_seconds();

	switch (policy->action) {
	default:
	case XFRM_POLICY_BLOCK:
		/* Prohibit the flow */
		XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLBLOCK);
		err = -EPERM;
		goto error;

	case XFRM_POLICY_ALLOW:
#ifndef CONFIG_XFRM_SUB_POLICY
		if (policy->xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pol_put(policy);
			return 0;
		}
#endif

		/* Try to find matching bundle.
		 *
		 * LATER: help from flow cache. It is optional, this
		 * is required only for output policy.
		 */
		dst = xfrm_find_bundle(fl, policy, family);
		if (IS_ERR(dst)) {
			XFRM_INC_STATS(LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
			err = PTR_ERR(dst);
			goto error;
		}

		if (dst)
			break;

#ifdef CONFIG_XFRM_SUB_POLICY
		if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
			pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
							    fl, family,
							    XFRM_POLICY_OUT);
			if (pols[1]) {
				if (IS_ERR(pols[1])) {
					XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLERROR);
					err = PTR_ERR(pols[1]);
					goto error;
				}
				if (pols[1]->action == XFRM_POLICY_BLOCK) {
					XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLBLOCK);
					err = -EPERM;
					goto error;
				}
				npols++;
				xfrm_nr += pols[1]->xfrm_nr;
			}
		}

		/*
		 * Neither the flowi nor the bundle carries the size of the
		 * transformation template list, so only after all policies
		 * have been looked up can we tell whether every one of them
		 * is a bypass.  Note the equivalent not-transformed bypass
		 * check above for the non-sub-policy configuration.
		 */
		if (xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}

#endif
		nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

		if (unlikely(nx < 0)) {
			err = nx;
			if (err == -EAGAIN && sysctl_xfrm_larval_drop) {
				/* EREMOTE tells the caller to generate
				 * a one-shot blackhole route.
				 */
				XFRM_INC_STATS(LINUX_MIB_XFRMOUTNOSTATES);
				xfrm_pol_put(policy);
				return -EREMOTE;
			}
			if (err == -EAGAIN && (flags & XFRM_LOOKUP_WAIT)) {
				DECLARE_WAITQUEUE(wait, current);

				add_wait_queue(&km_waitq, &wait);
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&km_waitq, &wait);

				nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

				if (nx == -EAGAIN && signal_pending(current)) {
					XFRM_INC_STATS(LINUX_MIB_XFRMOUTNOSTATES);
					err = -ERESTART;
					goto error;
				}
				if (nx == -EAGAIN ||
				    genid != atomic_read(&flow_cache_genid)) {
					xfrm_pols_put(pols, npols);
					goto restart;
				}
				err = nx;
			}
			if (err < 0) {
				XFRM_INC_STATS(LINUX_MIB_XFRMOUTNOSTATES);
				goto error;
			}
		}
		if (nx == 0) {
			/* Flow passes not transformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}

		dst = xfrm_bundle_create(policy, xfrm, nx, fl, dst_orig);
		err = PTR_ERR(dst);
		if (IS_ERR(dst)) {
			XFRM_INC_STATS(LINUX_MIB_XFRMOUTBUNDLEGENERROR);
			goto error;
		}

		for (pi = 0; pi < npols; pi++) {
			read_lock_bh(&pols[pi]->lock);
			pol_dead |= pols[pi]->dead;
			read_unlock_bh(&pols[pi]->lock);
		}

		write_lock_bh(&policy->lock);
		if (unlikely(pol_dead || stale_bundle(dst))) {
			/* Wow! While we worked on resolving, this
			 * policy has gone. Retry. It is not paranoia,
			 * we just cannot enlist new bundle to dead object.
			 * We can't enlist stale bundles either.
			 */
			write_unlock_bh(&policy->lock);
			if (dst)
				dst_free(dst);

			if (pol_dead)
				XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLDEAD);
			else
				XFRM_INC_STATS(LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
			err = -EHOSTUNREACH;
			goto error;
		}

		if (npols > 1)
			err = xfrm_dst_update_parent(dst, &pols[1]->selector);
		else
			err = xfrm_dst_update_origin(dst, fl);
		if (unlikely(err)) {
			write_unlock_bh(&policy->lock);
			if (dst)
				dst_free(dst);
			XFRM_INC_STATS(LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
			goto error;
		}

		dst->next = policy->bundles;
		policy->bundles = dst;
		dst_hold(dst);
		write_unlock_bh(&policy->lock);
	}
	*dst_p = dst;
	dst_release(dst_orig);
	xfrm_pols_put(pols, npols);
	return 0;

error:
	xfrm_pols_put(pols, npols);
dropdst:
	dst_release(dst_orig);
	*dst_p = NULL;
	return err;

nopol:
	err = -ENOENT;
	if (flags & XFRM_LOOKUP_ICMP)
		goto dropdst;
	return 0;
}
  1473. EXPORT_SYMBOL(__xfrm_lookup);
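/* Wrapper around __xfrm_lookup() for callers that do not implement the
 * -EREMOTE "generate a one-shot blackhole route" convention: the held
 * dst is dropped and the error is mapped back to -EAGAIN.
 *
 * Typical caller pattern (a sketch only; real callers such as the
 * routing output paths add their own flags and error handling):
 *
 *	struct dst_entry *dst = rt_dst;
 *	int err = xfrm_lookup(&dst, &fl, sk, XFRM_LOOKUP_WAIT);
 *	if (!err)
 *		... use dst, which is now either the xfrm bundle or,
 *		    for a bypass flow, the original route passed in.
 */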
int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
{
	int err = __xfrm_lookup(dst_p, fl, sk, flags);

	if (err == -EREMOTE) {
		dst_release(*dst_p);
		*dst_p = NULL;
		err = -EAGAIN;
	}

	return err;
}
EXPORT_SYMBOL(xfrm_lookup);

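/* Ask the state's type-specific reject handler (e.g. to emit an ICMP
 * error) for the secpath entry that failed validation. A no-op when the
 * index is out of range or the type provides no reject hook.
 */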
static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
{
	struct xfrm_state *x;

	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
		return 0;

	x = skb->sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	return x->type->reject(x, skb, fl);
}

/* When an skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we do this in a maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have their policy cached at them.
 */
static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		((tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}

/*
 * Returns 0 or a positive value when validation succeeds: either the
 * template was bypassed (optional transport mode) or the value is one
 * past the index of the secpath state that matched the template, i.e.
 * the index at which the next template should resume scanning.
 * Returns -1 when no matching template is found.
 * Otherwise returns "-2 - errored_index".
 */
static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2 - idx;
			break;
		}
	}
	return start;
}

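/* Build the flow key for a packet: the per-family decode_session
 * handler extracts addresses, ports, etc. (optionally for the reverse
 * direction), and the LSM then fills in fl->secid.
 */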
int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl, reverse);
	err = security_xfrm_decode_session(skb, &fl->secid);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(__xfrm_decode_session);

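/* Scan the secpath from index k for a state that is not in transport
 * mode; if one is found, record its index in *idxp and return 1.
 */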
static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}

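/* Inbound policy check: decode the flow, verify each SA on the secpath
 * against its selector, look up the applicable policy (socket policy
 * first, then the flow cache) and validate the secpath against the
 * policy's templates. Returns 1 if the packet is permitted, 0 otherwise.
 */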
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	int reverse;
	struct flowi fl;
	u8 fl_dir;
	int xerr_idx = -1;

	reverse = dir & ~XFRM_POLICY_MASK;
	dir &= XFRM_POLICY_MASK;
	fl_dir = policy_to_flow_dir(dir);

	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
		XFRM_INC_STATS(LINUX_MIB_XFRMINHDRERROR);
		return 0;
	}

	nf_nat_decode_session(skb, &fl, family);

	/* First, check used SA against their selectors. */
	if (skb->sp) {
		int i;

		for (i = skb->sp->len - 1; i >= 0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family)) {
				XFRM_INC_STATS(LINUX_MIB_XFRMINSTATEMISMATCH);
				return 0;
			}
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
		if (IS_ERR(pol)) {
			XFRM_INC_STATS(LINUX_MIB_XFRMINPOLERROR);
			return 0;
		}
	}

	if (!pol)
		pol = flow_cache_lookup(&fl, family, fl_dir,
					xfrm_policy_lookup);

	if (IS_ERR(pol)) {
		XFRM_INC_STATS(LINUX_MIB_XFRMINPOLERROR);
		return 0;
	}

	if (!pol) {
		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			XFRM_INC_STATS(LINUX_MIB_XFRMINNOPOLS);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = get_seconds();

	pols[0] = pol;
	npols++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				XFRM_INC_STATS(LINUX_MIB_XFRMINPOLERROR);
				return 0;
			}
			pols[1]->curlft.use_time = get_seconds();
			npols++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW) {
				XFRM_INC_STATS(LINUX_MIB_XFRMINPOLBLOCK);
				goto reject;
			}
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
				XFRM_INC_STATS(LINUX_MIB_XFRMINBUFFERERROR);
				goto reject_error;
			}
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find the corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = xfrm_nr - 1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2 + k);
				XFRM_INC_STATS(LINUX_MIB_XFRMINTMPLMISMATCH);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
			XFRM_INC_STATS(LINUX_MIB_XFRMINTMPLMISMATCH);
			goto reject;
		}

		xfrm_pols_put(pols, npols);
		return 1;
	}
	XFRM_INC_STATS(LINUX_MIB_XFRMINPOLBLOCK);

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);

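/* Route a forwarded packet through the IPsec engine: decode the flow
 * and run it through xfrm_lookup(), replacing skb->dst with the
 * resulting bundle. Returns nonzero when forwarding may proceed.
 */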
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct flowi fl;

	if (xfrm_decode_session(skb, &fl, family) < 0) {
		/* XXX: we should have something like FWDHDRERROR here. */
		XFRM_INC_STATS(LINUX_MIB_XFRMINHDRERROR);
		return 0;
	}

	return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
}
EXPORT_SYMBOL(__xfrm_route_forward);

/* Optimize later using cookies and generation ids. */
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to "-1" to force all XFRM destinations to get validated by
	 * dst_ops->check on every use.  We do this because when a
	 * normal route referenced by an XFRM dst is obsoleted we do
	 * not go looking around for all parent referencing XFRM dsts
	 * so that we can invalidate them.  It is just too much work.
	 * Instead we make the checks here on every use.  For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example).  If X is marked obsolete, "A" will not
	 * notice.  That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a policy's bundle is pruned, we dst_free() the XFRM
	 * dst which causes its ->obsolete field to be set to a
	 * positive non-zero integer.  If an XFRM dst has been pruned
	 * like this, we want to force a new route lookup.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}

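/* A bundle is stale when xfrm_bundle_ok() no longer validates it; we
 * pass no policy, no flow and AF_UNSPEC so that only the structural
 * checks (route validity, state validity, generation ids) apply.
 */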
static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
}

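/* A device referenced by a bundle is going away: repoint every child
 * xfrm dst that still uses it at the loopback device so the bundle can
 * be torn down safely later.
 */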
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = dev->nd_net->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such a dst must be popped before it reaches the
	 * point of failure. */
	return;
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

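/* Unlink every bundle on the policy's list for which func() returns
 * true and move it onto the caller's gc list for freeing outside the
 * policy lock.
 */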
static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_entry *), struct dst_entry **gc_list_p)
{
	struct dst_entry *dst, **dstp;

	write_lock(&pol->lock);
	dstp = &pol->bundles;
	while ((dst = *dstp) != NULL) {
		if (func(dst)) {
			*dstp = dst->next;
			dst->next = *gc_list_p;
			*gc_list_p = dst;
		} else {
			dstp = &dst->next;
		}
	}
	write_unlock(&pol->lock);
}

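/* Apply prune_one_bundle() to every policy in the system: both the
 * inexact lists and all per-direction hash tables. The collected
 * bundles are freed only after xfrm_policy_lock has been dropped.
 */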
static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
{
	struct dst_entry *gc_list = NULL;
	int dir;

	read_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		struct hlist_head *table;
		int i;

		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst)
			prune_one_bundle(pol, func, &gc_list);

		table = xfrm_policy_bydst[dir].table;
		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry, table + i, bydst)
				prune_one_bundle(pol, func, &gc_list);
		}
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;
		gc_list = dst->next;
		dst_free(dst);
	}
}

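/* Prune predicates: garbage collection drops bundles that no longer
 * have any references, while a flush drops any bundle that has gone
 * stale (e.g. after a device went down).
 */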
static int unused_bundle(struct dst_entry *dst)
{
	return !atomic_read(&dst->__refcnt);
}

static void __xfrm_garbage_collect(void)
{
	xfrm_prune_bundles(unused_bundle);
}

static int xfrm_flush_bundles(void)
{
	xfrm_prune_bundles(stale_bundle);
	return 0;
}

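/* Walk a freshly created bundle from the outermost dst inward, caching
 * the child and route MTUs and recording the effective path MTU (the
 * smaller of the transformed child MTU and the route MTU) per segment.
 */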
static void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst->metrics[RTAX_MTU-1] = pmtu;
	} while ((dst = dst->next));
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
		   struct flowi *fl, int family, int strict)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (fl) {
		if (first->origin && !flow_cache_uli_match(first->origin, fl))
			return 0;
		if (first->partner &&
		    !xfrm_selector_match(first->partner, fl, family))
			return 0;
	}
#endif

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
			return 0;
		if (fl && pol &&
		    !security_xfrm_state_pol_flow_match(dst->xfrm, pol, fl))
			return 0;
		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->genid != dst->xfrm->genid)
			return 0;

		if (strict && fl &&
		    !(dst->xfrm->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
		    !xfrm_state_addr_flow_check(dst->xfrm, fl, family))
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

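	/* If no cached MTU changed, the bundle is fine as-is. Otherwise
	 * re-derive the effective MTUs, walking back up from the deepest
	 * entry whose cache was updated ("last") towards the head of the
	 * bundle.
	 */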
	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst->metrics[RTAX_MTU-1] = mtu;

		if (last == first)
			break;

		last = (struct xfrm_dst *)last->u.dst.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}
EXPORT_SYMBOL(xfrm_bundle_ok);

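/* Register per-family policy operations. Default hooks (dst cache,
 * dst validation, negative advice, link failure, garbage collection)
 * are filled in wherever the family did not supply its own.
 */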
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = __xfrm_garbage_collect;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;
			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

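/* On success, xfrm_policy_get_afinfo() returns with the afinfo read
 * lock still held; xfrm_policy_put_afinfo() releases it. The lock is
 * only dropped early when no handler is registered for the family.
 */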
static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	read_unlock(&xfrm_policy_afinfo_lock);
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (dev->nd_net != &init_net)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
		xfrm_flush_bundles();
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call = xfrm_dev_event,
};

#ifdef CONFIG_XFRM_STATISTICS
static int __init xfrm_statistics_init(void)
{
	if (snmp_mib_init((void **)xfrm_statistics,
			  sizeof(struct linux_xfrm_mib)) < 0)
		return -ENOMEM;
	return 0;
}
#endif

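/* Boot-time setup: the byidx and per-direction bydst hash tables all
 * start out with 8 buckets (hmask = 8 - 1); they are grown later by the
 * hash-resize logic as policies accumulate.
 */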
static void __init xfrm_policy_init(void)
{
	unsigned int hmask, sz;
	int dir;

	xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	xfrm_policy_byidx = xfrm_hash_alloc(sz);
	xfrm_idx_hmask = hmask;
	if (!xfrm_policy_byidx)
		panic("XFRM: failed to allocate byidx hash\n");

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		INIT_HLIST_HEAD(&xfrm_policy_inexact[dir]);

		htab = &xfrm_policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		htab->hmask = hmask;
		if (!htab->table)
			panic("XFRM: failed to allocate bydst hash\n");
	}

	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
	register_netdevice_notifier(&xfrm_dev_notifier);
}

void __init xfrm_init(void)
{
#ifdef CONFIG_XFRM_STATISTICS
	xfrm_statistics_init();
#endif
	xfrm_state_init();
	xfrm_policy_init();
	xfrm_input_init();
#ifdef CONFIG_XFRM_STATISTICS
	xfrm_proc_init();
#endif
}

#ifdef CONFIG_AUDITSYSCALL
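/* Append the policy's security context (if any) and its selector
 * addresses to an audit record; prefix lengths are logged only when
 * the selector covers more than a single host.
 */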
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=" NIPQUAD_FMT,
				 NIPQUAD(sel->saddr.a4));
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=" NIPQUAD_FMT,
				 NIPQUAD(sel->daddr.a4));
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=" NIP6_FMT,
				 NIP6(*(struct in6_addr *)sel->saddr.a6));
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=" NIP6_FMT,
				 NIP6(*(struct in6_addr *)sel->daddr.a6));
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
			   u32 auid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      u32 auid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif

#ifdef CONFIG_XFRM_MIGRATE
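/* A migration selector matches the target when either the upper-layer
 * protocol is wildcarded and the addresses and prefix lengths agree, or
 * the two selectors are bitwise identical.
 */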
static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
				       struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
				  sel_cmp->family) == 0 &&
		    xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
				  sel_cmp->family) == 0 &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return 1;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return 1;
		}
	}
	return 0;
}

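/* Find the policy to migrate: the exact bydst hash chain is searched
 * first, then the inexact list, where a match only wins if its priority
 * is strictly better (numerically lower) than the hashed match's.
 */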
static struct xfrm_policy * xfrm_migrate_policy_find(struct xfrm_selector *sel,
						     u8 dir, u8 type)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(&sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &xfrm_policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type &&
		    pol->priority < priority) {
			ret = pol;
			break;
		}
	}

	if (ret)
		xfrm_pol_hold(ret);

	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
					  m->old_family) == 0 &&
			    xfrm_addr_cmp(&t->saddr, &m->old_saddr,
					  m->old_family) == 0) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* In transport mode the template stores no IP
			   addresses, so comparing mode and protocol is
			   sufficient. */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	struct dst_entry *dst;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			while ((dst = pol->bundles) != NULL) {
				pol->bundles = dst->next;
				dst_free(dst);
			}
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
				   m[i].old_family) == 0) &&
		    (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
				   m[i].old_family) == 0))
			return -EINVAL;
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}

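/* Perform a complete migration in five stages: validate the request,
 * find the target policy, clone and update the matching states, update
 * the policy's templates, delete the old states and finally notify the
 * key managers. On failure any states created so far are torn down and
 * all references are dropped.
 */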
int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			if ((xc = xfrm_state_migrate(x, mp))) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate);

	xfrm_pol_put(pol);

	return 0;

out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif