/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/ip.h>

#include "xfrm_hash.h"

int sysctl_xfrm_larval_drop __read_mostly;

DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_RWLOCK(xfrm_policy_lock);

unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2];
EXPORT_SYMBOL(xfrm_policy_count);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static struct kmem_cache *xfrm_dst_cache __read_mostly;

static struct work_struct xfrm_policy_gc_work;
static HLIST_HEAD(xfrm_policy_gc_list);
static DEFINE_SPINLOCK(xfrm_policy_gc_lock);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static void xfrm_init_pmtu(struct dst_entry *dst);

static inline int
__xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
	return  addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
		(fl->proto == sel->proto || !sel->proto) &&
		(fl->oif == sel->ifindex || !sel->ifindex);
}

static inline int
__xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
	return  addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
		(fl->proto == sel->proto || !sel->proto) &&
		(fl->oif == sel->ifindex || !sel->ifindex);
}

int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
			unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return 0;
}
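
/* Editor's sketch, not part of the original file: a minimal example of how
 * a selector is checked against a flow.  The function name and the values
 * are hypothetical.  A zero proto or ifindex in the selector acts as a
 * wildcard, and the port masks select between "exact port" and "any port".
 */
#if 0
static int example_match_https(struct flowi *fl)
{
	struct xfrm_selector sel = {
		.family      = AF_INET,
		.prefixlen_d = 0,		/* any destination address */
		.prefixlen_s = 0,		/* any source address */
		.proto       = IPPROTO_TCP,	/* 0 would match any proto */
		.dport       = htons(443),
		.dport_mask  = htons(0xffff),	/* require exact dport */
		/* sport_mask == 0: any source port; ifindex == 0: any oif */
	};

	/* nonzero iff every enabled field of the selector matches the flow */
	return xfrm_selector_match(&sel, fl, AF_INET);
}
#endif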

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
						int family)
{
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR)
		saddr = x->coaddr;
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR)
		daddr = x->coaddr;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(tos, saddr, daddr);
	xfrm_policy_put_afinfo(afinfo);
	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
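
/* Editor's note, not part of the original file: make_jiffies() clamps a
 * timeout given in seconds so that secs*HZ cannot overflow the range
 * accepted by mod_timer().  Worked values:
 *
 *	make_jiffies(5)        == 5 * HZ
 *	make_jiffies(LONG_MAX) == MAX_SCHEDULE_TIMEOUT - 1
 */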

static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy *)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (xp->dead)
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}

/* Allocate xfrm_policy. Not used here; it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		atomic_set(&policy->refcnt, 1);
		setup_timer(&policy->timer, xfrm_policy_timer,
			    (unsigned long)policy);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy xfrm_policy: descendant resources must already be released by this point. */

void __xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->dead);
	BUG_ON(policy->bundles);

	if (del_timer(&policy->timer))
		BUG();

	security_xfrm_policy_free(policy);
	kfree(policy);
}
EXPORT_SYMBOL(__xfrm_policy_destroy);

static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
	struct dst_entry *dst;

	while ((dst = policy->bundles) != NULL) {
		policy->bundles = dst->next;
		dst_free(dst);
	}

	if (del_timer(&policy->timer))
		atomic_dec(&policy->refcnt);

	if (atomic_read(&policy->refcnt) > 1)
		flow_cache_flush();

	xfrm_pol_put(policy);
}

static void xfrm_policy_gc_task(struct work_struct *work)
{
	struct xfrm_policy *policy;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_policy_gc_lock);
	gc_list.first = xfrm_policy_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_policy_gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst)
		xfrm_policy_gc_kill(policy);
}

/* Rule must be locked. Release descendant resources, announce
 * the entry dead. The rule must already be unlinked from the lists.
 */
static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	int dead;

	write_lock_bh(&policy->lock);
	dead = policy->dead;
	policy->dead = 1;
	write_unlock_bh(&policy->lock);

	if (unlikely(dead)) {
		WARN_ON(1);
		return;
	}

	spin_lock(&xfrm_policy_gc_lock);
	hlist_add_head(&policy->bydst, &xfrm_policy_gc_list);
	spin_unlock(&xfrm_policy_gc_lock);

	schedule_work(&xfrm_policy_gc_work);
}

struct xfrm_policy_hash {
	struct hlist_head	*table;
	unsigned int		hmask;
};

static struct hlist_head xfrm_policy_inexact[XFRM_POLICY_MAX*2];
static struct xfrm_policy_hash xfrm_policy_bydst[XFRM_POLICY_MAX*2] __read_mostly;
static struct hlist_head *xfrm_policy_byidx __read_mostly;
static unsigned int xfrm_idx_hmask __read_mostly;
static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(u32 index)
{
	return __idx_hash(index, xfrm_idx_hmask);
}

static struct hlist_head *policy_hash_bysel(struct xfrm_selector *sel, unsigned short family, int dir)
{
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
	unsigned int hash = __sel_hash(sel, family, hmask);

	return (hash == hmask + 1 ?
		&xfrm_policy_inexact[dir] :
		xfrm_policy_bydst[dir].table + hash);
}
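
/* Editor's note, not part of the original file: __sel_hash() (defined in
 * xfrm_hash.h) returns the sentinel value hmask + 1 for selectors that do
 * not name full host addresses (prefix lengths shorter than the whole
 * address), since such selectors cannot be hashed by address.  Those
 * policies go onto the per-direction "inexact" list and are scanned
 * linearly at lookup time; fully specified selectors land in a regular
 * bucket of xfrm_policy_bydst[dir].
 */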

static struct hlist_head *policy_hash_direct(xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, int dir)
{
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
	unsigned int hash = __addr_hash(daddr, saddr, family, hmask);

	return xfrm_policy_bydst[dir].table + hash;
}

static void xfrm_dst_hash_transfer(struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
		unsigned int h;

		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask);
		hlist_add_head(&pol->bydst, ndsttable+h);
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}
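
/* Editor's note, not part of the original file: each resize doubles the
 * bucket count, so masks grow ((old + 1) << 1) - 1.  Worked values:
 *
 *	hmask 15 (16 buckets) -> 31 (32 buckets) -> 63 (64 buckets) -> ...
 */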

static void xfrm_bydst_resize(int dir)
{
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *odst = xfrm_policy_bydst[dir].table;
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	int i;

	if (!ndst)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);

	xfrm_policy_bydst[dir].table = ndst;
	xfrm_policy_bydst[dir].hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(int total)
{
	unsigned int hmask = xfrm_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = xfrm_policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	xfrm_policy_byidx = nidx;
	xfrm_idx_hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(int dir, int *total)
{
	unsigned int cnt = xfrm_policy_count[dir];
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(int total)
{
	unsigned int hmask = xfrm_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct xfrmk_spdinfo *si)
{
	read_lock_bh(&xfrm_policy_lock);
	si->incnt = xfrm_policy_count[XFRM_POLICY_IN];
	si->outcnt = xfrm_policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = xfrm_policy_count[XFRM_POLICY_FWD];
	si->inscnt = xfrm_policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = xfrm_policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = xfrm_policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = xfrm_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
	read_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *__unused)
{
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		if (xfrm_bydst_should_resize(dir, &total))
			xfrm_bydst_resize(dir);
	}
	if (xfrm_byidx_should_resize(total))
		xfrm_byidx_resize(total);

	mutex_unlock(&hash_resize_mutex);
}

static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);

/* Generate a new index... KAME seems to generate them ordered by cost,
 * at the price of absolute unpredictability of rule ordering. That will
 * not pass here. */
static u32 xfrm_gen_index(u8 type, int dir)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_node *entry;
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		list = xfrm_policy_byidx + idx_hash(idx);
		found = 0;
		hlist_for_each_entry(p, entry, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}
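
/* Editor's note, not part of the original file: indices encode the
 * direction in their low three bits (xfrm_policy_id2dir(idx) == idx & 7),
 * and the generator steps by 8, so for example:
 *
 *	dir == XFRM_POLICY_OUT (1): candidate indices 1, 9, 17, 25, ...
 *	dir == XFRM_POLICY_FWD (2): candidate indices 2, 10, 18, 26, ...
 *
 * The loop simply retries until it finds an index not already present in
 * the byidx hash.
 */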

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}

int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct xfrm_policy *pol;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct hlist_node *entry, *newpos;
	struct dst_entry *gc_list;

	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(&policy->selector, policy->family, dir);
	delpol = NULL;
	newpos = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == policy->type &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst;
			continue;
		}
		if (delpol)
			break;
	}
	if (newpos)
		hlist_add_after(newpos, &policy->bydst);
	else
		hlist_add_head(&policy->bydst, chain);
	xfrm_pol_hold(policy);
	xfrm_policy_count[dir]++;
	atomic_inc(&flow_cache_genid);
	if (delpol) {
		hlist_del(&delpol->bydst);
		hlist_del(&delpol->byidx);
		xfrm_policy_count[dir]--;
	}
	policy->index = delpol ? delpol->index : xfrm_gen_index(policy->type, dir);
	hlist_add_head(&policy->byidx, xfrm_policy_byidx+idx_hash(policy->index));
	policy->curlft.add_time = get_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(dir, NULL))
		schedule_work(&xfrm_hash_work);

	read_lock_bh(&xfrm_policy_lock);
	gc_list = NULL;
	entry = &policy->bydst;
	hlist_for_each_entry_continue(policy, entry, bydst) {
		struct dst_entry *dst;

		write_lock(&policy->lock);
		dst = policy->bundles;
		if (dst) {
			struct dst_entry *tail = dst;
			while (tail->next)
				tail = tail->next;
			tail->next = gc_list;
			gc_list = dst;

			policy->bundles = NULL;
		}
		write_unlock(&policy->lock);
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;

		gc_list = dst->next;
		dst_free(dst);
	}

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);
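
/* Editor's sketch, not part of the original file: a typical SPD insertion
 * as a key manager would perform it.  The function name and values are
 * hypothetical, and error handling plus selector/template setup are
 * elided.
 */
#if 0
static int example_add_policy(void)
{
	struct xfrm_policy *pol = xfrm_policy_alloc(GFP_KERNEL);

	if (!pol)
		return -ENOMEM;

	pol->family = AF_INET;
	pol->action = XFRM_POLICY_ALLOW;
	pol->type   = XFRM_POLICY_TYPE_MAIN;
	/* fill pol->selector, pol->lft and pol->xfrm_vec[] here ... */

	/* excl=1: fail with -EEXIST instead of replacing a matching entry */
	return xfrm_policy_insert(XFRM_POLICY_OUT, pol, 1);
}
#endif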

struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
					  struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(sel, sel->family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == type &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(pol);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				xfrm_policy_count[dir]--;
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(ret);
	}
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(u8 type, int dir, u32 id, int delete,
				     int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = xfrm_policy_byidx + idx_hash(id);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, byidx) {
		if (pol->type == type && pol->index == id) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(pol);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				xfrm_policy_count[dir]--;
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(ret);
	}
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i;

		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			err = security_xfrm_policy_delete(pol);
			if (err) {
				xfrm_audit_policy_delete(pol, 0,
							 audit_info->loginuid,
							 audit_info->secid);
				return err;
			}
		}
		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry,
					     xfrm_policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				err = security_xfrm_policy_delete(pol);
				if (err) {
					xfrm_audit_policy_delete(pol, 0,
							audit_info->loginuid,
							audit_info->secid);
					return err;
				}
			}
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(u8 type, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif

int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0;

	write_lock_bh(&xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(type, audit_info);
	if (err)
		goto out;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i, killed;

		killed = 0;
	again1:
		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			hlist_del(&pol->bydst);
			hlist_del(&pol->byidx);
			write_unlock_bh(&xfrm_policy_lock);

			xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
						 audit_info->secid);

			xfrm_policy_kill(pol);
			killed++;

			write_lock_bh(&xfrm_policy_lock);
			goto again1;
		}

		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
	again2:
			hlist_for_each_entry(pol, entry,
					     xfrm_policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				write_unlock_bh(&xfrm_policy_lock);

				xfrm_audit_policy_delete(pol, 1,
							 audit_info->loginuid,
							 audit_info->secid);
				xfrm_policy_kill(pol);
				killed++;

				write_lock_bh(&xfrm_policy_lock);
				goto again2;
			}
		}

		xfrm_policy_count[dir] -= killed;
	}
	atomic_inc(&flow_cache_genid);
out:
	write_unlock_bh(&xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol, *last = NULL;
	struct hlist_node *entry;
	int dir, last_dir = 0, count, error;

	read_lock_bh(&xfrm_policy_lock);
	count = 0;

	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		struct hlist_head *table = xfrm_policy_bydst[dir].table;
		int i;

		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			if (last) {
				error = func(last, last_dir % XFRM_POLICY_MAX,
					     count, data);
				if (error)
					goto out;
			}
			last = pol;
			last_dir = dir;
			count++;
		}
		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry, table + i, bydst) {
				if (pol->type != type)
					continue;
				if (last) {
					error = func(last, last_dir % XFRM_POLICY_MAX,
						     count, data);
					if (error)
						goto out;
				}
				last = pol;
				last_dir = dir;
				count++;
			}
		}
	}
	if (count == 0) {
		error = -ENOENT;
		goto out;
	}
	error = func(last, last_dir % XFRM_POLICY_MAX, 0, data);
out:
	read_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
			     u8 type, u16 family, int dir)
{
	struct xfrm_selector *sel = &pol->selector;
	int match, ret = -ESRCH;

	if (pol->family != family ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol, fl->secid, dir);

	return ret;
}

static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
						     u16 family, u8 dir)
{
	int err;
	struct xfrm_policy *pol, *ret;
	xfrm_address_t *daddr, *saddr;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(daddr, saddr, family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &xfrm_policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else if (pol->priority < priority) {
			ret = pol;
			break;
		}
	}
	if (ret)
		xfrm_pol_hold(ret);
fail:
	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static int xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
			      void **objp, atomic_t **obj_refp)
{
	struct xfrm_policy *pol;
	int err = 0;

#ifdef CONFIG_XFRM_SUB_POLICY
	pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_SUB, fl, family, dir);
	if (IS_ERR(pol)) {
		err = PTR_ERR(pol);
		pol = NULL;
	}
	if (pol || err)
		goto end;
#endif
	pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN, fl, family, dir);
	if (IS_ERR(pol)) {
		err = PTR_ERR(pol);
		pol = NULL;
	}
#ifdef CONFIG_XFRM_SUB_POLICY
end:
#endif
	if ((*objp = (void *) pol) != NULL)
		*obj_refp = &pol->refcnt;
	return err;
}

static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	}
}

static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		int match = xfrm_selector_match(&pol->selector, fl,
						sk->sk_family);
		int err = 0;

		if (match) {
			err = security_xfrm_policy_lookup(pol, fl->secid,
							  policy_to_flow_dir(dir));
			if (!err)
				xfrm_pol_hold(pol);
			else if (err == -ESRCH)
				pol = NULL;
			else
				pol = ERR_PTR(err);
		} else
			pol = NULL;
	}
	read_unlock_bh(&xfrm_policy_lock);

	return pol;
}

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	struct hlist_head *chain = policy_hash_bysel(&pol->selector,
						     pol->family, dir);

	hlist_add_head(&pol->bydst, chain);
	hlist_add_head(&pol->byidx, xfrm_policy_byidx+idx_hash(pol->index));
	xfrm_policy_count[dir]++;
	xfrm_pol_hold(pol);

	if (xfrm_bydst_should_resize(dir, NULL))
		schedule_work(&xfrm_hash_work);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	if (hlist_unhashed(&pol->bydst))
		return NULL;

	hlist_del(&pol->bydst);
	hlist_del(&pol->byidx);
	xfrm_policy_count[dir]--;

	return pol;
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		if (dir < XFRM_POLICY_MAX)
			atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = get_seconds();
		pol->index = xfrm_gen_index(pol->type, XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}

static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old, newp)) {
			kfree(newp);
			return NULL;  /* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}

static int
xfrm_get_saddr(xfrm_address_t *local, xfrm_address_t *remote,
	       unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->get_saddr(local, remote);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
		      struct xfrm_state **xfrm,
		      unsigned short family)
{
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL ||
		    tmpl->mode == XFRM_MODE_BEET) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			family = tmpl->encap_family;
			if (xfrm_addr_any(local, family)) {
				error = xfrm_get_saddr(&tmp, remote, family);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx>=0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
		  struct xfrm_state **xfrm,
		  unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

fail:
	for (cnx--; cnx>=0; cnx--)
		xfrm_state_put(tpp[cnx]);
	return error;
}
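
/* Editor's note, not part of the original file: with sub-policies enabled,
 * the templates of all matching policies are resolved into one array, so
 * the combined depth is capped at XFRM_MAX_DEPTH.  As a hypothetical worked
 * example, a sub policy with 2 templates plus a main policy with 3 yields
 * cnx == 5 resolved states, which xfrm_state_sort() then orders for
 * outbound processing.
 */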

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
{
	struct dst_entry *x;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EINVAL);
	x = afinfo->find_bundle(fl, policy);
	xfrm_policy_put_afinfo(afinfo);
	return x;
}

static inline int xfrm_get_tos(struct flowi *fl, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int tos;

	if (!afinfo)
		return -EINVAL;

	tos = afinfo->get_tos(fl);

	xfrm_policy_put_afinfo(afinfo);

	return tos;
}

static inline struct xfrm_dst *xfrm_alloc_dst(int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct xfrm_dst *xdst;

	if (!afinfo)
		return ERR_PTR(-EINVAL);

	xdst = dst_alloc(afinfo->dst_ops) ?: ERR_PTR(-ENOBUFS);

	xfrm_policy_put_afinfo(afinfo);

	return xdst;
}

static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->fill_dst(xdst, dev);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... In short, bundle a bundle.
 */

static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
					    struct xfrm_state **xfrm, int nx,
					    struct flowi *fl,
					    struct dst_entry *dst)
{
	unsigned long now = jiffies;
	struct net_device *dev;
	struct dst_entry *dst_prev = NULL;
	struct dst_entry *dst0 = NULL;
	int i = 0;
	int err;
	int header_len = 0;
	int trailer_len = 0;
	int tos;
	int family = policy->selector.family;

	tos = xfrm_get_tos(fl, family);
	err = tos;
	if (tos < 0)
		goto put_states;

	dst_hold(dst);

	for (; i < nx; i++) {
		struct xfrm_dst *xdst = xfrm_alloc_dst(family);
		struct dst_entry *dst1 = &xdst->u.dst;

		err = PTR_ERR(xdst);
		if (IS_ERR(xdst)) {
			dst_release(dst);
			goto put_states;
		}

		if (!dst_prev)
			dst0 = dst1;
		else {
			dst_prev->child = dst_clone(dst1);
			dst1->flags |= DST_NOHASH;
		}

		xdst->route = dst;
		memcpy(&dst1->metrics, &dst->metrics, sizeof(dst->metrics));

		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
			family = xfrm[i]->props.family;
			dst = xfrm_dst_lookup(xfrm[i], tos, family);
			err = PTR_ERR(dst);
			if (IS_ERR(dst))
				goto put_states;
		} else
			dst_hold(dst);

		dst1->xfrm = xfrm[i];
		xdst->genid = xfrm[i]->genid;

		dst1->obsolete = -1;
		dst1->flags |= DST_HOST;
		dst1->lastuse = now;

		dst1->input = dst_discard;
		dst1->output = xfrm[i]->outer_mode->afinfo->output;

		dst1->next = dst_prev;
		dst_prev = dst1;

		header_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;
	}

	dst_prev->child = dst;
	dst0->path = dst;

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	/* Copy neighbour for reachability confirmation */
	dst0->neighbour = neigh_clone(dst->neighbour);

	xfrm_init_pmtu(dst_prev);

	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;

		err = xfrm_fill_dst(xdst, dev);
		if (err)
			goto free_dst;

		dst_prev->header_len = header_len;
		dst_prev->trailer_len = trailer_len;
		header_len -= xdst->u.dst.xfrm->props.header_len;
		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
	}

out:
	return dst0;

put_states:
	for (; i < nx; i++)
		xfrm_state_put(xfrm[i]);
free_dst:
	if (dst0)
		dst_free(dst0);
	dst0 = ERR_PTR(err);
	goto out;
}
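
/* Editor's illustration, not part of the original file: for a hypothetical
 * nx == 2, the bundle built above is linked roughly as
 *
 *	dst0 (xfrm_dst carrying xfrm[0])
 *	  ->child = dst1 (xfrm_dst carrying xfrm[1])
 *	       ->child = dst (the final raw route)
 *	dst0->path == dst
 *
 * Each xfrm_dst also keeps ->route, the route current when that level was
 * built, and the fixup loop leaves every level with the header/trailer
 * space still needed by its own transform plus all deeper ones.
 */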

static int inline
xfrm_dst_alloc_copy(void **target, void *src, int size)
{
	if (!*target) {
		*target = kmalloc(size, GFP_ATOMIC);
		if (!*target)
			return -ENOMEM;
	}
	memcpy(*target, src, size);
	return 0;
}

static int inline
xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
				   sel, sizeof(*sel));
#else
	return 0;
#endif
}

static int inline
xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
#else
	return 0;
#endif
}

static int stale_bundle(struct dst_entry *dst);

/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
int __xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		  struct sock *sk, int flags)
{
	struct xfrm_policy *policy;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols;
	int pol_dead;
	int xfrm_nr;
	int pi;
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst, *dst_orig = *dst_p;
	int nx = 0;
	int err;
	u32 genid;
	u16 family;
	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);

restart:
	genid = atomic_read(&flow_cache_genid);
	policy = NULL;
	for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
		pols[pi] = NULL;
	npols = 0;
	pol_dead = 0;
	xfrm_nr = 0;

	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
		err = PTR_ERR(policy);
		if (IS_ERR(policy))
			goto dropdst;
	}

	err = -ENOENT;
	if (!policy) {
		/* To accelerate a bit... */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !xfrm_policy_count[XFRM_POLICY_OUT])
			goto nopol;

		policy = flow_cache_lookup(fl, dst_orig->ops->family,
					   dir, xfrm_policy_lookup);
		err = PTR_ERR(policy);
		if (IS_ERR(policy))
			goto dropdst;
	}

	if (!policy)
		goto nopol;

	family = dst_orig->ops->family;
	pols[0] = policy;
	npols ++;
	xfrm_nr += pols[0]->xfrm_nr;

	if ((flags & XFRM_LOOKUP_ICMP) && !(policy->flags & XFRM_POLICY_ICMP))
		goto error;

	policy->curlft.use_time = get_seconds();

	switch (policy->action) {
	default:
	case XFRM_POLICY_BLOCK:
		/* Prohibit the flow */
		err = -EPERM;
		goto error;

	case XFRM_POLICY_ALLOW:
#ifndef CONFIG_XFRM_SUB_POLICY
		if (policy->xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pol_put(policy);
			return 0;
		}
#endif

		/* Try to find matching bundle.
		 *
		 * LATER: help from flow cache. It is optional, this
		 * is required only for output policy.
		 */
		dst = xfrm_find_bundle(fl, policy, family);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			goto error;
		}

		if (dst)
			break;

#ifdef CONFIG_XFRM_SUB_POLICY
		if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
			pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
							    fl, family,
							    XFRM_POLICY_OUT);
			if (pols[1]) {
				if (IS_ERR(pols[1])) {
					err = PTR_ERR(pols[1]);
					goto error;
				}
				if (pols[1]->action == XFRM_POLICY_BLOCK) {
					err = -EPERM;
					goto error;
				}
				npols ++;
				xfrm_nr += pols[1]->xfrm_nr;
			}
		}

		/*
		 * Neither the flowi nor the bundle information knows the
		 * transformation template size, so when more than one
		 * policy is in use we can only tell whether all of them
		 * are bypass after every one of them has been searched.
		 * Note that, as above, the not-transformed bypass case is
		 * also handled by the non-sub-policy configuration.
		 */
		if (xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}

#endif
		nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

		if (unlikely(nx<0)) {
			err = nx;
			if (err == -EAGAIN && sysctl_xfrm_larval_drop) {
				/* EREMOTE tells the caller to generate
				 * a one-shot blackhole route.
				 */
				xfrm_pol_put(policy);
				return -EREMOTE;
			}
			if (err == -EAGAIN && (flags & XFRM_LOOKUP_WAIT)) {
				DECLARE_WAITQUEUE(wait, current);

				add_wait_queue(&km_waitq, &wait);
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&km_waitq, &wait);

				nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

				if (nx == -EAGAIN && signal_pending(current)) {
					err = -ERESTART;
					goto error;
				}
				if (nx == -EAGAIN ||
				    genid != atomic_read(&flow_cache_genid)) {
					xfrm_pols_put(pols, npols);
					goto restart;
				}
				err = nx;
			}
			if (err < 0)
				goto error;
		}
		if (nx == 0) {
			/* Flow passes not transformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}

		dst = xfrm_bundle_create(policy, xfrm, nx, fl, dst_orig);
		err = PTR_ERR(dst);
		if (IS_ERR(dst))
			goto error;

		for (pi = 0; pi < npols; pi++) {
			read_lock_bh(&pols[pi]->lock);
			pol_dead |= pols[pi]->dead;
			read_unlock_bh(&pols[pi]->lock);
		}

		write_lock_bh(&policy->lock);
		if (unlikely(pol_dead || stale_bundle(dst))) {
			/* Wow! While we worked on resolving, this
			 * policy has gone. Retry. It is not paranoia,
			 * we just cannot enlist a new bundle to a dead
			 * object. We can't enlist stale bundles either.
			 */
			write_unlock_bh(&policy->lock);
			if (dst)
				dst_free(dst);

			err = -EHOSTUNREACH;
			goto error;
		}

		if (npols > 1)
			err = xfrm_dst_update_parent(dst, &pols[1]->selector);
		else
			err = xfrm_dst_update_origin(dst, fl);
		if (unlikely(err)) {
			write_unlock_bh(&policy->lock);
			if (dst)
				dst_free(dst);
			goto error;
		}

		dst->next = policy->bundles;
		policy->bundles = dst;
		dst_hold(dst);
		write_unlock_bh(&policy->lock);
	}
	*dst_p = dst;
	dst_release(dst_orig);
	xfrm_pols_put(pols, npols);
	return 0;

error:
	xfrm_pols_put(pols, npols);
dropdst:
	dst_release(dst_orig);
	*dst_p = NULL;
	return err;

nopol:
	if (flags & XFRM_LOOKUP_ICMP)
		goto dropdst;
	return 0;
}
EXPORT_SYMBOL(__xfrm_lookup);

int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
{
	int err = __xfrm_lookup(dst_p, fl, sk, flags);

	if (err == -EREMOTE) {
		dst_release(*dst_p);
		*dst_p = NULL;
		err = -EAGAIN;
	}

	return err;
}
EXPORT_SYMBOL(xfrm_lookup);
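
/* Editor's sketch, not part of the original file: the typical caller
 * pattern.  A plain route is looked up first, then xfrm_lookup() either
 * leaves it alone (no matching policy), swaps in a bundle, or fails; with
 * sysctl_xfrm_larval_drop set, __xfrm_lookup()'s -EREMOTE is mapped to
 * -EAGAIN here after the route has been dropped.  The function name is
 * hypothetical.
 */
#if 0
static int example_output_lookup(struct sock *sk, struct flowi *fl,
				 struct dst_entry *route)
{
	struct dst_entry *dst = route;	/* plain routing result */
	int err;

	err = xfrm_lookup(&dst, fl, sk, 0);
	if (err)
		return err;	/* the route was already released on error */

	/* dst now points either at the original route (no policy matched)
	 * or at the head of an xfrm bundle wrapping it; use it, then drop
	 * the reference.
	 */
	dst_release(dst);
	return 0;
}
#endif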
  1426. static inline int
  1427. xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
  1428. {
  1429. struct xfrm_state *x;
  1430. if (!skb->sp || idx < 0 || idx >= skb->sp->len)
  1431. return 0;
  1432. x = skb->sp->xvec[idx];
  1433. if (!x->type->reject)
  1434. return 0;
  1435. return x->type->reject(x, skb, fl);
  1436. }
  1437. /* When skb is transformed back to its "native" form, we have to
  1438. * check policy restrictions. At the moment we make this in maximally
  1439. * stupid way. Shame on me. :-) Of course, connected sockets must
  1440. * have policy cached at them.
  1441. */
  1442. static inline int
  1443. xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
  1444. unsigned short family)
  1445. {
  1446. if (xfrm_state_kern(x))
  1447. return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
  1448. return x->id.proto == tmpl->id.proto &&
  1449. (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
  1450. (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
  1451. x->props.mode == tmpl->mode &&
  1452. ((tmpl->aalgos & (1<<x->props.aalgo)) ||
  1453. !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
  1454. !(x->props.mode != XFRM_MODE_TRANSPORT &&
  1455. xfrm_state_addr_cmp(tmpl, x, family));
  1456. }
  1457. /*
  1458. * 0 or more than 0 is returned when validation is succeeded (either bypass
  1459. * because of optional transport mode, or next index of the mathced secpath
  1460. * state with the template.
  1461. * -1 is returned when no matching template is found.
  1462. * Otherwise "-2 - errored_index" is returned.
  1463. */
static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-idx;
			break;
		}
	}
	return start;
}
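
/* Editor's illustration of the encoding (indices assumed): a required
 * template that matches sp->xvec[1] makes the function return 2, the next
 * index to scan. If instead the scan hits a non-matching tunnel-mode state
 * at index 1, the function returns -2 - 1 = -3, and __xfrm_policy_check()
 * below recovers the errored index as -(2 + (-3)) = 1 before rejecting.
 */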

int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl, reverse);
	err = security_xfrm_decode_session(skb, &fl->secid);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(__xfrm_decode_session);
static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	int reverse;
	struct flowi fl;
	u8 fl_dir;
	int xerr_idx = -1;

	reverse = dir & ~XFRM_POLICY_MASK;
	dir &= XFRM_POLICY_MASK;
	fl_dir = policy_to_flow_dir(dir);

	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0)
		return 0;
	nf_nat_decode_session(skb, &fl, family);

	/* First, check the SAs used against their selectors. */
	if (skb->sp) {
		int i;

		for (i = skb->sp->len-1; i >= 0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family))
				return 0;
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
		if (IS_ERR(pol))
			return 0;
	}

	if (!pol)
		pol = flow_cache_lookup(&fl, family, fl_dir,
					xfrm_policy_lookup);

	if (IS_ERR(pol))
		return 0;

	if (!pol) {
		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = get_seconds();

	pols[0] = pol;
	npols++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN);
		if (pols[1]) {
			if (IS_ERR(pols[1]))
				return 0;
			pols[1]->curlft.use_time = get_seconds();
			npols++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW)
				goto reject;
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH)
				goto reject_error;
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find the corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between every two transformations.
		 */
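		/* Editor's note (a reading of the loop below, not original
		 * text): templates are consumed from tpp[xfrm_nr-1] inward
		 * while the secpath is scanned from sp->xvec[0] upward, so
		 * each successive template must be satisfied at or after
		 * the index where the previous one matched.
		 */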
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx))
			goto reject;

		xfrm_pols_put(pols, npols);
		return 1;
	}

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);

int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct flowi fl;

	if (xfrm_decode_session(skb, &fl, family) < 0)
		return 0;

	return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
}
EXPORT_SYMBOL(__xfrm_route_forward);

/* Optimize later using cookies and generation ids. */
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to "-1" to force all XFRM destinations to get validated by
	 * dst_ops->check on every use.  We do this because when a
	 * normal route referenced by an XFRM dst is obsoleted we do
	 * not go looking around for all parent referencing XFRM dsts
	 * so that we can invalidate them.  It is just too much work.
	 * Instead we make the checks here on every use.  For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example).  If X is marked obsolete, "A" will not
	 * notice.  That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a policy's bundle is pruned, we dst_free() the XFRM
	 * dst which causes its ->obsolete field to be set to a
	 * positive integer.  If an XFRM dst has been pruned like
	 * this, we want to force a new route lookup.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
}

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = dev->nd_net->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before it reaches the
	 * point of failure. */
	return;
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}
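
/* Editor's note: the two helpers below prune in two phases: bundles are
 * unlinked onto a private gc_list while the policy (and policy hash)
 * locks are held, and only afterwards dst_free()d with no locks taken,
 * keeping the freeing work out of the locked sections.
 */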
static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_entry *), struct dst_entry **gc_list_p)
{
	struct dst_entry *dst, **dstp;

	write_lock(&pol->lock);
	dstp = &pol->bundles;
	while ((dst = *dstp) != NULL) {
		if (func(dst)) {
			*dstp = dst->next;
			dst->next = *gc_list_p;
			*gc_list_p = dst;
		} else {
			dstp = &dst->next;
		}
	}
	write_unlock(&pol->lock);
}

static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
{
	struct dst_entry *gc_list = NULL;
	int dir;

	read_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		struct hlist_head *table;
		int i;

		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst)
			prune_one_bundle(pol, func, &gc_list);

		table = xfrm_policy_bydst[dir].table;
		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry, table + i, bydst)
				prune_one_bundle(pol, func, &gc_list);
		}
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;
		gc_list = dst->next;
		dst_free(dst);
	}
}

static int unused_bundle(struct dst_entry *dst)
{
	return !atomic_read(&dst->__refcnt);
}

static void __xfrm_garbage_collect(void)
{
	xfrm_prune_bundles(unused_bundle);
}

static int xfrm_flush_bundles(void)
{
	xfrm_prune_bundles(stale_bundle);
	return 0;
}
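
/* Editor's note: xfrm_init_pmtu() below seeds each bundle level's cached
 * MTUs: the level's PMTU is its child's MTU reduced by the state's
 * overhead via xfrm_state_mtu() and clamped to the cached MTU of the
 * attached route.
 */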
static void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst->metrics[RTAX_MTU-1] = pmtu;
	} while ((dst = dst->next));
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */
int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
		   struct flowi *fl, int family, int strict)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (fl) {
		if (first->origin && !flow_cache_uli_match(first->origin, fl))
			return 0;
		if (first->partner &&
		    !xfrm_selector_match(first->partner, fl, family))
			return 0;
	}
#endif

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
			return 0;
		if (fl && pol &&
		    !security_xfrm_state_pol_flow_match(dst->xfrm, pol, fl))
			return 0;
		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->genid != dst->xfrm->genid)
			return 0;

		if (strict && fl &&
		    !(dst->xfrm->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
		    !xfrm_state_addr_flow_check(dst->xfrm, fl, family))
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;
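
	/* Editor's note: some cached MTU changed while walking down, so
	 * replay the PMTU computation from the deepest changed level
	 * ("last") back up to "first", refreshing each level's cached
	 * child MTU and its RTAX_MTU metric on the way.
	 */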
	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst->metrics[RTAX_MTU-1] = mtu;

		if (last == first)
			break;

		last = (struct xfrm_dst *)last->u.dst.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}
EXPORT_SYMBOL(xfrm_bundle_ok);

int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = __xfrm_garbage_collect;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);
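
/* Editor's sketch (abridged; the xfrm4_* names are assumptions taken
 * from the IPv4 user, not defined here): an address family registers
 * itself once at init time, and any dst_ops hooks it leaves NULL are
 * defaulted to the xfrm_* handlers above:
 *
 *	static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
 *		.family		= AF_INET,
 *		.dst_ops	= &xfrm4_dst_ops,
 *	};
 *
 *	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
 */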

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;
			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
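
/* Editor's note: the get/put pair below brackets a read-side critical
 * section on xfrm_policy_afinfo_lock; xfrm_policy_get_afinfo() returns
 * with the lock still read-held on success (dropping it only when no
 * afinfo is registered), and xfrm_policy_put_afinfo() releases it.
 */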
static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	read_unlock(&xfrm_policy_afinfo_lock);
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (dev->nd_net != &init_net)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
		xfrm_flush_bundles();
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	xfrm_dev_event,
	NULL,
	0
};

static void __init xfrm_policy_init(void)
{
	unsigned int hmask, sz;
	int dir;

	xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	xfrm_policy_byidx = xfrm_hash_alloc(sz);
	xfrm_idx_hmask = hmask;
	if (!xfrm_policy_byidx)
		panic("XFRM: failed to allocate byidx hash\n");

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		INIT_HLIST_HEAD(&xfrm_policy_inexact[dir]);

		htab = &xfrm_policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		htab->hmask = hmask;
		if (!htab->table)
			panic("XFRM: failed to allocate bydst hash\n");
	}

	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
	register_netdevice_notifier(&xfrm_dev_notifier);
}

void __init xfrm_init(void)
{
	xfrm_state_init();
	xfrm_policy_init();
	xfrm_input_init();
}

#ifdef CONFIG_AUDITSYSCALL
static inline void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
						struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=" NIPQUAD_FMT,
				 NIPQUAD(sel->saddr.a4));
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=" NIPQUAD_FMT,
				 NIPQUAD(sel->daddr.a4));
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=" NIP6_FMT,
				 NIP6(*(struct in6_addr *)sel->saddr.a6));
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=" NIP6_FMT,
				 NIP6(*(struct in6_addr *)sel->daddr.a6));
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

void
xfrm_audit_policy_add(struct xfrm_policy *xp, int result, u32 auid, u32 sid)
{
	struct audit_buffer *audit_buf;
	extern int audit_enabled;

	if (audit_enabled == 0)
		return;
	audit_buf = xfrm_audit_start(auid, sid);
	if (audit_buf == NULL)
		return;
	audit_log_format(audit_buf, " op=SPD-add res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void
xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, u32 auid, u32 sid)
{
	struct audit_buffer *audit_buf;
	extern int audit_enabled;

	if (audit_enabled == 0)
		return;
	audit_buf = xfrm_audit_start(auid, sid);
	if (audit_buf == NULL)
		return;
	audit_log_format(audit_buf, " op=SPD-delete res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif

#ifdef CONFIG_XFRM_MIGRATE
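/* Editor's note: for MIGRATE lookups, a selector whose protocol is the
 * IPSEC_ULPROTO_ANY wildcard matches on family, addresses and prefix
 * lengths only; any other selector must be byte-for-byte identical.
 */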
static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
				       struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
				  sel_cmp->family) == 0 &&
		    xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
				  sel_cmp->family) == 0 &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return 1;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return 1;
		}
	}
	return 0;
}

static struct xfrm_policy *xfrm_migrate_policy_find(struct xfrm_selector *sel,
						    u8 dir, u8 type)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(&sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &xfrm_policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type &&
		    pol->priority < priority) {
			ret = pol;
			break;
		}
	}

	if (ret)
		xfrm_pol_hold(ret);

	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
					  m->old_family) == 0 &&
			    xfrm_addr_cmp(&t->saddr, &m->old_saddr,
					  m->old_family) == 0) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in the case of transport mode, the template does
			 * not store any IP addresses, hence we just compare
			 * mode and protocol */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	struct dst_entry *dst;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			while ((dst = pol->bundles) != NULL) {
				pol->bundles = dst->next;
				dst_free(dst);
			}
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
				   m[i].old_family) == 0) &&
		    (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
				   m[i].old_family) == 0))
			return -EINVAL;
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}
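
/* Editor's sketch (illustrative only; the field values are assumptions,
 * not part of this file): a key manager migrating one ESP tunnel
 * endpoint pair fills a struct xfrm_migrate with the old and new
 * addresses and hands it to xfrm_migrate(), which runs the staged
 * update implemented below:
 *
 *	struct xfrm_migrate mig = {
 *		.proto		= IPPROTO_ESP,
 *		.mode		= XFRM_MODE_TUNNEL,
 *		.old_family	= AF_INET,
 *		.new_family	= AF_INET,
 *		.old_daddr	= ...,	(plus old_saddr/new_daddr/new_saddr)
 *	};
 *
 *	err = xfrm_migrate(sel, XFRM_POLICY_OUT, XFRM_POLICY_TYPE_MAIN,
 *			   &mig, 1);
 */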
int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			if ((xc = xfrm_state_migrate(x, mp))) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate);

	xfrm_pol_put(pol);

	return 0;

out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif