/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <linux/audit.h>

#include "xfrm_hash.h"

DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_RWLOCK(xfrm_policy_lock);

unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2];
EXPORT_SYMBOL(xfrm_policy_count);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static struct kmem_cache *xfrm_dst_cache __read_mostly;

static struct work_struct xfrm_policy_gc_work;
static HLIST_HEAD(xfrm_policy_gc_list);
static DEFINE_SPINLOCK(xfrm_policy_gc_lock);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static struct xfrm_policy_afinfo *xfrm_policy_lock_afinfo(unsigned int family);
static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo);

static inline int
__xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
	return	addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
		(fl->proto == sel->proto || !sel->proto) &&
		(fl->oif == sel->ifindex || !sel->ifindex);
}

static inline int
__xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
	return	addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
		(fl->proto == sel->proto || !sel->proto) &&
		(fl->oif == sel->ifindex || !sel->ifindex);
}

int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
			unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return 0;
}
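
/*
 * Editorial example (not part of the original file): a zeroed selector
 * is a full wildcard, since prefix length 0 matches any address, a zero
 * port mask matches any port, and proto/ifindex of 0 mean "any" above.
 * A hypothetical caller:
 *
 *	struct xfrm_selector sel = {};
 *
 *	if (xfrm_selector_match(&sel, fl, AF_INET))
 *		;	// every IPv4 flow matches the empty selector
 */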
int xfrm_register_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_lock_afinfo(family);
	struct xfrm_type **typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	if (likely(typemap[type->proto] == NULL))
		typemap[type->proto] = type;
	else
		err = -EEXIST;
	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_type);
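
/*
 * Editorial sketch (added; modeled on how esp4/ah4 register themselves,
 * names here are hypothetical): a transform module registers its
 * xfrm_type at init and unregisters it at exit:
 *
 *	static struct xfrm_type my_type = {
 *		.description	= "MY-PROTO",
 *		.owner		= THIS_MODULE,
 *		.proto		= IPPROTO_ESP,	// or another IP protocol
 *		// .init_state, .destructor, .input, .output ...
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return xfrm_register_type(&my_type, AF_INET);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		if (xfrm_unregister_type(&my_type, AF_INET) < 0)
 *			printk(KERN_INFO "my-proto: can't remove xfrm type\n");
 *	}
 */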
int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_lock_afinfo(family);
	struct xfrm_type **typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	if (unlikely(typemap[type->proto] != type))
		err = -ENOENT;
	else
		typemap[type->proto] = NULL;
	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_type);

struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_type **typemap;
	struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	type = typemap[proto];
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	if (!type && !modload_attempted) {
		xfrm_policy_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d",
			       (int) family, (int) proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_policy_put_afinfo(afinfo);
	return type;
}
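
/*
 * Editorial note (added): on success xfrm_get_type() holds a module
 * reference, so callers must pair it with xfrm_put_type() below. The
 * request_module() format string also implies that type modules carry
 * an alias of the form "xfrm-type-<family>-<proto>" (e.g.
 * "xfrm-type-2-50" for AF_INET/ESP) so they can be demand-loaded:
 *
 *	struct xfrm_type *t = xfrm_get_type(IPPROTO_ESP, AF_INET);
 *	if (t) {
 *		// ... use t ...
 *		xfrm_put_type(t);
 *	}
 */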
int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl,
		    unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	if (likely(afinfo->dst_lookup != NULL))
		err = afinfo->dst_lookup(dst, fl);
	else
		err = -EINVAL;
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_dst_lookup);

void xfrm_put_type(struct xfrm_type *type)
{
	module_put(type->owner);
}

int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_policy_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -EEXIST;
	modemap = afinfo->mode_map;
	if (likely(modemap[mode->encap] == NULL)) {
		modemap[mode->encap] = mode;
		err = 0;
	}

	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_mode);

int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_policy_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -ENOENT;
	modemap = afinfo->mode_map;
	if (likely(modemap[mode->encap] == mode)) {
		modemap[mode->encap] = NULL;
		err = 0;
	}

	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_mode);

struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_mode *mode;
	int modload_attempted = 0;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

retry:
	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	mode = afinfo->mode_map[encap];
	if (unlikely(mode && !try_module_get(mode->owner)))
		mode = NULL;
	if (!mode && !modload_attempted) {
		xfrm_policy_put_afinfo(afinfo);
		request_module("xfrm-mode-%d-%d", family, encap);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_policy_put_afinfo(afinfo);
	return mode;
}

void xfrm_put_mode(struct xfrm_mode *mode)
{
	module_put(mode->owner);
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
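
/*
 * Editorial example (added): make_jiffies() scales seconds to jiffies
 * but clamps very large lifetimes, so the later
 * "jiffies + make_jiffies(next)" arithmetic cannot overflow:
 *
 *	make_jiffies(30);	// 30 * HZ
 *	make_jiffies(LONG_MAX);	// clamped to MAX_SCHEDULE_TIMEOUT - 1
 */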
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy*)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (xp->dead)
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}

/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		atomic_set(&policy->refcnt, 1);
		init_timer(&policy->timer);
		policy->timer.data = (unsigned long)policy;
		policy->timer.function = xfrm_policy_timer;
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);
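
/*
 * Editorial sketch (added; roughly the shape of the xfrm_user/pfkey
 * callers): the returned policy starts with refcnt 1 and an idle timer.
 * A creator fills it in and hands it to xfrm_policy_insert(); since
 * __xfrm_policy_destroy() below insists on policy->dead, a policy that
 * was never inserted is freed directly rather than via xfrm_pol_put():
 *
 *	struct xfrm_policy *xp = xfrm_policy_alloc(GFP_KERNEL);
 *	if (!xp)
 *		return -ENOMEM;
 *	// ... fill xp->selector, xp->lft, xp->action, xp->family ...
 *	err = xfrm_policy_insert(XFRM_POLICY_OUT, xp, 1);
 *	if (err) {
 *		security_xfrm_policy_free(xp);
 *		kfree(xp);
 *	}
 */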
/* Destroy xfrm_policy: descendant resources must be released to this moment. */

void __xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->dead);

	BUG_ON(policy->bundles);

	if (del_timer(&policy->timer))
		BUG();

	security_xfrm_policy_free(policy);
	kfree(policy);
}
EXPORT_SYMBOL(__xfrm_policy_destroy);

static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
	struct dst_entry *dst;

	while ((dst = policy->bundles) != NULL) {
		policy->bundles = dst->next;
		dst_free(dst);
	}

	if (del_timer(&policy->timer))
		atomic_dec(&policy->refcnt);

	if (atomic_read(&policy->refcnt) > 1)
		flow_cache_flush();

	xfrm_pol_put(policy);
}

static void xfrm_policy_gc_task(struct work_struct *work)
{
	struct xfrm_policy *policy;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_policy_gc_lock);
	gc_list.first = xfrm_policy_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_policy_gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst)
		xfrm_policy_gc_kill(policy);
}
/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must be unlinked from lists by this moment.
 */
static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	int dead;

	write_lock_bh(&policy->lock);
	dead = policy->dead;
	policy->dead = 1;
	write_unlock_bh(&policy->lock);

	if (unlikely(dead)) {
		WARN_ON(1);
		return;
	}

	spin_lock(&xfrm_policy_gc_lock);
	hlist_add_head(&policy->bydst, &xfrm_policy_gc_list);
	spin_unlock(&xfrm_policy_gc_lock);

	schedule_work(&xfrm_policy_gc_work);
}
struct xfrm_policy_hash {
	struct hlist_head	*table;
	unsigned int		hmask;
};

static struct hlist_head xfrm_policy_inexact[XFRM_POLICY_MAX*2];
static struct xfrm_policy_hash xfrm_policy_bydst[XFRM_POLICY_MAX*2] __read_mostly;
static struct hlist_head *xfrm_policy_byidx __read_mostly;
static unsigned int xfrm_idx_hmask __read_mostly;
static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(u32 index)
{
	return __idx_hash(index, xfrm_idx_hmask);
}

static struct hlist_head *policy_hash_bysel(struct xfrm_selector *sel, unsigned short family, int dir)
{
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
	unsigned int hash = __sel_hash(sel, family, hmask);

	return (hash == hmask + 1 ?
		&xfrm_policy_inexact[dir] :
		xfrm_policy_bydst[dir].table + hash);
}

static struct hlist_head *policy_hash_direct(xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, int dir)
{
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
	unsigned int hash = __addr_hash(daddr, saddr, family, hmask);

	return xfrm_policy_bydst[dir].table + hash;
}

static void xfrm_dst_hash_transfer(struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
		unsigned int h;

		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask);
		hlist_add_head(&pol->bydst, ndsttable+h);
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}
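
/*
 * Editorial example (added): the mask grows by doubling the bucket
 * count, so successive masks run 15 -> 31 -> 63 (16 buckets become 32,
 * then 64):
 *
 *	xfrm_new_hash_mask(15) == 31;
 */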
static void xfrm_bydst_resize(int dir)
{
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *odst = xfrm_policy_bydst[dir].table;
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	int i;

	if (!ndst)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);

	xfrm_policy_bydst[dir].table = ndst;
	xfrm_policy_bydst[dir].hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(int total)
{
	unsigned int hmask = xfrm_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = xfrm_policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	xfrm_policy_byidx = nidx;
	xfrm_idx_hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(int dir, int *total)
{
	unsigned int cnt = xfrm_policy_count[dir];
	unsigned int hmask = xfrm_policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(int total)
{
	unsigned int hmask = xfrm_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct xfrmk_spdinfo *si)
{
	read_lock_bh(&xfrm_policy_lock);
	si->incnt = xfrm_policy_count[XFRM_POLICY_IN];
	si->outcnt = xfrm_policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = xfrm_policy_count[XFRM_POLICY_FWD];
	si->inscnt = xfrm_policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = xfrm_policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = xfrm_policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = xfrm_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
	read_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *__unused)
{
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		if (xfrm_bydst_should_resize(dir, &total))
			xfrm_bydst_resize(dir);
	}
	if (xfrm_byidx_should_resize(total))
		xfrm_byidx_resize(total);

	mutex_unlock(&hash_resize_mutex);
}

static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
/* Generate new index... KAME seems to generate them ordered by cost,
 * at the price of absolute unpredictability of rule ordering. This will not pass. */
static u32 xfrm_gen_index(u8 type, int dir)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_node *entry;
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		list = xfrm_policy_byidx + idx_hash(idx);
		found = 0;
		hlist_for_each_entry(p, entry, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}
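
/*
 * Editorial note (added): idx_generator advances in steps of 8 and the
 * direction is OR-ed into the low bits, so the direction can later be
 * recovered from the index alone; xfrm_policy_id2dir() depends on this:
 *
 *	u32 idx = xfrm_gen_index(XFRM_POLICY_TYPE_MAIN, XFRM_POLICY_OUT);
 *	// xfrm_policy_id2dir(idx) == XFRM_POLICY_OUT
 */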
static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}

int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct xfrm_policy *pol;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct hlist_node *entry, *newpos;
	struct dst_entry *gc_list;

	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(&policy->selector, policy->family, dir);
	delpol = NULL;
	newpos = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == policy->type &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst;
			continue;
		}
		if (delpol)
			break;
	}
	if (newpos)
		hlist_add_after(newpos, &policy->bydst);
	else
		hlist_add_head(&policy->bydst, chain);
	xfrm_pol_hold(policy);
	xfrm_policy_count[dir]++;
	atomic_inc(&flow_cache_genid);
	if (delpol) {
		hlist_del(&delpol->bydst);
		hlist_del(&delpol->byidx);
		xfrm_policy_count[dir]--;
	}
	policy->index = delpol ? delpol->index : xfrm_gen_index(policy->type, dir);
	hlist_add_head(&policy->byidx, xfrm_policy_byidx+idx_hash(policy->index));
	policy->curlft.add_time = get_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(dir, NULL))
		schedule_work(&xfrm_hash_work);

	read_lock_bh(&xfrm_policy_lock);
	gc_list = NULL;
	entry = &policy->bydst;
	hlist_for_each_entry_continue(policy, entry, bydst) {
		struct dst_entry *dst;

		write_lock(&policy->lock);
		dst = policy->bundles;
		if (dst) {
			struct dst_entry *tail = dst;
			while (tail->next)
				tail = tail->next;
			tail->next = gc_list;
			gc_list = dst;

			policy->bundles = NULL;
		}
		write_unlock(&policy->lock);
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;

		gc_list = dst->next;
		dst_free(dst);
	}

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);
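
/*
 * Editorial example (added): the excl flag turns "replace" into "fail
 * if present". Two policies with the same type, selector and security
 * context collide:
 *
 *	xfrm_policy_insert(dir, xp_a, 0);	// inserted (or replaces)
 *	xfrm_policy_insert(dir, xp_b, 1);	// -EEXIST if xp_b collides
 */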
struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
					  struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(sel, sel->family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == type &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(pol);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				xfrm_policy_count[dir]--;
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(ret);
	}
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(u8 type, int dir, u32 id, int delete,
				     int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = xfrm_policy_byidx + idx_hash(id);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, byidx) {
		if (pol->type == type && pol->index == id) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(pol);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				xfrm_policy_count[dir]--;
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(ret);
	}
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

void xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info)
{
	int dir;

	write_lock_bh(&xfrm_policy_lock);

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i, killed;

		killed = 0;
	again1:
		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			hlist_del(&pol->bydst);
			hlist_del(&pol->byidx);
			write_unlock_bh(&xfrm_policy_lock);

			xfrm_audit_log(audit_info->loginuid, audit_info->secid,
				       AUDIT_MAC_IPSEC_DELSPD, 1, pol, NULL);

			xfrm_policy_kill(pol);
			killed++;

			write_lock_bh(&xfrm_policy_lock);
			goto again1;
		}

		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
	again2:
			hlist_for_each_entry(pol, entry,
					     xfrm_policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				hlist_del(&pol->bydst);
				hlist_del(&pol->byidx);
				write_unlock_bh(&xfrm_policy_lock);

				xfrm_audit_log(audit_info->loginuid,
					       audit_info->secid,
					       AUDIT_MAC_IPSEC_DELSPD, 1,
					       pol, NULL);

				xfrm_policy_kill(pol);
				killed++;

				write_lock_bh(&xfrm_policy_lock);
				goto again2;
			}
		}

		xfrm_policy_count[dir] -= killed;
	}
	atomic_inc(&flow_cache_genid);
	write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol, *last = NULL;
	struct hlist_node *entry;
	int dir, last_dir = 0, count, error;

	read_lock_bh(&xfrm_policy_lock);
	count = 0;

	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		struct hlist_head *table = xfrm_policy_bydst[dir].table;
		int i;

		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			if (last) {
				error = func(last, last_dir % XFRM_POLICY_MAX,
					     count, data);
				if (error)
					goto out;
			}
			last = pol;
			last_dir = dir;
			count++;
		}
		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry, table + i, bydst) {
				if (pol->type != type)
					continue;
				if (last) {
					error = func(last, last_dir % XFRM_POLICY_MAX,
						     count, data);
					if (error)
						goto out;
				}
				last = pol;
				last_dir = dir;
				count++;
			}
		}
	}
	if (count == 0) {
		error = -ENOENT;
		goto out;
	}
	error = func(last, last_dir % XFRM_POLICY_MAX, 0, data);
out:
	read_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);
/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else a negative errno.
 */
static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
			     u8 type, u16 family, int dir)
{
	struct xfrm_selector *sel = &pol->selector;
	int match, ret = -ESRCH;

	if (pol->family != family ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol, fl->secid, dir);

	return ret;
}

static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
						     u16 family, u8 dir)
{
	int err;
	struct xfrm_policy *pol, *ret;
	xfrm_address_t *daddr, *saddr;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(daddr, saddr, family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &xfrm_policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else if (pol->priority < priority) {
			ret = pol;
			break;
		}
	}
	if (ret)
		xfrm_pol_hold(ret);
fail:
	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static int xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
			      void **objp, atomic_t **obj_refp)
{
	struct xfrm_policy *pol;
	int err = 0;

#ifdef CONFIG_XFRM_SUB_POLICY
	pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_SUB, fl, family, dir);
	if (IS_ERR(pol)) {
		err = PTR_ERR(pol);
		pol = NULL;
	}
	if (pol || err)
		goto end;
#endif
	pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN, fl, family, dir);
	if (IS_ERR(pol)) {
		err = PTR_ERR(pol);
		pol = NULL;
	}
#ifdef CONFIG_XFRM_SUB_POLICY
end:
#endif
	if ((*objp = (void *) pol) != NULL)
		*obj_refp = &pol->refcnt;
	return err;
}

static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	}
}
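
/*
 * Editorial note (added): the leading if () compares compile-time
 * constants; when the XFRM_POLICY_* and FLOW_DIR_* values line up the
 * compiler reduces the whole helper to "return dir". Either way:
 *
 *	policy_to_flow_dir(XFRM_POLICY_IN)  == FLOW_DIR_IN;
 *	policy_to_flow_dir(XFRM_POLICY_OUT) == FLOW_DIR_OUT;
 *	policy_to_flow_dir(XFRM_POLICY_FWD) == FLOW_DIR_FWD;
 */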
static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		int match = xfrm_selector_match(&pol->selector, fl,
						sk->sk_family);
		int err = 0;

		if (match) {
			err = security_xfrm_policy_lookup(pol, fl->secid,
							  policy_to_flow_dir(dir));
			if (!err)
				xfrm_pol_hold(pol);
			else if (err == -ESRCH)
				pol = NULL;
			else
				pol = ERR_PTR(err);
		} else
			pol = NULL;
	}
	read_unlock_bh(&xfrm_policy_lock);
	return pol;
}

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	struct hlist_head *chain = policy_hash_bysel(&pol->selector,
						     pol->family, dir);

	hlist_add_head(&pol->bydst, chain);
	hlist_add_head(&pol->byidx, xfrm_policy_byidx+idx_hash(pol->index));
	xfrm_policy_count[dir]++;
	xfrm_pol_hold(pol);

	if (xfrm_bydst_should_resize(dir, NULL))
		schedule_work(&xfrm_hash_work);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	if (hlist_unhashed(&pol->bydst))
		return NULL;

	hlist_del(&pol->bydst);
	hlist_del(&pol->byidx);
	xfrm_policy_count[dir]--;

	return pol;
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		if (dir < XFRM_POLICY_MAX)
			atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = get_seconds();
		pol->index = xfrm_gen_index(pol->type, XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}

static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old, newp)) {
			kfree(newp);
			return NULL;  /* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}
static int
xfrm_get_saddr(xfrm_address_t *local, xfrm_address_t *remote,
	       unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->get_saddr(local, remote);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
		      struct xfrm_state **xfrm,
		      unsigned short family)
{
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			family = tmpl->encap_family;
			if (xfrm_addr_any(local, family)) {
				error = xfrm_get_saddr(&tmp, remote, family);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx>=0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
		  struct xfrm_state **xfrm,
		  unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

fail:
	for (cnx--; cnx>=0; cnx--)
		xfrm_state_put(tpp[cnx]);
	return error;
}
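
/*
 * Editorial example (added): the caller sees either a count of resolved
 * states or a negative errno; -EAGAIN means an SA is still being
 * negotiated and the lookup may be retried (see xfrm_lookup() below):
 *
 *	nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);
 *	// nx >= 0 : xfrm[0..nx-1] filled, sorted for outbound processing
 *	// nx <  0 : -errno, e.g. -EAGAIN while an SA is being acquired
 */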
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
{
	struct dst_entry *x;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EINVAL);
	x = afinfo->find_bundle(fl, policy);
	xfrm_policy_put_afinfo(afinfo);
	return x;
}

/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */

static int
xfrm_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
		   struct flowi *fl, struct dst_entry **dst_p,
		   unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->bundle_create(policy, xfrm, nx, fl, dst_p);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

static int inline
xfrm_dst_alloc_copy(void **target, void *src, int size)
{
	if (!*target) {
		*target = kmalloc(size, GFP_ATOMIC);
		if (!*target)
			return -ENOMEM;
	}
	memcpy(*target, src, size);
	return 0;
}

static int inline
xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
				   sel, sizeof(*sel));
#else
	return 0;
#endif
}

static int inline
xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
#else
	return 0;
#endif
}
static int stale_bundle(struct dst_entry *dst);

/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
{
	struct xfrm_policy *policy;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols;
	int pol_dead;
	int xfrm_nr;
	int pi;
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst, *dst_orig = *dst_p;
	int nx = 0;
	int err;
	u32 genid;
	u16 family;
	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);

restart:
	genid = atomic_read(&flow_cache_genid);
	policy = NULL;
	for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
		pols[pi] = NULL;
	npols = 0;
	pol_dead = 0;
	xfrm_nr = 0;

	if (sk && sk->sk_policy[1]) {
		policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
		if (IS_ERR(policy))
			return PTR_ERR(policy);
	}

	if (!policy) {
		/* To accelerate a bit... */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !xfrm_policy_count[XFRM_POLICY_OUT])
			return 0;

		policy = flow_cache_lookup(fl, dst_orig->ops->family,
					   dir, xfrm_policy_lookup);
		if (IS_ERR(policy))
			return PTR_ERR(policy);
	}

	if (!policy)
		return 0;

	family = dst_orig->ops->family;
	policy->curlft.use_time = get_seconds();
	pols[0] = policy;
	npols ++;
	xfrm_nr += pols[0]->xfrm_nr;

	switch (policy->action) {
	case XFRM_POLICY_BLOCK:
		/* Prohibit the flow */
		err = -EPERM;
		goto error;

	case XFRM_POLICY_ALLOW:
#ifndef CONFIG_XFRM_SUB_POLICY
		if (policy->xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pol_put(policy);
			return 0;
		}
#endif

		/* Try to find matching bundle.
		 *
		 * LATER: help from flow cache. It is optional, this
		 * is required only for output policy.
		 */
		dst = xfrm_find_bundle(fl, policy, family);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			goto error;
		}

		if (dst)
			break;

#ifdef CONFIG_XFRM_SUB_POLICY
		if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
			pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
							    fl, family,
							    XFRM_POLICY_OUT);
			if (pols[1]) {
				if (IS_ERR(pols[1])) {
					err = PTR_ERR(pols[1]);
					goto error;
				}
				if (pols[1]->action == XFRM_POLICY_BLOCK) {
					err = -EPERM;
					goto error;
				}
				npols ++;
				xfrm_nr += pols[1]->xfrm_nr;
			}
		}

		/*
		 * Neither flowi nor bundle information knows about the
		 * transformation template size, so when more than one
		 * policy is in use we can only tell whether all of them
		 * are bypass after they have all been searched. Note that
		 * the not-transformed bypass above is likewise compiled
		 * only for the non-sub-policy configuration.
		 */
		if (xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}

#endif
		nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

		if (unlikely(nx<0)) {
			err = nx;
			if (err == -EAGAIN && flags) {
				DECLARE_WAITQUEUE(wait, current);

				add_wait_queue(&km_waitq, &wait);
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&km_waitq, &wait);

				nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

				if (nx == -EAGAIN && signal_pending(current)) {
					err = -ERESTART;
					goto error;
				}
				if (nx == -EAGAIN ||
				    genid != atomic_read(&flow_cache_genid)) {
					xfrm_pols_put(pols, npols);
					goto restart;
				}
				err = nx;
			}
			if (err < 0)
				goto error;
		}
		if (nx == 0) {
			/* Flow passes not transformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}

		dst = dst_orig;
		err = xfrm_bundle_create(policy, xfrm, nx, fl, &dst, family);

		if (unlikely(err)) {
			int i;
			for (i=0; i<nx; i++)
				xfrm_state_put(xfrm[i]);
			goto error;
		}

		for (pi = 0; pi < npols; pi++) {
			read_lock_bh(&pols[pi]->lock);
			pol_dead |= pols[pi]->dead;
			read_unlock_bh(&pols[pi]->lock);
		}

		write_lock_bh(&policy->lock);
		if (unlikely(pol_dead || stale_bundle(dst))) {
			/* Wow! While we worked on resolving, this
			 * policy has gone. Retry. It is not paranoia,
			 * we just cannot enlist new bundle to dead object.
			 * We can't enlist stale bundles either.
			 */
			write_unlock_bh(&policy->lock);
			if (dst)
				dst_free(dst);

			err = -EHOSTUNREACH;
			goto error;
		}

		if (npols > 1)
			err = xfrm_dst_update_parent(dst, &pols[1]->selector);
		else
			err = xfrm_dst_update_origin(dst, fl);
		if (unlikely(err)) {
			write_unlock_bh(&policy->lock);
			if (dst)
				dst_free(dst);
			goto error;
		}

		dst->next = policy->bundles;
		policy->bundles = dst;
		dst_hold(dst);
		write_unlock_bh(&policy->lock);
	}
	*dst_p = dst;
	dst_release(dst_orig);
	xfrm_pols_put(pols, npols);
	return 0;

error:
	dst_release(dst_orig);
	xfrm_pols_put(pols, npols);
	*dst_p = NULL;
	return err;
}
EXPORT_SYMBOL(xfrm_lookup);
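
/*
 * Editorial sketch (added; modeled on ip_route_output_flow()): output
 * paths first do an ordinary route lookup and then let xfrm_lookup()
 * replace the plain route with an IPsec bundle when policy requires:
 *
 *	struct rtable *rt;
 *	int err = __ip_route_output_key(&rt, fl);
 *	if (err)
 *		return err;
 *	return xfrm_lookup((struct dst_entry **)&rt, fl, sk, flags);
 */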
static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
{
	struct xfrm_state *x;
	int err;

	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
		return 0;
	x = skb->sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	xfrm_state_hold(x);
	err = x->type->reject(x, skb, fl);
	xfrm_state_put(x);
	return err;
}

/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we make this in a
 * maximally stupid way. Shame on me. :-) Of course, connected sockets
 * must have policy cached at them.
 */

static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		((tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}

/*
 * 0 or more than 0 is returned when validation succeeds (either a
 * bypass because of an optional transport-mode template, or the next
 * index of the matched secpath state with the template).
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-idx;
			break;
		}
	}
	return start;
}
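
/*
 * Editorial example (added) of the return contract above, for a
 * template matched at secpath index 0:
 *
 *	k = xfrm_policy_ok(tmpl, sp, 0, family);
 *	// k == 1      : matched sp->xvec[0]; next search starts at 1
 *	// k == -1     : no match, nothing to reject
 *	// k == -2 - i : sp->xvec[i] is a non-transport state to reject
 */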
  1432. int
  1433. xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
  1434. {
  1435. struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  1436. int err;
  1437. if (unlikely(afinfo == NULL))
  1438. return -EAFNOSUPPORT;
  1439. afinfo->decode_session(skb, fl);
  1440. err = security_xfrm_decode_session(skb, &fl->secid);
  1441. xfrm_policy_put_afinfo(afinfo);
  1442. return err;
  1443. }
  1444. EXPORT_SYMBOL(xfrm_decode_session);
  1445. static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp)
  1446. {
  1447. for (; k < sp->len; k++) {
  1448. if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
  1449. *idxp = k;
  1450. return 1;
  1451. }
  1452. }
  1453. return 0;
  1454. }
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	struct flowi fl;
	u8 fl_dir = policy_to_flow_dir(dir);
	int xerr_idx = -1;

	if (xfrm_decode_session(skb, &fl, family) < 0)
		return 0;
	nf_nat_decode_session(skb, &fl, family);

	/* First, check the used SAs against their selectors. */
	if (skb->sp) {
		int i;

		for (i = skb->sp->len - 1; i >= 0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family))
				return 0;
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
		if (IS_ERR(pol))
			return 0;
	}

	if (!pol)
		pol = flow_cache_lookup(&fl, family, fl_dir,
					xfrm_policy_lookup);

	if (IS_ERR(pol))
		return 0;

	if (!pol) {
		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = get_seconds();

	pols[0] = pol;
	npols++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN);
		if (pols[1]) {
			if (IS_ERR(pols[1]))
				return 0;
			pols[1]->curlft.use_time = get_seconds();
			npols++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW)
				goto reject;
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH)
				goto reject_error;
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find the corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each pair of transformations.
		 */
		for (i = xfrm_nr - 1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2 + k);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx))
			goto reject;

		xfrm_pols_put(pols, npols);
		return 1;
	}

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);
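
/* Editorial illustration (not in the original source): with two
 * templates resolved into tpp[] and a two-entry secpath, the loop
 * above first tries tpp[1] against sp->xvec[] starting at index 0,
 * then continues tpp[0] from wherever the previous match stopped.
 * Any non-transport secpath entry left unmatched afterwards makes
 * secpath_has_nontransport() trigger the reject path.
 */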
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct flowi fl;

	if (xfrm_decode_session(skb, &fl, family) < 0)
		return 0;

	return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
}
EXPORT_SYMBOL(__xfrm_route_forward);
/* Optimize later using cookies and generation ids. */

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to "-1" to force all XFRM destinations to get validated by
	 * dst_ops->check on every use. We do this because when a
	 * normal route referenced by an XFRM dst is obsoleted we do
	 * not go looking around for all parent referencing XFRM dsts
	 * so that we can invalidate them. It is just too much work.
	 * Instead we make the checks here on every use. For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example). If X is marked obsolete, "A" will not
	 * notice. That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a policy's bundle is pruned, we dst_free() the XFRM
	 * dst which causes its ->obsolete field to be set to a
	 * positive non-zero integer. If an XFRM dst has been pruned
	 * like this, we want to force a new route lookup.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}
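
/* How this hook is reached (editorial sketch): dst_check() in
 * include/net/dst.h is essentially
 *
 *	if (dst->obsolete)
 *		dst = dst->ops->check(dst, cookie);
 *
 * so with ->obsolete forced to -1, every use of an XFRM dst is
 * funnelled through xfrm_dst_check() above.
 */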
static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
}
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = &loopback_dev;
		dev_hold(&loopback_dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);
static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such a dst must be popped before it reaches the
	 * point of failure. */
	return;
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}
/* Unlink bundles matching func() from the policy and chain them onto
 * *gc_list_p; the caller frees them outside the policy lock.
 */
static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_entry *), struct dst_entry **gc_list_p)
{
	struct dst_entry *dst, **dstp;

	write_lock(&pol->lock);
	dstp = &pol->bundles;
	while ((dst = *dstp) != NULL) {
		if (func(dst)) {
			*dstp = dst->next;
			dst->next = *gc_list_p;
			*gc_list_p = dst;
		} else {
			dstp = &dst->next;
		}
	}
	write_unlock(&pol->lock);
}
static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
{
	struct dst_entry *gc_list = NULL;
	int dir;

	read_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		struct hlist_head *table;
		int i;

		hlist_for_each_entry(pol, entry,
				     &xfrm_policy_inexact[dir], bydst)
			prune_one_bundle(pol, func, &gc_list);

		table = xfrm_policy_bydst[dir].table;
		for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry, table + i, bydst)
				prune_one_bundle(pol, func, &gc_list);
		}
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;
		gc_list = dst->next;
		dst_free(dst);
	}
}
static int unused_bundle(struct dst_entry *dst)
{
	return !atomic_read(&dst->__refcnt);
}

static void __xfrm_garbage_collect(void)
{
	xfrm_prune_bundles(unused_bundle);
}

static int xfrm_flush_bundles(void)
{
	xfrm_prune_bundles(stale_bundle);
	return 0;
}
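
/* Editorial note: __xfrm_garbage_collect() reaps only bundles with a
 * zero refcount and is typically wired up as the per-family
 * dst_ops->gc handler (via afinfo->garbage_collect, see
 * xfrm_policy_register_afinfo() below). xfrm_flush_bundles() applies
 * the stricter stale_bundle() test and runs from the netdev notifier
 * when a device goes down.
 */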
void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst->metrics[RTAX_MTU-1] = pmtu;
	} while ((dst = dst->next));
}
EXPORT_SYMBOL(xfrm_init_pmtu);
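
/* Worked example (editorial, numbers made up): if the child route
 * reports an MTU of 1500 and xfrm_state_mtu() shrinks that to, say,
 * 1438 to account for ESP overhead, while the cached route MTU is
 * 1400, the bundle ends up with min(1438, 1400) = 1400 in
 * dst->metrics[RTAX_MTU-1].
 */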
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */
int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
		   struct flowi *fl, int family, int strict)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (fl) {
		if (first->origin && !flow_cache_uli_match(first->origin, fl))
			return 0;
		if (first->partner &&
		    !xfrm_selector_match(first->partner, fl, family))
			return 0;
	}
#endif

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
			return 0;
		if (fl && pol &&
		    !security_xfrm_state_pol_flow_match(dst->xfrm, pol, fl))
			return 0;
		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->genid != dst->xfrm->genid)
			return 0;

		if (strict && fl && dst->xfrm->props.mode != XFRM_MODE_TUNNEL &&
		    !xfrm_state_addr_flow_check(dst->xfrm, fl, family))
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst->metrics[RTAX_MTU-1] = mtu;

		if (last == first)
			break;

		last = last->u.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}
EXPORT_SYMBOL(xfrm_bundle_ok);
#ifdef CONFIG_AUDITSYSCALL
/* Audit addition and deletion of SAs and IPsec policies. */
void xfrm_audit_log(uid_t auid, u32 sid, int type, int result,
		    struct xfrm_policy *xp, struct xfrm_state *x)
{
	char *secctx;
	u32 secctx_len;
	struct xfrm_sec_ctx *sctx = NULL;
	struct audit_buffer *audit_buf;
	int family;
	extern int audit_enabled;

	if (audit_enabled == 0)
		return;

	BUG_ON((type == AUDIT_MAC_IPSEC_ADDSA ||
		type == AUDIT_MAC_IPSEC_DELSA) && !x);
	BUG_ON((type == AUDIT_MAC_IPSEC_ADDSPD ||
		type == AUDIT_MAC_IPSEC_DELSPD) && !xp);

	audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC, type);
	if (audit_buf == NULL)
		return;

	switch (type) {
	case AUDIT_MAC_IPSEC_ADDSA:
		audit_log_format(audit_buf, "SAD add: auid=%u", auid);
		break;
	case AUDIT_MAC_IPSEC_DELSA:
		audit_log_format(audit_buf, "SAD delete: auid=%u", auid);
		break;
	case AUDIT_MAC_IPSEC_ADDSPD:
		audit_log_format(audit_buf, "SPD add: auid=%u", auid);
		break;
	case AUDIT_MAC_IPSEC_DELSPD:
		audit_log_format(audit_buf, "SPD delete: auid=%u", auid);
		break;
	default:
		/* Unknown type: close the record instead of leaking
		 * the audit buffer. */
		audit_log_end(audit_buf);
		return;
	}

	if (sid != 0 &&
	    security_secid_to_secctx(sid, &secctx, &secctx_len) == 0)
		audit_log_format(audit_buf, " subj=%s", secctx);
	else
		audit_log_task_context(audit_buf);

	if (xp) {
		family = xp->selector.family;
		if (xp->security)
			sctx = xp->security;
	} else {
		family = x->props.family;
		if (x->security)
			sctx = x->security;
	}

	if (sctx)
		audit_log_format(audit_buf,
				 " sec_alg=%u sec_doi=%u sec_obj=%s",
				 sctx->ctx_alg, sctx->ctx_doi, sctx->ctx_str);

	switch (family) {
	case AF_INET:
		{
			struct in_addr saddr, daddr;
			if (xp) {
				saddr.s_addr = xp->selector.saddr.a4;
				daddr.s_addr = xp->selector.daddr.a4;
			} else {
				saddr.s_addr = x->props.saddr.a4;
				daddr.s_addr = x->id.daddr.a4;
			}
			audit_log_format(audit_buf,
					 " src=%u.%u.%u.%u dst=%u.%u.%u.%u",
					 NIPQUAD(saddr), NIPQUAD(daddr));
		}
		break;
	case AF_INET6:
		{
			struct in6_addr saddr6, daddr6;
			if (xp) {
				memcpy(&saddr6, xp->selector.saddr.a6,
				       sizeof(struct in6_addr));
				memcpy(&daddr6, xp->selector.daddr.a6,
				       sizeof(struct in6_addr));
			} else {
				memcpy(&saddr6, x->props.saddr.a6,
				       sizeof(struct in6_addr));
				memcpy(&daddr6, x->id.daddr.a6,
				       sizeof(struct in6_addr));
			}
			audit_log_format(audit_buf,
					 " src=" NIP6_FMT " dst=" NIP6_FMT,
					 NIP6(saddr6), NIP6(daddr6));
		}
		break;
	}

	if (x)
		audit_log_format(audit_buf, " spi=%lu(0x%lx) protocol=%s",
				 (unsigned long)ntohl(x->id.spi),
				 (unsigned long)ntohl(x->id.spi),
				 x->id.proto == IPPROTO_AH ? "AH" :
				 (x->id.proto == IPPROTO_ESP ?
				  "ESP" : "IPCOMP"));

	audit_log_format(audit_buf, " res=%u", result);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL(xfrm_audit_log);
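
/* Sketch of a typical call site (editorial; the real callers live in
 * xfrm_user.c and af_key.c, and their exact arguments may differ):
 *
 *	xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
 *		       AUDIT_MAC_IPSEC_ADDSA, err ? 0 : 1, NULL, x);
 */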
#endif /* CONFIG_AUDITSYSCALL */
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;

	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = __xfrm_garbage_collect;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);
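
/* Minimal registration sketch (editorial; the "example_" identifiers
 * are hypothetical - the real IPv4 version lives in
 * net/ipv4/xfrm4_policy.c):
 *
 *	static struct xfrm_policy_afinfo example_policy_afinfo = {
 *		.family		= AF_INET,
 *		.dst_ops	= &example_dst_ops,
 *		...
 *	};
 *
 *	xfrm_policy_register_afinfo(&example_policy_afinfo);
 *
 * Any dst_ops hook left NULL is filled in with the shared XFRM
 * defaults installed above (xfrm_dst_check, xfrm_negative_advice,
 * xfrm_link_failure, __xfrm_garbage_collect).
 */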
int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;

	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;
			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_policy_afinfo_lock);
	/* On success the read lock stays held; the caller drops it via
	 * xfrm_policy_put_afinfo(). */
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	read_unlock(&xfrm_policy_afinfo_lock);
}

static struct xfrm_policy_afinfo *xfrm_policy_lock_afinfo(unsigned int family)
{
	struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= NPROTO))
		return NULL;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		write_unlock_bh(&xfrm_policy_afinfo_lock);
	/* Likewise, the write lock is released by
	 * xfrm_policy_unlock_afinfo(). */
	return afinfo;
}

static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	write_unlock_bh(&xfrm_policy_afinfo_lock);
}
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_DOWN:
		xfrm_flush_bundles();
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call	= xfrm_dev_event,
};
static void __init xfrm_policy_init(void)
{
	unsigned int hmask, sz;
	int dir;

	xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL, NULL);

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	xfrm_policy_byidx = xfrm_hash_alloc(sz);
	xfrm_idx_hmask = hmask;
	if (!xfrm_policy_byidx)
		panic("XFRM: failed to allocate byidx hash\n");

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		INIT_HLIST_HEAD(&xfrm_policy_inexact[dir]);

		htab = &xfrm_policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		htab->hmask = hmask;
		if (!htab->table)
			panic("XFRM: failed to allocate bydst hash\n");
	}

	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
	register_netdevice_notifier(&xfrm_dev_notifier);
}
void __init xfrm_init(void)
{
	xfrm_state_init();
	xfrm_policy_init();
	xfrm_input_init();
}
#ifdef CONFIG_XFRM_MIGRATE
static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
				       struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
				  sel_cmp->family) == 0 &&
		    xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
				  sel_cmp->family) == 0 &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return 1;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return 1;
		}
	}
	return 0;
}
static struct xfrm_policy *xfrm_migrate_policy_find(struct xfrm_selector *sel,
						    u8 dir, u8 type)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(&sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &xfrm_policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type &&
		    pol->priority < priority) {
			ret = pol;
			break;
		}
	}

	if (ret)
		xfrm_pol_hold(ret);

	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}
static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
					  m->old_family) == 0 &&
			    xfrm_addr_cmp(&t->saddr, &m->old_saddr,
					  m->old_family) == 0) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* In transport mode the template stores no IP
			 * addresses, so comparing mode and protocol is
			 * sufficient. */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}
/* Update the endpoint address(es) of the template(s). */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	struct dst_entry *dst;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			while ((dst = pol->bundles) != NULL) {
				pol->bundles = dst->next;
				dst_free(dst);
			}
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}
static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
				   m[i].old_family) == 0) &&
		    (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
				   m[i].old_family) == 0))
			return -EINVAL;
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}
int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			if ((xc = xfrm_state_migrate(x, mp))) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate);

	xfrm_pol_put(pol);

	return 0;

out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
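
/* Usage sketch (editorial; values illustrative): migrating a single
 * tunnel-mode ESP endpoint would fill one xfrm_migrate entry and call:
 *
 *	struct xfrm_migrate mig = {
 *		.old_daddr	= ...,	.old_saddr	= ...,
 *		.new_daddr	= ...,	.new_saddr	= ...,
 *		.proto		= IPPROTO_ESP,
 *		.mode		= XFRM_MODE_TUNNEL,
 *		.old_family	= AF_INET,
 *		.new_family	= AF_INET,
 *	};
 *
 *	err = xfrm_migrate(&sel, XFRM_POLICY_OUT, XFRM_POLICY_TYPE_MAIN,
 *			   &mig, 1);
 */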
#endif /* CONFIG_XFRM_MIGRATE */