xfrm_policy.c

  1. /*
  2. * xfrm_policy.c
  3. *
  4. * Changes:
  5. * Mitsuru KANDA @USAGI
  6. * Kazunori MIYAZAWA @USAGI
  7. * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
  8. * IPv6 support
  9. * Kazunori MIYAZAWA @USAGI
  10. * YOSHIFUJI Hideaki
  11. * Split up af-specific portion
  12. * Derek Atkins <derek@ihtfp.com> Add the post_input processor
  13. *
  14. */
  15. #include <linux/err.h>
  16. #include <linux/slab.h>
  17. #include <linux/kmod.h>
  18. #include <linux/list.h>
  19. #include <linux/spinlock.h>
  20. #include <linux/workqueue.h>
  21. #include <linux/notifier.h>
  22. #include <linux/netdevice.h>
  23. #include <linux/netfilter.h>
  24. #include <linux/module.h>
  25. #include <linux/cache.h>
  26. #include <linux/audit.h>
  27. #include <net/dst.h>
  28. #include <net/xfrm.h>
  29. #include <net/ip.h>
  30. #ifdef CONFIG_XFRM_STATISTICS
  31. #include <net/snmp.h>
  32. #endif
  33. #include "xfrm_hash.h"
  34. int sysctl_xfrm_larval_drop __read_mostly = 1;
  35. #ifdef CONFIG_XFRM_STATISTICS
  36. DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics) __read_mostly;
  37. EXPORT_SYMBOL(xfrm_statistics);
  38. #endif
  39. DEFINE_MUTEX(xfrm_cfg_mutex);
  40. EXPORT_SYMBOL(xfrm_cfg_mutex);
  41. static DEFINE_RWLOCK(xfrm_policy_lock);
  42. static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
  43. static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
  44. static struct kmem_cache *xfrm_dst_cache __read_mostly;
  45. static HLIST_HEAD(xfrm_policy_gc_list);
  46. static DEFINE_SPINLOCK(xfrm_policy_gc_lock);
  47. static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
  48. static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
  49. static void xfrm_init_pmtu(struct dst_entry *dst);
  50. static inline int
  51. __xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
  52. {
  53. return addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
  54. addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
  55. !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
  56. !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
  57. (fl->proto == sel->proto || !sel->proto) &&
  58. (fl->oif == sel->ifindex || !sel->ifindex);
  59. }
  60. static inline int
  61. __xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
  62. {
  63. return addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
  64. addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
  65. !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
  66. !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
  67. (fl->proto == sel->proto || !sel->proto) &&
  68. (fl->oif == sel->ifindex || !sel->ifindex);
  69. }
  70. int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
  71. unsigned short family)
  72. {
  73. switch (family) {
  74. case AF_INET:
  75. return __xfrm4_selector_match(sel, fl);
  76. case AF_INET6:
  77. return __xfrm6_selector_match(sel, fl);
  78. }
  79. return 0;
  80. }
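/* Editor's note (illustrative sketch, not part of the original source): a
 * caller wanting to match TCP traffic towards 10.0.0.0/8, port 443, might
 * fill a selector roughly like this, with fl pointing at the caller's flowi
 * (field values are assumptions for the example):
 *
 *	struct xfrm_selector sel = {
 *		.daddr.a4	= htonl(0x0a000000),
 *		.prefixlen_d	= 8,
 *		.proto		= IPPROTO_TCP,
 *		.dport		= htons(443),
 *		.dport_mask	= htons(0xffff),
 *	};
 *	int hit = xfrm_selector_match(&sel, fl, AF_INET);
 *
 * As the per-family helpers above show, a zero proto, ifindex or port mask
 * acts as a wildcard.
 */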
  81. static inline struct dst_entry *__xfrm_dst_lookup(int tos,
  82. xfrm_address_t *saddr,
  83. xfrm_address_t *daddr,
  84. int family)
  85. {
  86. struct xfrm_policy_afinfo *afinfo;
  87. struct dst_entry *dst;
  88. afinfo = xfrm_policy_get_afinfo(family);
  89. if (unlikely(afinfo == NULL))
  90. return ERR_PTR(-EAFNOSUPPORT);
  91. dst = afinfo->dst_lookup(tos, saddr, daddr);
  92. xfrm_policy_put_afinfo(afinfo);
  93. return dst;
  94. }
  95. static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
  96. xfrm_address_t *prev_saddr,
  97. xfrm_address_t *prev_daddr,
  98. int family)
  99. {
  100. xfrm_address_t *saddr = &x->props.saddr;
  101. xfrm_address_t *daddr = &x->id.daddr;
  102. struct dst_entry *dst;
  103. if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
  104. saddr = x->coaddr;
  105. daddr = prev_daddr;
  106. }
  107. if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
  108. saddr = prev_saddr;
  109. daddr = x->coaddr;
  110. }
  111. dst = __xfrm_dst_lookup(tos, saddr, daddr, family);
  112. if (!IS_ERR(dst)) {
  113. if (prev_saddr != saddr)
  114. memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
  115. if (prev_daddr != daddr)
  116. memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
  117. }
  118. return dst;
  119. }
  120. static inline unsigned long make_jiffies(long secs)
  121. {
  122. if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
  123. return MAX_SCHEDULE_TIMEOUT-1;
  124. else
  125. return secs*HZ;
  126. }
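/* Editor's note: make_jiffies() turns a lifetime expressed in seconds into a
 * timer delta, clamping the result at MAX_SCHEDULE_TIMEOUT - 1 so that very
 * long lifetimes do not overflow when converted to jiffies. For example,
 * with HZ == 1000, make_jiffies(5) returns 5000, while make_jiffies(LONG_MAX)
 * is capped at MAX_SCHEDULE_TIMEOUT - 1.
 */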
  127. static void xfrm_policy_timer(unsigned long data)
  128. {
  129. struct xfrm_policy *xp = (struct xfrm_policy*)data;
  130. unsigned long now = get_seconds();
  131. long next = LONG_MAX;
  132. int warn = 0;
  133. int dir;
  134. read_lock(&xp->lock);
  135. if (xp->walk.dead)
  136. goto out;
  137. dir = xfrm_policy_id2dir(xp->index);
  138. if (xp->lft.hard_add_expires_seconds) {
  139. long tmo = xp->lft.hard_add_expires_seconds +
  140. xp->curlft.add_time - now;
  141. if (tmo <= 0)
  142. goto expired;
  143. if (tmo < next)
  144. next = tmo;
  145. }
  146. if (xp->lft.hard_use_expires_seconds) {
  147. long tmo = xp->lft.hard_use_expires_seconds +
  148. (xp->curlft.use_time ? : xp->curlft.add_time) - now;
  149. if (tmo <= 0)
  150. goto expired;
  151. if (tmo < next)
  152. next = tmo;
  153. }
  154. if (xp->lft.soft_add_expires_seconds) {
  155. long tmo = xp->lft.soft_add_expires_seconds +
  156. xp->curlft.add_time - now;
  157. if (tmo <= 0) {
  158. warn = 1;
  159. tmo = XFRM_KM_TIMEOUT;
  160. }
  161. if (tmo < next)
  162. next = tmo;
  163. }
  164. if (xp->lft.soft_use_expires_seconds) {
  165. long tmo = xp->lft.soft_use_expires_seconds +
  166. (xp->curlft.use_time ? : xp->curlft.add_time) - now;
  167. if (tmo <= 0) {
  168. warn = 1;
  169. tmo = XFRM_KM_TIMEOUT;
  170. }
  171. if (tmo < next)
  172. next = tmo;
  173. }
  174. if (warn)
  175. km_policy_expired(xp, dir, 0, 0);
  176. if (next != LONG_MAX &&
  177. !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
  178. xfrm_pol_hold(xp);
  179. out:
  180. read_unlock(&xp->lock);
  181. xfrm_pol_put(xp);
  182. return;
  183. expired:
  184. read_unlock(&xp->lock);
  185. if (!xfrm_policy_delete(xp, dir))
  186. km_policy_expired(xp, dir, 1, 0);
  187. xfrm_pol_put(xp);
  188. }
  189. /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
  190. * SPD calls.
  191. */
  192. struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
  193. {
  194. struct xfrm_policy *policy;
  195. policy = kzalloc(sizeof(struct xfrm_policy), gfp);
  196. if (policy) {
  197. write_pnet(&policy->xp_net, net);
  198. INIT_LIST_HEAD(&policy->walk.all);
  199. INIT_HLIST_NODE(&policy->bydst);
  200. INIT_HLIST_NODE(&policy->byidx);
  201. rwlock_init(&policy->lock);
  202. atomic_set(&policy->refcnt, 1);
  203. setup_timer(&policy->timer, xfrm_policy_timer,
  204. (unsigned long)policy);
  205. }
  206. return policy;
  207. }
  208. EXPORT_SYMBOL(xfrm_policy_alloc);
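/* Editor's sketch (hypothetical caller; this mirrors what the pfkeyv2 and
 * netlink front ends do, it is not code from this file), where sel is a
 * selector the caller has already built:
 *
 *	struct xfrm_policy *xp = xfrm_policy_alloc(&init_net, GFP_KERNEL);
 *	if (!xp)
 *		return -ENOMEM;
 *	xp->family   = AF_INET;
 *	xp->action   = XFRM_POLICY_ALLOW;
 *	xp->selector = sel;
 *	err = xfrm_policy_insert(XFRM_POLICY_OUT, xp, 0);
 *
 * xfrm_policy_alloc() hands back the policy with refcnt == 1 and its timer
 * already set up; xfrm_policy_insert() takes the additional references it
 * needs.
 */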
  209. /* Destroy xfrm_policy: descendant resources must have been released by this moment. */
  210. void xfrm_policy_destroy(struct xfrm_policy *policy)
  211. {
  212. BUG_ON(!policy->walk.dead);
  213. BUG_ON(policy->bundles);
  214. if (del_timer(&policy->timer))
  215. BUG();
  216. security_xfrm_policy_free(policy->security);
  217. kfree(policy);
  218. }
  219. EXPORT_SYMBOL(xfrm_policy_destroy);
  220. static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
  221. {
  222. struct dst_entry *dst;
  223. while ((dst = policy->bundles) != NULL) {
  224. policy->bundles = dst->next;
  225. dst_free(dst);
  226. }
  227. if (del_timer(&policy->timer))
  228. atomic_dec(&policy->refcnt);
  229. if (atomic_read(&policy->refcnt) > 1)
  230. flow_cache_flush();
  231. xfrm_pol_put(policy);
  232. }
  233. static void xfrm_policy_gc_task(struct work_struct *work)
  234. {
  235. struct xfrm_policy *policy;
  236. struct hlist_node *entry, *tmp;
  237. struct hlist_head gc_list;
  238. spin_lock_bh(&xfrm_policy_gc_lock);
  239. gc_list.first = xfrm_policy_gc_list.first;
  240. INIT_HLIST_HEAD(&xfrm_policy_gc_list);
  241. spin_unlock_bh(&xfrm_policy_gc_lock);
  242. hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst)
  243. xfrm_policy_gc_kill(policy);
  244. }
  245. static DECLARE_WORK(xfrm_policy_gc_work, xfrm_policy_gc_task);
  246. /* Rule must be locked. Release descendant resources, announce
  247. * the entry dead. The rule must already have been unlinked from the lists at this point.
  248. */
  249. static void xfrm_policy_kill(struct xfrm_policy *policy)
  250. {
  251. int dead;
  252. write_lock_bh(&policy->lock);
  253. dead = policy->walk.dead;
  254. policy->walk.dead = 1;
  255. write_unlock_bh(&policy->lock);
  256. if (unlikely(dead)) {
  257. WARN_ON(1);
  258. return;
  259. }
  260. spin_lock_bh(&xfrm_policy_gc_lock);
  261. hlist_add_head(&policy->bydst, &xfrm_policy_gc_list);
  262. spin_unlock_bh(&xfrm_policy_gc_lock);
  263. schedule_work(&xfrm_policy_gc_work);
  264. }
  265. static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
  266. static inline unsigned int idx_hash(u32 index)
  267. {
  268. return __idx_hash(index, init_net.xfrm.policy_idx_hmask);
  269. }
  270. static struct hlist_head *policy_hash_bysel(struct xfrm_selector *sel, unsigned short family, int dir)
  271. {
  272. unsigned int hmask = init_net.xfrm.policy_bydst[dir].hmask;
  273. unsigned int hash = __sel_hash(sel, family, hmask);
  274. return (hash == hmask + 1 ?
  275. &init_net.xfrm.policy_inexact[dir] :
  276. init_net.xfrm.policy_bydst[dir].table + hash);
  277. }
  278. static struct hlist_head *policy_hash_direct(xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, int dir)
  279. {
  280. unsigned int hmask = init_net.xfrm.policy_bydst[dir].hmask;
  281. unsigned int hash = __addr_hash(daddr, saddr, family, hmask);
  282. return init_net.xfrm.policy_bydst[dir].table + hash;
  283. }
  284. static void xfrm_dst_hash_transfer(struct hlist_head *list,
  285. struct hlist_head *ndsttable,
  286. unsigned int nhashmask)
  287. {
  288. struct hlist_node *entry, *tmp, *entry0 = NULL;
  289. struct xfrm_policy *pol;
  290. unsigned int h0 = 0;
  291. redo:
  292. hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
  293. unsigned int h;
  294. h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
  295. pol->family, nhashmask);
  296. if (!entry0) {
  297. hlist_del(entry);
  298. hlist_add_head(&pol->bydst, ndsttable+h);
  299. h0 = h;
  300. } else {
  301. if (h != h0)
  302. continue;
  303. hlist_del(entry);
  304. hlist_add_after(entry0, &pol->bydst);
  305. }
  306. entry0 = entry;
  307. }
  308. if (!hlist_empty(list)) {
  309. entry0 = NULL;
  310. goto redo;
  311. }
  312. }
  313. static void xfrm_idx_hash_transfer(struct hlist_head *list,
  314. struct hlist_head *nidxtable,
  315. unsigned int nhashmask)
  316. {
  317. struct hlist_node *entry, *tmp;
  318. struct xfrm_policy *pol;
  319. hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
  320. unsigned int h;
  321. h = __idx_hash(pol->index, nhashmask);
  322. hlist_add_head(&pol->byidx, nidxtable+h);
  323. }
  324. }
  325. static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
  326. {
  327. return ((old_hmask + 1) << 1) - 1;
  328. }
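/* Editor's note: each resize doubles the table. For a current hash mask of
 * 15 (16 buckets), xfrm_new_hash_mask(15) == ((15 + 1) << 1) - 1 == 31,
 * i.e. 32 buckets. Growth is only attempted while the current table is
 * smaller than xfrm_policy_hashmax buckets (see xfrm_bydst_should_resize()
 * below).
 */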
  329. static void xfrm_bydst_resize(struct net *net, int dir)
  330. {
  331. unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
  332. unsigned int nhashmask = xfrm_new_hash_mask(hmask);
  333. unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
  334. struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
  335. struct hlist_head *ndst = xfrm_hash_alloc(nsize);
  336. int i;
  337. if (!ndst)
  338. return;
  339. write_lock_bh(&xfrm_policy_lock);
  340. for (i = hmask; i >= 0; i--)
  341. xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);
  342. net->xfrm.policy_bydst[dir].table = ndst;
  343. net->xfrm.policy_bydst[dir].hmask = nhashmask;
  344. write_unlock_bh(&xfrm_policy_lock);
  345. xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
  346. }
  347. static void xfrm_byidx_resize(struct net *net, int total)
  348. {
  349. unsigned int hmask = net->xfrm.policy_idx_hmask;
  350. unsigned int nhashmask = xfrm_new_hash_mask(hmask);
  351. unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
  352. struct hlist_head *oidx = net->xfrm.policy_byidx;
  353. struct hlist_head *nidx = xfrm_hash_alloc(nsize);
  354. int i;
  355. if (!nidx)
  356. return;
  357. write_lock_bh(&xfrm_policy_lock);
  358. for (i = hmask; i >= 0; i--)
  359. xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
  360. net->xfrm.policy_byidx = nidx;
  361. net->xfrm.policy_idx_hmask = nhashmask;
  362. write_unlock_bh(&xfrm_policy_lock);
  363. xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
  364. }
  365. static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
  366. {
  367. unsigned int cnt = net->xfrm.policy_count[dir];
  368. unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
  369. if (total)
  370. *total += cnt;
  371. if ((hmask + 1) < xfrm_policy_hashmax &&
  372. cnt > hmask)
  373. return 1;
  374. return 0;
  375. }
  376. static inline int xfrm_byidx_should_resize(struct net *net, int total)
  377. {
  378. unsigned int hmask = net->xfrm.policy_idx_hmask;
  379. if ((hmask + 1) < xfrm_policy_hashmax &&
  380. total > hmask)
  381. return 1;
  382. return 0;
  383. }
  384. void xfrm_spd_getinfo(struct xfrmk_spdinfo *si)
  385. {
  386. read_lock_bh(&xfrm_policy_lock);
  387. si->incnt = init_net.xfrm.policy_count[XFRM_POLICY_IN];
  388. si->outcnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT];
  389. si->fwdcnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD];
  390. si->inscnt = init_net.xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
  391. si->outscnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
  392. si->fwdscnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
  393. si->spdhcnt = init_net.xfrm.policy_idx_hmask;
  394. si->spdhmcnt = xfrm_policy_hashmax;
  395. read_unlock_bh(&xfrm_policy_lock);
  396. }
  397. EXPORT_SYMBOL(xfrm_spd_getinfo);
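/* Editor's note: incnt/outcnt/fwdcnt are the per-direction totals for
 * ordinary SPD policies; the *scnt fields read the slots at
 * XFRM_POLICY_MAX + dir, which is where per-socket policies are counted
 * (see xfrm_sk_policy_insert() below). spdhcnt reports the current by-index
 * hash mask and spdhmcnt the ceiling xfrm_policy_hashmax.
 */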
  398. static DEFINE_MUTEX(hash_resize_mutex);
  399. static void xfrm_hash_resize(struct work_struct *work)
  400. {
  401. struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
  402. int dir, total;
  403. mutex_lock(&hash_resize_mutex);
  404. total = 0;
  405. for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
  406. if (xfrm_bydst_should_resize(net, dir, &total))
  407. xfrm_bydst_resize(net, dir);
  408. }
  409. if (xfrm_byidx_should_resize(net, total))
  410. xfrm_byidx_resize(net, total);
  411. mutex_unlock(&hash_resize_mutex);
  412. }
  413. /* Generate a new index... KAME seems to generate them ordered by cost,
  414. * at the price of completely unpredictable rule ordering. That approach will not do here. */
  415. static u32 xfrm_gen_index(int dir)
  416. {
  417. static u32 idx_generator;
  418. for (;;) {
  419. struct hlist_node *entry;
  420. struct hlist_head *list;
  421. struct xfrm_policy *p;
  422. u32 idx;
  423. int found;
  424. idx = (idx_generator | dir);
  425. idx_generator += 8;
  426. if (idx == 0)
  427. idx = 8;
  428. list = init_net.xfrm.policy_byidx + idx_hash(idx);
  429. found = 0;
  430. hlist_for_each_entry(p, entry, list, byidx) {
  431. if (p->index == idx) {
  432. found = 1;
  433. break;
  434. }
  435. }
  436. if (!found)
  437. return idx;
  438. }
  439. }
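/* Editor's note: the low three bits of a policy index carry its direction,
 * which is why xfrm_policy_id2dir() can recover "dir" from the index and why
 * the generator advances in steps of 8 before ORing the direction back in.
 * For example, with idx_generator == 16 and dir == XFRM_POLICY_OUT (1), the
 * candidate index is 17, and it is returned only if no policy in the byidx
 * chain already uses it.
 */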
  440. static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
  441. {
  442. u32 *p1 = (u32 *) s1;
  443. u32 *p2 = (u32 *) s2;
  444. int len = sizeof(struct xfrm_selector) / sizeof(u32);
  445. int i;
  446. for (i = 0; i < len; i++) {
  447. if (p1[i] != p2[i])
  448. return 1;
  449. }
  450. return 0;
  451. }
  452. int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
  453. {
  454. struct xfrm_policy *pol;
  455. struct xfrm_policy *delpol;
  456. struct hlist_head *chain;
  457. struct hlist_node *entry, *newpos;
  458. struct dst_entry *gc_list;
  459. write_lock_bh(&xfrm_policy_lock);
  460. chain = policy_hash_bysel(&policy->selector, policy->family, dir);
  461. delpol = NULL;
  462. newpos = NULL;
  463. hlist_for_each_entry(pol, entry, chain, bydst) {
  464. if (pol->type == policy->type &&
  465. !selector_cmp(&pol->selector, &policy->selector) &&
  466. xfrm_sec_ctx_match(pol->security, policy->security) &&
  467. !WARN_ON(delpol)) {
  468. if (excl) {
  469. write_unlock_bh(&xfrm_policy_lock);
  470. return -EEXIST;
  471. }
  472. delpol = pol;
  473. if (policy->priority > pol->priority)
  474. continue;
  475. } else if (policy->priority >= pol->priority) {
  476. newpos = &pol->bydst;
  477. continue;
  478. }
  479. if (delpol)
  480. break;
  481. }
  482. if (newpos)
  483. hlist_add_after(newpos, &policy->bydst);
  484. else
  485. hlist_add_head(&policy->bydst, chain);
  486. xfrm_pol_hold(policy);
  487. init_net.xfrm.policy_count[dir]++;
  488. atomic_inc(&flow_cache_genid);
  489. if (delpol) {
  490. hlist_del(&delpol->bydst);
  491. hlist_del(&delpol->byidx);
  492. list_del(&delpol->walk.all);
  493. init_net.xfrm.policy_count[dir]--;
  494. }
  495. policy->index = delpol ? delpol->index : xfrm_gen_index(dir);
  496. hlist_add_head(&policy->byidx, init_net.xfrm.policy_byidx+idx_hash(policy->index));
  497. policy->curlft.add_time = get_seconds();
  498. policy->curlft.use_time = 0;
  499. if (!mod_timer(&policy->timer, jiffies + HZ))
  500. xfrm_pol_hold(policy);
  501. list_add(&policy->walk.all, &init_net.xfrm.policy_all);
  502. write_unlock_bh(&xfrm_policy_lock);
  503. if (delpol)
  504. xfrm_policy_kill(delpol);
  505. else if (xfrm_bydst_should_resize(&init_net, dir, NULL))
  506. schedule_work(&init_net.xfrm.policy_hash_work);
  507. read_lock_bh(&xfrm_policy_lock);
  508. gc_list = NULL;
  509. entry = &policy->bydst;
  510. hlist_for_each_entry_continue(policy, entry, bydst) {
  511. struct dst_entry *dst;
  512. write_lock(&policy->lock);
  513. dst = policy->bundles;
  514. if (dst) {
  515. struct dst_entry *tail = dst;
  516. while (tail->next)
  517. tail = tail->next;
  518. tail->next = gc_list;
  519. gc_list = dst;
  520. policy->bundles = NULL;
  521. }
  522. write_unlock(&policy->lock);
  523. }
  524. read_unlock_bh(&xfrm_policy_lock);
  525. while (gc_list) {
  526. struct dst_entry *dst = gc_list;
  527. gc_list = dst->next;
  528. dst_free(dst);
  529. }
  530. return 0;
  531. }
  532. EXPORT_SYMBOL(xfrm_policy_insert);
  533. struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
  534. struct xfrm_selector *sel,
  535. struct xfrm_sec_ctx *ctx, int delete,
  536. int *err)
  537. {
  538. struct xfrm_policy *pol, *ret;
  539. struct hlist_head *chain;
  540. struct hlist_node *entry;
  541. *err = 0;
  542. write_lock_bh(&xfrm_policy_lock);
  543. chain = policy_hash_bysel(sel, sel->family, dir);
  544. ret = NULL;
  545. hlist_for_each_entry(pol, entry, chain, bydst) {
  546. if (pol->type == type &&
  547. !selector_cmp(sel, &pol->selector) &&
  548. xfrm_sec_ctx_match(ctx, pol->security)) {
  549. xfrm_pol_hold(pol);
  550. if (delete) {
  551. *err = security_xfrm_policy_delete(
  552. pol->security);
  553. if (*err) {
  554. write_unlock_bh(&xfrm_policy_lock);
  555. return pol;
  556. }
  557. hlist_del(&pol->bydst);
  558. hlist_del(&pol->byidx);
  559. list_del(&pol->walk.all);
  560. init_net.xfrm.policy_count[dir]--;
  561. }
  562. ret = pol;
  563. break;
  564. }
  565. }
  566. write_unlock_bh(&xfrm_policy_lock);
  567. if (ret && delete) {
  568. atomic_inc(&flow_cache_genid);
  569. xfrm_policy_kill(ret);
  570. }
  571. return ret;
  572. }
  573. EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
  574. struct xfrm_policy *xfrm_policy_byid(u8 type, int dir, u32 id, int delete,
  575. int *err)
  576. {
  577. struct xfrm_policy *pol, *ret;
  578. struct hlist_head *chain;
  579. struct hlist_node *entry;
  580. *err = -ENOENT;
  581. if (xfrm_policy_id2dir(id) != dir)
  582. return NULL;
  583. *err = 0;
  584. write_lock_bh(&xfrm_policy_lock);
  585. chain = init_net.xfrm.policy_byidx + idx_hash(id);
  586. ret = NULL;
  587. hlist_for_each_entry(pol, entry, chain, byidx) {
  588. if (pol->type == type && pol->index == id) {
  589. xfrm_pol_hold(pol);
  590. if (delete) {
  591. *err = security_xfrm_policy_delete(
  592. pol->security);
  593. if (*err) {
  594. write_unlock_bh(&xfrm_policy_lock);
  595. return pol;
  596. }
  597. hlist_del(&pol->bydst);
  598. hlist_del(&pol->byidx);
  599. list_del(&pol->walk.all);
  600. init_net.xfrm.policy_count[dir]--;
  601. }
  602. ret = pol;
  603. break;
  604. }
  605. }
  606. write_unlock_bh(&xfrm_policy_lock);
  607. if (ret && delete) {
  608. atomic_inc(&flow_cache_genid);
  609. xfrm_policy_kill(ret);
  610. }
  611. return ret;
  612. }
  613. EXPORT_SYMBOL(xfrm_policy_byid);
  614. #ifdef CONFIG_SECURITY_NETWORK_XFRM
  615. static inline int
  616. xfrm_policy_flush_secctx_check(u8 type, struct xfrm_audit *audit_info)
  617. {
  618. int dir, err = 0;
  619. for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
  620. struct xfrm_policy *pol;
  621. struct hlist_node *entry;
  622. int i;
  623. hlist_for_each_entry(pol, entry,
  624. &init_net.xfrm.policy_inexact[dir], bydst) {
  625. if (pol->type != type)
  626. continue;
  627. err = security_xfrm_policy_delete(pol->security);
  628. if (err) {
  629. xfrm_audit_policy_delete(pol, 0,
  630. audit_info->loginuid,
  631. audit_info->sessionid,
  632. audit_info->secid);
  633. return err;
  634. }
  635. }
  636. for (i = init_net.xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
  637. hlist_for_each_entry(pol, entry,
  638. init_net.xfrm.policy_bydst[dir].table + i,
  639. bydst) {
  640. if (pol->type != type)
  641. continue;
  642. err = security_xfrm_policy_delete(
  643. pol->security);
  644. if (err) {
  645. xfrm_audit_policy_delete(pol, 0,
  646. audit_info->loginuid,
  647. audit_info->sessionid,
  648. audit_info->secid);
  649. return err;
  650. }
  651. }
  652. }
  653. }
  654. return err;
  655. }
  656. #else
  657. static inline int
  658. xfrm_policy_flush_secctx_check(u8 type, struct xfrm_audit *audit_info)
  659. {
  660. return 0;
  661. }
  662. #endif
  663. int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info)
  664. {
  665. int dir, err = 0;
  666. write_lock_bh(&xfrm_policy_lock);
  667. err = xfrm_policy_flush_secctx_check(type, audit_info);
  668. if (err)
  669. goto out;
  670. for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
  671. struct xfrm_policy *pol;
  672. struct hlist_node *entry;
  673. int i, killed;
  674. killed = 0;
  675. again1:
  676. hlist_for_each_entry(pol, entry,
  677. &init_net.xfrm.policy_inexact[dir], bydst) {
  678. if (pol->type != type)
  679. continue;
  680. hlist_del(&pol->bydst);
  681. hlist_del(&pol->byidx);
  682. write_unlock_bh(&xfrm_policy_lock);
  683. xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
  684. audit_info->sessionid,
  685. audit_info->secid);
  686. xfrm_policy_kill(pol);
  687. killed++;
  688. write_lock_bh(&xfrm_policy_lock);
  689. goto again1;
  690. }
  691. for (i = init_net.xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
  692. again2:
  693. hlist_for_each_entry(pol, entry,
  694. init_net.xfrm.policy_bydst[dir].table + i,
  695. bydst) {
  696. if (pol->type != type)
  697. continue;
  698. hlist_del(&pol->bydst);
  699. hlist_del(&pol->byidx);
  700. list_del(&pol->walk.all);
  701. write_unlock_bh(&xfrm_policy_lock);
  702. xfrm_audit_policy_delete(pol, 1,
  703. audit_info->loginuid,
  704. audit_info->sessionid,
  705. audit_info->secid);
  706. xfrm_policy_kill(pol);
  707. killed++;
  708. write_lock_bh(&xfrm_policy_lock);
  709. goto again2;
  710. }
  711. }
  712. init_net.xfrm.policy_count[dir] -= killed;
  713. }
  714. atomic_inc(&flow_cache_genid);
  715. out:
  716. write_unlock_bh(&xfrm_policy_lock);
  717. return err;
  718. }
  719. EXPORT_SYMBOL(xfrm_policy_flush);
  720. int xfrm_policy_walk(struct xfrm_policy_walk *walk,
  721. int (*func)(struct xfrm_policy *, int, int, void*),
  722. void *data)
  723. {
  724. struct xfrm_policy *pol;
  725. struct xfrm_policy_walk_entry *x;
  726. int error = 0;
  727. if (walk->type >= XFRM_POLICY_TYPE_MAX &&
  728. walk->type != XFRM_POLICY_TYPE_ANY)
  729. return -EINVAL;
  730. if (list_empty(&walk->walk.all) && walk->seq != 0)
  731. return 0;
  732. write_lock_bh(&xfrm_policy_lock);
  733. if (list_empty(&walk->walk.all))
  734. x = list_first_entry(&init_net.xfrm.policy_all, struct xfrm_policy_walk_entry, all);
  735. else
  736. x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
  737. list_for_each_entry_from(x, &init_net.xfrm.policy_all, all) {
  738. if (x->dead)
  739. continue;
  740. pol = container_of(x, struct xfrm_policy, walk);
  741. if (walk->type != XFRM_POLICY_TYPE_ANY &&
  742. walk->type != pol->type)
  743. continue;
  744. error = func(pol, xfrm_policy_id2dir(pol->index),
  745. walk->seq, data);
  746. if (error) {
  747. list_move_tail(&walk->walk.all, &x->all);
  748. goto out;
  749. }
  750. walk->seq++;
  751. }
  752. if (walk->seq == 0) {
  753. error = -ENOENT;
  754. goto out;
  755. }
  756. list_del_init(&walk->walk.all);
  757. out:
  758. write_unlock_bh(&xfrm_policy_lock);
  759. return error;
  760. }
  761. EXPORT_SYMBOL(xfrm_policy_walk);
  762. void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
  763. {
  764. INIT_LIST_HEAD(&walk->walk.all);
  765. walk->walk.dead = 1;
  766. walk->type = type;
  767. walk->seq = 0;
  768. }
  769. EXPORT_SYMBOL(xfrm_policy_walk_init);
  770. void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
  771. {
  772. if (list_empty(&walk->walk.all))
  773. return;
  774. write_lock_bh(&xfrm_policy_lock);
  775. list_del(&walk->walk.all);
  776. write_unlock_bh(&xfrm_policy_lock);
  777. }
  778. EXPORT_SYMBOL(xfrm_policy_walk_done);
  779. /*
  780. * Find policy to apply to this flow.
  781. *
  782. * Returns 0 if policy found, else an -errno.
  783. */
  784. static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
  785. u8 type, u16 family, int dir)
  786. {
  787. struct xfrm_selector *sel = &pol->selector;
  788. int match, ret = -ESRCH;
  789. if (pol->family != family ||
  790. pol->type != type)
  791. return ret;
  792. match = xfrm_selector_match(sel, fl, family);
  793. if (match)
  794. ret = security_xfrm_policy_lookup(pol->security, fl->secid,
  795. dir);
  796. return ret;
  797. }
  798. static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
  799. u16 family, u8 dir)
  800. {
  801. int err;
  802. struct xfrm_policy *pol, *ret;
  803. xfrm_address_t *daddr, *saddr;
  804. struct hlist_node *entry;
  805. struct hlist_head *chain;
  806. u32 priority = ~0U;
  807. daddr = xfrm_flowi_daddr(fl, family);
  808. saddr = xfrm_flowi_saddr(fl, family);
  809. if (unlikely(!daddr || !saddr))
  810. return NULL;
  811. read_lock_bh(&xfrm_policy_lock);
  812. chain = policy_hash_direct(daddr, saddr, family, dir);
  813. ret = NULL;
  814. hlist_for_each_entry(pol, entry, chain, bydst) {
  815. err = xfrm_policy_match(pol, fl, type, family, dir);
  816. if (err) {
  817. if (err == -ESRCH)
  818. continue;
  819. else {
  820. ret = ERR_PTR(err);
  821. goto fail;
  822. }
  823. } else {
  824. ret = pol;
  825. priority = ret->priority;
  826. break;
  827. }
  828. }
  829. chain = &init_net.xfrm.policy_inexact[dir];
  830. hlist_for_each_entry(pol, entry, chain, bydst) {
  831. err = xfrm_policy_match(pol, fl, type, family, dir);
  832. if (err) {
  833. if (err == -ESRCH)
  834. continue;
  835. else {
  836. ret = ERR_PTR(err);
  837. goto fail;
  838. }
  839. } else if (pol->priority < priority) {
  840. ret = pol;
  841. break;
  842. }
  843. }
  844. if (ret)
  845. xfrm_pol_hold(ret);
  846. fail:
  847. read_unlock_bh(&xfrm_policy_lock);
  848. return ret;
  849. }
  850. static int xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
  851. void **objp, atomic_t **obj_refp)
  852. {
  853. struct xfrm_policy *pol;
  854. int err = 0;
  855. #ifdef CONFIG_XFRM_SUB_POLICY
  856. pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_SUB, fl, family, dir);
  857. if (IS_ERR(pol)) {
  858. err = PTR_ERR(pol);
  859. pol = NULL;
  860. }
  861. if (pol || err)
  862. goto end;
  863. #endif
  864. pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN, fl, family, dir);
  865. if (IS_ERR(pol)) {
  866. err = PTR_ERR(pol);
  867. pol = NULL;
  868. }
  869. #ifdef CONFIG_XFRM_SUB_POLICY
  870. end:
  871. #endif
  872. if ((*objp = (void *) pol) != NULL)
  873. *obj_refp = &pol->refcnt;
  874. return err;
  875. }
  876. static inline int policy_to_flow_dir(int dir)
  877. {
  878. if (XFRM_POLICY_IN == FLOW_DIR_IN &&
  879. XFRM_POLICY_OUT == FLOW_DIR_OUT &&
  880. XFRM_POLICY_FWD == FLOW_DIR_FWD)
  881. return dir;
  882. switch (dir) {
  883. default:
  884. case XFRM_POLICY_IN:
  885. return FLOW_DIR_IN;
  886. case XFRM_POLICY_OUT:
  887. return FLOW_DIR_OUT;
  888. case XFRM_POLICY_FWD:
  889. return FLOW_DIR_FWD;
  890. }
  891. }
  892. static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
  893. {
  894. struct xfrm_policy *pol;
  895. read_lock_bh(&xfrm_policy_lock);
  896. if ((pol = sk->sk_policy[dir]) != NULL) {
  897. int match = xfrm_selector_match(&pol->selector, fl,
  898. sk->sk_family);
  899. int err = 0;
  900. if (match) {
  901. err = security_xfrm_policy_lookup(pol->security,
  902. fl->secid,
  903. policy_to_flow_dir(dir));
  904. if (!err)
  905. xfrm_pol_hold(pol);
  906. else if (err == -ESRCH)
  907. pol = NULL;
  908. else
  909. pol = ERR_PTR(err);
  910. } else
  911. pol = NULL;
  912. }
  913. read_unlock_bh(&xfrm_policy_lock);
  914. return pol;
  915. }
  916. static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
  917. {
  918. struct hlist_head *chain = policy_hash_bysel(&pol->selector,
  919. pol->family, dir);
  920. list_add(&pol->walk.all, &init_net.xfrm.policy_all);
  921. hlist_add_head(&pol->bydst, chain);
  922. hlist_add_head(&pol->byidx, init_net.xfrm.policy_byidx+idx_hash(pol->index));
  923. init_net.xfrm.policy_count[dir]++;
  924. xfrm_pol_hold(pol);
  925. if (xfrm_bydst_should_resize(&init_net, dir, NULL))
  926. schedule_work(&init_net.xfrm.policy_hash_work);
  927. }
  928. static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
  929. int dir)
  930. {
  931. if (hlist_unhashed(&pol->bydst))
  932. return NULL;
  933. hlist_del(&pol->bydst);
  934. hlist_del(&pol->byidx);
  935. list_del(&pol->walk.all);
  936. init_net.xfrm.policy_count[dir]--;
  937. return pol;
  938. }
  939. int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
  940. {
  941. write_lock_bh(&xfrm_policy_lock);
  942. pol = __xfrm_policy_unlink(pol, dir);
  943. write_unlock_bh(&xfrm_policy_lock);
  944. if (pol) {
  945. if (dir < XFRM_POLICY_MAX)
  946. atomic_inc(&flow_cache_genid);
  947. xfrm_policy_kill(pol);
  948. return 0;
  949. }
  950. return -ENOENT;
  951. }
  952. EXPORT_SYMBOL(xfrm_policy_delete);
  953. int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
  954. {
  955. struct xfrm_policy *old_pol;
  956. #ifdef CONFIG_XFRM_SUB_POLICY
  957. if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
  958. return -EINVAL;
  959. #endif
  960. write_lock_bh(&xfrm_policy_lock);
  961. old_pol = sk->sk_policy[dir];
  962. sk->sk_policy[dir] = pol;
  963. if (pol) {
  964. pol->curlft.add_time = get_seconds();
  965. pol->index = xfrm_gen_index(XFRM_POLICY_MAX+dir);
  966. __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
  967. }
  968. if (old_pol)
  969. __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
  970. write_unlock_bh(&xfrm_policy_lock);
  971. if (old_pol) {
  972. xfrm_policy_kill(old_pol);
  973. }
  974. return 0;
  975. }
  976. static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
  977. {
  978. struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
  979. if (newp) {
  980. newp->selector = old->selector;
  981. if (security_xfrm_policy_clone(old->security,
  982. &newp->security)) {
  983. kfree(newp);
  984. return NULL; /* ENOMEM */
  985. }
  986. newp->lft = old->lft;
  987. newp->curlft = old->curlft;
  988. newp->action = old->action;
  989. newp->flags = old->flags;
  990. newp->xfrm_nr = old->xfrm_nr;
  991. newp->index = old->index;
  992. newp->type = old->type;
  993. memcpy(newp->xfrm_vec, old->xfrm_vec,
  994. newp->xfrm_nr*sizeof(struct xfrm_tmpl));
  995. write_lock_bh(&xfrm_policy_lock);
  996. __xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
  997. write_unlock_bh(&xfrm_policy_lock);
  998. xfrm_pol_put(newp);
  999. }
  1000. return newp;
  1001. }
  1002. int __xfrm_sk_clone_policy(struct sock *sk)
  1003. {
  1004. struct xfrm_policy *p0 = sk->sk_policy[0],
  1005. *p1 = sk->sk_policy[1];
  1006. sk->sk_policy[0] = sk->sk_policy[1] = NULL;
  1007. if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
  1008. return -ENOMEM;
  1009. if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
  1010. return -ENOMEM;
  1011. return 0;
  1012. }
  1013. static int
  1014. xfrm_get_saddr(xfrm_address_t *local, xfrm_address_t *remote,
  1015. unsigned short family)
  1016. {
  1017. int err;
  1018. struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  1019. if (unlikely(afinfo == NULL))
  1020. return -EINVAL;
  1021. err = afinfo->get_saddr(local, remote);
  1022. xfrm_policy_put_afinfo(afinfo);
  1023. return err;
  1024. }
  1025. /* Resolve list of templates for the flow, given policy. */
  1026. static int
  1027. xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
  1028. struct xfrm_state **xfrm,
  1029. unsigned short family)
  1030. {
  1031. int nx;
  1032. int i, error;
  1033. xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
  1034. xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
  1035. xfrm_address_t tmp;
  1036. for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
  1037. struct xfrm_state *x;
  1038. xfrm_address_t *remote = daddr;
  1039. xfrm_address_t *local = saddr;
  1040. struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
  1041. if (tmpl->mode == XFRM_MODE_TUNNEL ||
  1042. tmpl->mode == XFRM_MODE_BEET) {
  1043. remote = &tmpl->id.daddr;
  1044. local = &tmpl->saddr;
  1045. family = tmpl->encap_family;
  1046. if (xfrm_addr_any(local, family)) {
  1047. error = xfrm_get_saddr(&tmp, remote, family);
  1048. if (error)
  1049. goto fail;
  1050. local = &tmp;
  1051. }
  1052. }
  1053. x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
  1054. if (x && x->km.state == XFRM_STATE_VALID) {
  1055. xfrm[nx++] = x;
  1056. daddr = remote;
  1057. saddr = local;
  1058. continue;
  1059. }
  1060. if (x) {
  1061. error = (x->km.state == XFRM_STATE_ERROR ?
  1062. -EINVAL : -EAGAIN);
  1063. xfrm_state_put(x);
  1064. }
  1065. else if (error == -ESRCH)
  1066. error = -EAGAIN;
  1067. if (!tmpl->optional)
  1068. goto fail;
  1069. }
  1070. return nx;
  1071. fail:
  1072. for (nx--; nx>=0; nx--)
  1073. xfrm_state_put(xfrm[nx]);
  1074. return error;
  1075. }
  1076. static int
  1077. xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
  1078. struct xfrm_state **xfrm,
  1079. unsigned short family)
  1080. {
  1081. struct xfrm_state *tp[XFRM_MAX_DEPTH];
  1082. struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
  1083. int cnx = 0;
  1084. int error;
  1085. int ret;
  1086. int i;
  1087. for (i = 0; i < npols; i++) {
  1088. if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
  1089. error = -ENOBUFS;
  1090. goto fail;
  1091. }
  1092. ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
  1093. if (ret < 0) {
  1094. error = ret;
  1095. goto fail;
  1096. } else
  1097. cnx += ret;
  1098. }
  1099. /* found states are sorted for outbound processing */
  1100. if (npols > 1)
  1101. xfrm_state_sort(xfrm, tpp, cnx, family);
  1102. return cnx;
  1103. fail:
  1104. for (cnx--; cnx>=0; cnx--)
  1105. xfrm_state_put(tpp[cnx]);
  1106. return error;
  1107. }
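/* Editor's note: xfrm_tmpl_resolve() walks every template of every matched
 * policy and asks xfrm_state_find() for a usable SA. With a single policy
 * the states go straight into the caller's xfrm[] array; with sub + main
 * policies they are gathered in tp[] first and then reordered by
 * xfrm_state_sort() for outbound processing. A return value >= 0 is the
 * number of states found, < 0 an error, with -EAGAIN meaning "state still
 * larval, wait or retry" (see the handling in __xfrm_lookup() below).
 */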
  1108. /* Check that the bundle accepts the flow and its components are
  1109. * still valid.
  1110. */
  1111. static struct dst_entry *
  1112. xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
  1113. {
  1114. struct dst_entry *x;
  1115. struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  1116. if (unlikely(afinfo == NULL))
  1117. return ERR_PTR(-EINVAL);
  1118. x = afinfo->find_bundle(fl, policy);
  1119. xfrm_policy_put_afinfo(afinfo);
  1120. return x;
  1121. }
  1122. static inline int xfrm_get_tos(struct flowi *fl, int family)
  1123. {
  1124. struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  1125. int tos;
  1126. if (!afinfo)
  1127. return -EINVAL;
  1128. tos = afinfo->get_tos(fl);
  1129. xfrm_policy_put_afinfo(afinfo);
  1130. return tos;
  1131. }
  1132. static inline struct xfrm_dst *xfrm_alloc_dst(int family)
  1133. {
  1134. struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  1135. struct xfrm_dst *xdst;
  1136. if (!afinfo)
  1137. return ERR_PTR(-EINVAL);
  1138. xdst = dst_alloc(afinfo->dst_ops) ?: ERR_PTR(-ENOBUFS);
  1139. xfrm_policy_put_afinfo(afinfo);
  1140. return xdst;
  1141. }
  1142. static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
  1143. int nfheader_len)
  1144. {
  1145. struct xfrm_policy_afinfo *afinfo =
  1146. xfrm_policy_get_afinfo(dst->ops->family);
  1147. int err;
  1148. if (!afinfo)
  1149. return -EINVAL;
  1150. err = afinfo->init_path(path, dst, nfheader_len);
  1151. xfrm_policy_put_afinfo(afinfo);
  1152. return err;
  1153. }
  1154. static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
  1155. {
  1156. struct xfrm_policy_afinfo *afinfo =
  1157. xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
  1158. int err;
  1159. if (!afinfo)
  1160. return -EINVAL;
  1161. err = afinfo->fill_dst(xdst, dev);
  1162. xfrm_policy_put_afinfo(afinfo);
  1163. return err;
  1164. }
  1165. /* Allocate a chain of dst_entry's, attach the known xfrm's, calculate
  1166. * all the metrics... In short, bundle a bundle.
  1167. */
  1168. static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
  1169. struct xfrm_state **xfrm, int nx,
  1170. struct flowi *fl,
  1171. struct dst_entry *dst)
  1172. {
  1173. unsigned long now = jiffies;
  1174. struct net_device *dev;
  1175. struct dst_entry *dst_prev = NULL;
  1176. struct dst_entry *dst0 = NULL;
  1177. int i = 0;
  1178. int err;
  1179. int header_len = 0;
  1180. int nfheader_len = 0;
  1181. int trailer_len = 0;
  1182. int tos;
  1183. int family = policy->selector.family;
  1184. xfrm_address_t saddr, daddr;
  1185. xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
  1186. tos = xfrm_get_tos(fl, family);
  1187. err = tos;
  1188. if (tos < 0)
  1189. goto put_states;
  1190. dst_hold(dst);
  1191. for (; i < nx; i++) {
  1192. struct xfrm_dst *xdst = xfrm_alloc_dst(family);
  1193. struct dst_entry *dst1 = &xdst->u.dst;
  1194. err = PTR_ERR(xdst);
  1195. if (IS_ERR(xdst)) {
  1196. dst_release(dst);
  1197. goto put_states;
  1198. }
  1199. if (!dst_prev)
  1200. dst0 = dst1;
  1201. else {
  1202. dst_prev->child = dst_clone(dst1);
  1203. dst1->flags |= DST_NOHASH;
  1204. }
  1205. xdst->route = dst;
  1206. memcpy(&dst1->metrics, &dst->metrics, sizeof(dst->metrics));
  1207. if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
  1208. family = xfrm[i]->props.family;
  1209. dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
  1210. family);
  1211. err = PTR_ERR(dst);
  1212. if (IS_ERR(dst))
  1213. goto put_states;
  1214. } else
  1215. dst_hold(dst);
  1216. dst1->xfrm = xfrm[i];
  1217. xdst->genid = xfrm[i]->genid;
  1218. dst1->obsolete = -1;
  1219. dst1->flags |= DST_HOST;
  1220. dst1->lastuse = now;
  1221. dst1->input = dst_discard;
  1222. dst1->output = xfrm[i]->outer_mode->afinfo->output;
  1223. dst1->next = dst_prev;
  1224. dst_prev = dst1;
  1225. header_len += xfrm[i]->props.header_len;
  1226. if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
  1227. nfheader_len += xfrm[i]->props.header_len;
  1228. trailer_len += xfrm[i]->props.trailer_len;
  1229. }
  1230. dst_prev->child = dst;
  1231. dst0->path = dst;
  1232. err = -ENODEV;
  1233. dev = dst->dev;
  1234. if (!dev)
  1235. goto free_dst;
  1236. /* Copy neighbour for reachability confirmation */
  1237. dst0->neighbour = neigh_clone(dst->neighbour);
  1238. xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
  1239. xfrm_init_pmtu(dst_prev);
  1240. for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
  1241. struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
  1242. err = xfrm_fill_dst(xdst, dev);
  1243. if (err)
  1244. goto free_dst;
  1245. dst_prev->header_len = header_len;
  1246. dst_prev->trailer_len = trailer_len;
  1247. header_len -= xdst->u.dst.xfrm->props.header_len;
  1248. trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
  1249. }
  1250. out:
  1251. return dst0;
  1252. put_states:
  1253. for (; i < nx; i++)
  1254. xfrm_state_put(xfrm[i]);
  1255. free_dst:
  1256. if (dst0)
  1257. dst_free(dst0);
  1258. dst0 = ERR_PTR(err);
  1259. goto out;
  1260. }
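/* Editor's sketch of the resulting bundle, assuming two states xfrm[0] and
 * xfrm[1] stacked on an underlying route (names are illustrative only):
 *
 *	dst0 (xfrm[0]) --child--> dst1 (xfrm[1]) --child--> route
 *
 * where "route" is the plain (non-xfrm) route the last lookup produced;
 * dst0->path points at it, and each xfrm_dst keeps the route it was built
 * over in xdst->route. dst0 is what the caller gets back; its
 * header_len/trailer_len hold the total extra space the whole bundle needs,
 * and each deeper level holds a progressively smaller remainder.
 */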
  1261. static int inline
  1262. xfrm_dst_alloc_copy(void **target, void *src, int size)
  1263. {
  1264. if (!*target) {
  1265. *target = kmalloc(size, GFP_ATOMIC);
  1266. if (!*target)
  1267. return -ENOMEM;
  1268. }
  1269. memcpy(*target, src, size);
  1270. return 0;
  1271. }
  1272. static int inline
  1273. xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
  1274. {
  1275. #ifdef CONFIG_XFRM_SUB_POLICY
  1276. struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
  1277. return xfrm_dst_alloc_copy((void **)&(xdst->partner),
  1278. sel, sizeof(*sel));
  1279. #else
  1280. return 0;
  1281. #endif
  1282. }
  1283. static int inline
  1284. xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
  1285. {
  1286. #ifdef CONFIG_XFRM_SUB_POLICY
  1287. struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
  1288. return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
  1289. #else
  1290. return 0;
  1291. #endif
  1292. }
  1293. static int stale_bundle(struct dst_entry *dst);
  1294. /* Main function: finds/creates a bundle for given flow.
  1295. *
  1296. * At the moment we eat a raw IP route. Mostly to speed up lookups
  1297. * on interfaces with disabled IPsec.
  1298. */
  1299. int __xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
  1300. struct sock *sk, int flags)
  1301. {
  1302. struct xfrm_policy *policy;
  1303. struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
  1304. int npols;
  1305. int pol_dead;
  1306. int xfrm_nr;
  1307. int pi;
  1308. struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
  1309. struct dst_entry *dst, *dst_orig = *dst_p;
  1310. int nx = 0;
  1311. int err;
  1312. u32 genid;
  1313. u16 family;
  1314. u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
  1315. restart:
  1316. genid = atomic_read(&flow_cache_genid);
  1317. policy = NULL;
  1318. for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
  1319. pols[pi] = NULL;
  1320. npols = 0;
  1321. pol_dead = 0;
  1322. xfrm_nr = 0;
  1323. if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
  1324. policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
  1325. err = PTR_ERR(policy);
  1326. if (IS_ERR(policy)) {
  1327. XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLERROR);
  1328. goto dropdst;
  1329. }
  1330. }
  1331. if (!policy) {
  1332. /* To accelerate a bit... */
  1333. if ((dst_orig->flags & DST_NOXFRM) ||
  1334. !init_net.xfrm.policy_count[XFRM_POLICY_OUT])
  1335. goto nopol;
  1336. policy = flow_cache_lookup(fl, dst_orig->ops->family,
  1337. dir, xfrm_policy_lookup);
  1338. err = PTR_ERR(policy);
  1339. if (IS_ERR(policy)) {
  1340. XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLERROR);
  1341. goto dropdst;
  1342. }
  1343. }
  1344. if (!policy)
  1345. goto nopol;
  1346. family = dst_orig->ops->family;
  1347. pols[0] = policy;
  1348. npols ++;
  1349. xfrm_nr += pols[0]->xfrm_nr;
  1350. err = -ENOENT;
  1351. if ((flags & XFRM_LOOKUP_ICMP) && !(policy->flags & XFRM_POLICY_ICMP))
  1352. goto error;
  1353. policy->curlft.use_time = get_seconds();
  1354. switch (policy->action) {
  1355. default:
  1356. case XFRM_POLICY_BLOCK:
  1357. /* Prohibit the flow */
  1358. XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLBLOCK);
  1359. err = -EPERM;
  1360. goto error;
  1361. case XFRM_POLICY_ALLOW:
  1362. #ifndef CONFIG_XFRM_SUB_POLICY
  1363. if (policy->xfrm_nr == 0) {
  1364. /* Flow passes not transformed. */
  1365. xfrm_pol_put(policy);
  1366. return 0;
  1367. }
  1368. #endif
  1369. /* Try to find matching bundle.
  1370. *
  1371. * LATER: get help from the flow cache. It is optional; this
  1372. * is needed only for output policy.
  1373. */
  1374. dst = xfrm_find_bundle(fl, policy, family);
  1375. if (IS_ERR(dst)) {
  1376. XFRM_INC_STATS(LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
  1377. err = PTR_ERR(dst);
  1378. goto error;
  1379. }
  1380. if (dst)
  1381. break;
  1382. #ifdef CONFIG_XFRM_SUB_POLICY
  1383. if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
  1384. pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
  1385. fl, family,
  1386. XFRM_POLICY_OUT);
  1387. if (pols[1]) {
  1388. if (IS_ERR(pols[1])) {
  1389. XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLERROR);
  1390. err = PTR_ERR(pols[1]);
  1391. goto error;
  1392. }
  1393. if (pols[1]->action == XFRM_POLICY_BLOCK) {
  1394. XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLBLOCK);
  1395. err = -EPERM;
  1396. goto error;
  1397. }
  1398. npols ++;
  1399. xfrm_nr += pols[1]->xfrm_nr;
  1400. }
  1401. }
  1402. /*
  1403. * Neither the flowi nor the bundle information knows the size of the
  1404. * transformation templates, so when more than one policy is in use we
  1405. * can only tell whether all of them are bypass after they have been
  1406. * looked up. Note that the not-transformed bypass above is likewise
  1407. * guarded by the non-sub-policy configuration.
  1408. */
  1409. if (xfrm_nr == 0) {
  1410. /* Flow passes not transformed. */
  1411. xfrm_pols_put(pols, npols);
  1412. return 0;
  1413. }
#endif

		nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

		if (unlikely(nx < 0)) {
			err = nx;
			if (err == -EAGAIN && sysctl_xfrm_larval_drop) {
				/* EREMOTE tells the caller to generate
				 * a one-shot blackhole route.
				 */
				XFRM_INC_STATS(LINUX_MIB_XFRMOUTNOSTATES);
				xfrm_pol_put(policy);
				return -EREMOTE;
			}
			if (err == -EAGAIN && (flags & XFRM_LOOKUP_WAIT)) {
				DECLARE_WAITQUEUE(wait, current);

				add_wait_queue(&init_net.xfrm.km_waitq, &wait);
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&init_net.xfrm.km_waitq, &wait);

				nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

				if (nx == -EAGAIN && signal_pending(current)) {
					XFRM_INC_STATS(LINUX_MIB_XFRMOUTNOSTATES);
					err = -ERESTART;
					goto error;
				}
				if (nx == -EAGAIN ||
				    genid != atomic_read(&flow_cache_genid)) {
					xfrm_pols_put(pols, npols);
					goto restart;
				}
				err = nx;
			}
			if (err < 0) {
				XFRM_INC_STATS(LINUX_MIB_XFRMOUTNOSTATES);
				goto error;
			}
		}
		if (nx == 0) {
			/* Flow passes not transformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}

		dst = xfrm_bundle_create(policy, xfrm, nx, fl, dst_orig);
		err = PTR_ERR(dst);
		if (IS_ERR(dst)) {
			XFRM_INC_STATS(LINUX_MIB_XFRMOUTBUNDLEGENERROR);
			goto error;
		}

		for (pi = 0; pi < npols; pi++) {
			read_lock_bh(&pols[pi]->lock);
			pol_dead |= pols[pi]->walk.dead;
			read_unlock_bh(&pols[pi]->lock);
		}

		write_lock_bh(&policy->lock);
		if (unlikely(pol_dead || stale_bundle(dst))) {
			/* While we were resolving the states, this policy
			 * went away (or the bundle went stale).  Retry: it
			 * is not paranoia, we simply cannot enlist a new
			 * bundle on a dead policy or a stale bundle.
			 */
			write_unlock_bh(&policy->lock);
			dst_free(dst);

			if (pol_dead)
				XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLDEAD);
			else
				XFRM_INC_STATS(LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
			err = -EHOSTUNREACH;
			goto error;
		}

		if (npols > 1)
			err = xfrm_dst_update_parent(dst, &pols[1]->selector);
		else
			err = xfrm_dst_update_origin(dst, fl);
		if (unlikely(err)) {
			write_unlock_bh(&policy->lock);
			dst_free(dst);
			XFRM_INC_STATS(LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
			goto error;
		}

		dst->next = policy->bundles;
		policy->bundles = dst;
		dst_hold(dst);
		write_unlock_bh(&policy->lock);
	}
	*dst_p = dst;
	dst_release(dst_orig);
	xfrm_pols_put(pols, npols);
	return 0;

error:
	xfrm_pols_put(pols, npols);
dropdst:
	dst_release(dst_orig);
	*dst_p = NULL;
	return err;

nopol:
	err = -ENOENT;
	if (flags & XFRM_LOOKUP_ICMP)
		goto dropdst;
	return 0;
}
EXPORT_SYMBOL(__xfrm_lookup);
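
/* Resolve an output route through xfrm, as __xfrm_lookup(), but translate
 * the internal -EREMOTE result ("generate a one-shot blackhole route") into
 * -EAGAIN and drop the dst reference, so ordinary callers only ever see a
 * routed dst or an error.  __xfrm_route_forward() below is one in-tree user:
 * it simply calls xfrm_lookup(&skb->dst, &fl, NULL, 0).
 */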
int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
{
	int err = __xfrm_lookup(dst_p, fl, sk, flags);

	if (err == -EREMOTE) {
		dst_release(*dst_p);
		*dst_p = NULL;
		err = -EAGAIN;
	}

	return err;
}
EXPORT_SYMBOL(xfrm_lookup);
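
/* Ask the xfrm state type of the secpath entry at @idx to reject the skb.
 * Returns 0 when there is no secpath entry at that index or the type has
 * no ->reject handler, otherwise whatever the handler returns.
 */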
static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
{
	struct xfrm_state *x;

	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
		return 0;

	x = skb->sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	return x->type->reject(x, skb, fl);
}

/* When the skb is transformed back to its "native" form, we have to
 * check policy restrictions.  At the moment we do this in the most
 * straightforward (and expensive) way.  Note that connected sockets
 * must have the policy cached at the socket.
 */
static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}

/*
 * Returns 0 or a positive value when validation succeeds: either the
 * template is bypassed because it is an optional transport-mode template,
 * or the value is the index just past the secpath state that matched the
 * template.
 * Returns -1 when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-idx;
			break;
		}
	}
	return start;
}
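
/* Build a flowi describing the packet: the address-family specific
 * decode_session hook fills in the addresses, ports, etc. (in reverse
 * when @reverse is set), and the LSM attaches the flow security id.
 */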
int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl, reverse);
	err = security_xfrm_decode_session(skb, &fl->secid);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(__xfrm_decode_session);
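
/* Scan the secpath from index @k for a state that is not in transport
 * mode; if one is found, store its index in *idxp and return 1.
 */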
static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}
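
/* Inbound policy check: decode the flow from the skb, verify the secpath
 * states against their selectors, look up the applicable policies (socket
 * policy first, then main/sub policies via the flow cache) and match the
 * secpath against the policy templates.  The low bits of @dir hold the
 * XFRM_POLICY_* direction, the remaining bits request reverse decoding.
 * Returns 1 if the packet is permitted, 0 if it must be dropped.
 */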
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	int reverse;
	struct flowi fl;
	u8 fl_dir;
	int xerr_idx = -1;

	reverse = dir & ~XFRM_POLICY_MASK;
	dir &= XFRM_POLICY_MASK;
	fl_dir = policy_to_flow_dir(dir);

	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
		XFRM_INC_STATS(LINUX_MIB_XFRMINHDRERROR);
		return 0;
	}

	nf_nat_decode_session(skb, &fl, family);

	/* First, check used SA against their selectors. */
	if (skb->sp) {
		int i;

		for (i = skb->sp->len-1; i >= 0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family)) {
				XFRM_INC_STATS(LINUX_MIB_XFRMINSTATEMISMATCH);
				return 0;
			}
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
		if (IS_ERR(pol)) {
			XFRM_INC_STATS(LINUX_MIB_XFRMINPOLERROR);
			return 0;
		}
	}

	if (!pol)
		pol = flow_cache_lookup(&fl, family, fl_dir,
					xfrm_policy_lookup);

	if (IS_ERR(pol)) {
		XFRM_INC_STATS(LINUX_MIB_XFRMINPOLERROR);
		return 0;
	}

	if (!pol) {
		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			XFRM_INC_STATS(LINUX_MIB_XFRMINNOPOLS);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = get_seconds();

	pols[0] = pol;
	npols++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				XFRM_INC_STATS(LINUX_MIB_XFRMINPOLERROR);
				return 0;
			}
			pols[1]->curlft.use_time = get_seconds();
			npols++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW) {
				XFRM_INC_STATS(LINUX_MIB_XFRMINPOLBLOCK);
				goto reject;
			}
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
				XFRM_INC_STATS(LINUX_MIB_XFRMINBUFFERERROR);
				goto reject_error;
			}
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement some
		 * barriers, but at the moment barriers are implied
		 * between each two transformations.
		 */
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				XFRM_INC_STATS(LINUX_MIB_XFRMINTMPLMISMATCH);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
			XFRM_INC_STATS(LINUX_MIB_XFRMINTMPLMISMATCH);
			goto reject;
		}

		xfrm_pols_put(pols, npols);
		return 1;
	}
	XFRM_INC_STATS(LINUX_MIB_XFRMINPOLBLOCK);

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);
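
/* Forwarding path hook: re-route skb->dst through xfrm_lookup() so that
 * forwarded packets matching an output policy get a transformation bundle
 * attached.  Returns 1 on success, 0 if the packet should be dropped.
 */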
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct flowi fl;

	if (xfrm_decode_session(skb, &fl, family) < 0) {
		/* XXX: we should have something like FWDHDRERROR here. */
		XFRM_INC_STATS(LINUX_MIB_XFRMINHDRERROR);
		return 0;
	}

	return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
}
EXPORT_SYMBOL(__xfrm_route_forward);

/* Optimize later using cookies and generation ids. */
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to "-1" to force all XFRM destinations to get validated by
	 * dst_ops->check on every use.  We do this because when a
	 * normal route referenced by an XFRM dst is obsoleted we do
	 * not go looking around for all parent referencing XFRM dsts
	 * so that we can invalidate them.  It is just too much work.
	 * Instead we make the checks here on every use.  For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example).  If X is marked obsolete, "A" will not
	 * notice.  That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a policy's bundle is pruned, we dst_free() the XFRM
	 * dst which causes its ->obsolete field to be set to a
	 * positive non-zero integer.  If an XFRM dst has been pruned
	 * like this, we want to force a new route lookup.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
}

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = dev_net(dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before it reaches the
	 * point of failure.
	 */
	return;
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}
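
/* Walk pol->bundles under pol->lock and unlink every bundle for which
 * func() returns true, chaining it onto *gc_list_p for the caller to free.
 */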
static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_entry *), struct dst_entry **gc_list_p)
{
	struct dst_entry *dst, **dstp;

	write_lock(&pol->lock);
	dstp = &pol->bundles;
	while ((dst = *dstp) != NULL) {
		if (func(dst)) {
			*dstp = dst->next;
			dst->next = *gc_list_p;
			*gc_list_p = dst;
		} else {
			dstp = &dst->next;
		}
	}
	write_unlock(&pol->lock);
}

static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
{
	struct dst_entry *gc_list = NULL;
	int dir;

	read_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		struct hlist_head *table;
		int i;

		hlist_for_each_entry(pol, entry,
				     &init_net.xfrm.policy_inexact[dir], bydst)
			prune_one_bundle(pol, func, &gc_list);

		table = init_net.xfrm.policy_bydst[dir].table;
		for (i = init_net.xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry, table + i, bydst)
				prune_one_bundle(pol, func, &gc_list);
		}
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;
		gc_list = dst->next;
		dst_free(dst);
	}
}

static int unused_bundle(struct dst_entry *dst)
{
	return !atomic_read(&dst->__refcnt);
}

static void __xfrm_garbage_collect(void)
{
	xfrm_prune_bundles(unused_bundle);
}

static int xfrm_flush_bundles(void)
{
	xfrm_prune_bundles(stale_bundle);
	return 0;
}
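
/* Walk a freshly built bundle from the top, cache the MTU of each child
 * dst and of the attached route, and set every level's RTAX_MTU to the
 * smaller of the transform MTU and the route MTU.
 */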
static void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst->metrics[RTAX_MTU-1] = pmtu;
	} while ((dst = dst->next));
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */
int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
		   struct flowi *fl, int family, int strict)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (fl) {
		if (first->origin && !flow_cache_uli_match(first->origin, fl))
			return 0;
		if (first->partner &&
		    !xfrm_selector_match(first->partner, fl, family))
			return 0;
	}
#endif

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
			return 0;
		if (fl && pol &&
		    !security_xfrm_state_pol_flow_match(dst->xfrm, pol, fl))
			return 0;
		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->genid != dst->xfrm->genid)
			return 0;

		if (strict && fl &&
		    !(dst->xfrm->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
		    !xfrm_state_addr_flow_check(dst->xfrm, fl, family))
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst->metrics[RTAX_MTU-1] = mtu;

		if (last == first)
			break;

		last = (struct xfrm_dst *)last->u.dst.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}
EXPORT_SYMBOL(xfrm_bundle_ok);
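
/* Register the per-address-family policy operations.  Any dst_ops hooks
 * the caller leaves unset are filled in with the generic xfrm versions.
 * An address family module (for instance net/ipv4/xfrm4_policy.c) does
 * roughly the following (illustrative sketch only; see the actual AF
 * modules for the full set of callbacks):
 *
 *	static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
 *		.family  = AF_INET,
 *		.dst_ops = &xfrm4_dst_ops,
 *		...
 *	};
 *	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
 */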
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = __xfrm_garbage_collect;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;
			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	read_unlock(&xfrm_policy_afinfo_lock);
}
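
/* Netdevice notifier: when an interface goes down, prune any bundles that
 * have become stale so they do not keep references to the dead device.
 */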
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
		xfrm_flush_bundles();
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call	= xfrm_dev_event,
};

#ifdef CONFIG_XFRM_STATISTICS
static int __init xfrm_statistics_init(void)
{
	if (snmp_mib_init((void **)xfrm_statistics,
			  sizeof(struct linux_xfrm_mib)) < 0)
		return -ENOMEM;
	return 0;
}
#endif
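
/* Per-namespace initialisation of the policy database: allocate the
 * by-index hash and the per-direction by-destination hash tables (initial
 * size of 8 buckets), and, for the initial namespace only, create the
 * xfrm_dst slab cache and register the netdevice notifier.
 */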
static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir;

	if (net_eq(net, &init_net))
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
	}

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	if (net_eq(net, &init_net))
		register_netdevice_notifier(&xfrm_dev_notifier);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}

static void xfrm_policy_fini(struct net *net)
{
	unsigned int sz;
	int dir;

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
}

static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	return 0;

out_policy:
	xfrm_state_fini(net);
out_state:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
#ifdef CONFIG_XFRM_STATISTICS
	xfrm_statistics_init();
#endif
	xfrm_input_init();
#ifdef CONFIG_XFRM_STATISTICS
	xfrm_proc_init();
#endif
}
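
/* Audit helpers: record the security context (if any) and the policy
 * selector (addresses and prefix lengths for AF_INET/AF_INET6) in the
 * audit records emitted for SPD add/delete operations.
 */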
#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
			   uid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      uid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif
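
/* Policy/state migration (the MIGRATE operation, used e.g. for Mobile
 * IPv6): helpers to find the policy matching a migration selector,
 * rewrite the tunnel endpoints in its templates, and switch the
 * associated states over to the new addresses.
 */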
#ifdef CONFIG_XFRM_MIGRATE
static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
				       struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
				  sel_cmp->family) == 0 &&
		    xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
				  sel_cmp->family) == 0 &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return 1;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return 1;
		}
	}
	return 0;
}

static struct xfrm_policy * xfrm_migrate_policy_find(struct xfrm_selector *sel,
						     u8 dir, u8 type)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(&sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &init_net.xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type &&
		    pol->priority < priority) {
			ret = pol;
			break;
		}
	}

	if (ret)
		xfrm_pol_hold(ret);

	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}
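
/* Does policy template @t describe the same SA as migration entry @m?
 * Mode, protocol and (if specified) reqid must match; for tunnel and
 * BEET mode the old endpoint addresses must match as well.
 */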
static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
					  m->old_family) == 0 &&
			    xfrm_addr_cmp(&t->saddr, &m->old_saddr,
					  m->old_family) == 0) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* In transport mode the template does not store any
			 * IP addresses, so only mode and protocol are
			 * compared. */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	struct dst_entry *dst;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			while ((dst = pol->bundles) != NULL) {
				pol->bundles = dst->next;
				dst_free(dst);
			}
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
				   m[i].old_family) == 0) &&
		    (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
				   m[i].old_family) == 0))
			return -EINVAL;
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}
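
/* Perform a migration: validate the request, find the matching policy,
 * clone and re-address the affected states, rewrite the policy templates,
 * delete the old states and finally announce the migration to the key
 * managers via km_migrate().  On failure the newly created states are
 * deleted again.
 */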
int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			if ((xc = xfrm_state_migrate(x, mp))) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif