xfrm_policy.c

  1. /*
  2. * xfrm_policy.c
  3. *
  4. * Changes:
  5. * Mitsuru KANDA @USAGI
  6. * Kazunori MIYAZAWA @USAGI
  7. * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
  8. * IPv6 support
  9. * Kazunori MIYAZAWA @USAGI
  10. * YOSHIFUJI Hideaki
  11. * Split up af-specific portion
  12. * Derek Atkins <derek@ihtfp.com> Add the post_input processor
  13. *
  14. */
  15. #include <linux/err.h>
  16. #include <linux/slab.h>
  17. #include <linux/kmod.h>
  18. #include <linux/list.h>
  19. #include <linux/spinlock.h>
  20. #include <linux/workqueue.h>
  21. #include <linux/notifier.h>
  22. #include <linux/netdevice.h>
  23. #include <linux/netfilter.h>
  24. #include <linux/module.h>
  25. #include <linux/cache.h>
  26. #include <linux/audit.h>
  27. #include <net/dst.h>
  28. #include <net/xfrm.h>
  29. #include <net/ip.h>
  30. #ifdef CONFIG_XFRM_STATISTICS
  31. #include <net/snmp.h>
  32. #endif
  33. #include "xfrm_hash.h"
  34. DEFINE_MUTEX(xfrm_cfg_mutex);
  35. EXPORT_SYMBOL(xfrm_cfg_mutex);
  36. static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
  37. static struct dst_entry *xfrm_policy_sk_bundles;
  38. static DEFINE_RWLOCK(xfrm_policy_lock);
  39. static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
  40. static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
  41. static struct kmem_cache *xfrm_dst_cache __read_mostly;
  42. static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
  43. static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
  44. static void xfrm_init_pmtu(struct dst_entry *dst);
  45. static int stale_bundle(struct dst_entry *dst);
  46. static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
  47. int dir);
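/* Check a flow against a policy selector: addresses are compared under the
 * selector prefix lengths, ports under the selector port masks, and the
 * protocol and output interface only when the selector specifies them.
 */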
  48. static inline int
  49. __xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
  50. {
  51. return addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
  52. addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
  53. !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
  54. !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
  55. (fl->proto == sel->proto || !sel->proto) &&
  56. (fl->oif == sel->ifindex || !sel->ifindex);
  57. }
  58. static inline int
  59. __xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
  60. {
  61. return addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
  62. addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
  63. !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
  64. !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
  65. (fl->proto == sel->proto || !sel->proto) &&
  66. (fl->oif == sel->ifindex || !sel->ifindex);
  67. }
  68. int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
  69. unsigned short family)
  70. {
  71. switch (family) {
  72. case AF_INET:
  73. return __xfrm4_selector_match(sel, fl);
  74. case AF_INET6:
  75. return __xfrm6_selector_match(sel, fl);
  76. }
  77. return 0;
  78. }
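/* Route lookup through the per-family afinfo: returns a dst_entry for the
 * given addresses, or an ERR_PTR() if the family is unsupported or the
 * lookup fails.
 */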
  79. static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
  80. xfrm_address_t *saddr,
  81. xfrm_address_t *daddr,
  82. int family)
  83. {
  84. struct xfrm_policy_afinfo *afinfo;
  85. struct dst_entry *dst;
  86. afinfo = xfrm_policy_get_afinfo(family);
  87. if (unlikely(afinfo == NULL))
  88. return ERR_PTR(-EAFNOSUPPORT);
  89. dst = afinfo->dst_lookup(net, tos, saddr, daddr);
  90. xfrm_policy_put_afinfo(afinfo);
  91. return dst;
  92. }
  93. static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
  94. xfrm_address_t *prev_saddr,
  95. xfrm_address_t *prev_daddr,
  96. int family)
  97. {
  98. struct net *net = xs_net(x);
  99. xfrm_address_t *saddr = &x->props.saddr;
  100. xfrm_address_t *daddr = &x->id.daddr;
  101. struct dst_entry *dst;
  102. if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
  103. saddr = x->coaddr;
  104. daddr = prev_daddr;
  105. }
  106. if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
  107. saddr = prev_saddr;
  108. daddr = x->coaddr;
  109. }
  110. dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);
  111. if (!IS_ERR(dst)) {
  112. if (prev_saddr != saddr)
  113. memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
  114. if (prev_daddr != daddr)
  115. memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
  116. }
  117. return dst;
  118. }
  119. static inline unsigned long make_jiffies(long secs)
  120. {
  121. if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
  122. return MAX_SCHEDULE_TIMEOUT-1;
  123. else
  124. return secs*HZ;
  125. }
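/* Lifetime timer of a policy: signals soft expiry to the key managers,
 * deletes the policy (and signals hard expiry) once a hard limit is hit,
 * and otherwise re-arms itself for the nearest pending limit.
 */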
  126. static void xfrm_policy_timer(unsigned long data)
  127. {
  128. struct xfrm_policy *xp = (struct xfrm_policy*)data;
  129. unsigned long now = get_seconds();
  130. long next = LONG_MAX;
  131. int warn = 0;
  132. int dir;
  133. read_lock(&xp->lock);
  134. if (unlikely(xp->walk.dead))
  135. goto out;
  136. dir = xfrm_policy_id2dir(xp->index);
  137. if (xp->lft.hard_add_expires_seconds) {
  138. long tmo = xp->lft.hard_add_expires_seconds +
  139. xp->curlft.add_time - now;
  140. if (tmo <= 0)
  141. goto expired;
  142. if (tmo < next)
  143. next = tmo;
  144. }
  145. if (xp->lft.hard_use_expires_seconds) {
  146. long tmo = xp->lft.hard_use_expires_seconds +
  147. (xp->curlft.use_time ? : xp->curlft.add_time) - now;
  148. if (tmo <= 0)
  149. goto expired;
  150. if (tmo < next)
  151. next = tmo;
  152. }
  153. if (xp->lft.soft_add_expires_seconds) {
  154. long tmo = xp->lft.soft_add_expires_seconds +
  155. xp->curlft.add_time - now;
  156. if (tmo <= 0) {
  157. warn = 1;
  158. tmo = XFRM_KM_TIMEOUT;
  159. }
  160. if (tmo < next)
  161. next = tmo;
  162. }
  163. if (xp->lft.soft_use_expires_seconds) {
  164. long tmo = xp->lft.soft_use_expires_seconds +
  165. (xp->curlft.use_time ? : xp->curlft.add_time) - now;
  166. if (tmo <= 0) {
  167. warn = 1;
  168. tmo = XFRM_KM_TIMEOUT;
  169. }
  170. if (tmo < next)
  171. next = tmo;
  172. }
  173. if (warn)
  174. km_policy_expired(xp, dir, 0, 0);
  175. if (next != LONG_MAX &&
  176. !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
  177. xfrm_pol_hold(xp);
  178. out:
  179. read_unlock(&xp->lock);
  180. xfrm_pol_put(xp);
  181. return;
  182. expired:
  183. read_unlock(&xp->lock);
  184. if (!xfrm_policy_delete(xp, dir))
  185. km_policy_expired(xp, dir, 1, 0);
  186. xfrm_pol_put(xp);
  187. }
  188. static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
  189. {
  190. struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
  191. if (unlikely(pol->walk.dead))
  192. flo = NULL;
  193. else
  194. xfrm_pol_hold(pol);
  195. return flo;
  196. }
  197. static int xfrm_policy_flo_check(struct flow_cache_object *flo)
  198. {
  199. struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
  200. return !pol->walk.dead;
  201. }
  202. static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
  203. {
  204. xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
  205. }
  206. static const struct flow_cache_ops xfrm_policy_fc_ops = {
  207. .get = xfrm_policy_flo_get,
  208. .check = xfrm_policy_flo_check,
  209. .delete = xfrm_policy_flo_delete,
  210. };
  211. /* Allocate an xfrm_policy. Not used here; it is supposed to be used by pfkeyv2
  212. * SPD calls.
  213. */
  214. struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
  215. {
  216. struct xfrm_policy *policy;
  217. policy = kzalloc(sizeof(struct xfrm_policy), gfp);
  218. if (policy) {
  219. write_pnet(&policy->xp_net, net);
  220. INIT_LIST_HEAD(&policy->walk.all);
  221. INIT_HLIST_NODE(&policy->bydst);
  222. INIT_HLIST_NODE(&policy->byidx);
  223. rwlock_init(&policy->lock);
  224. atomic_set(&policy->refcnt, 1);
  225. setup_timer(&policy->timer, xfrm_policy_timer,
  226. (unsigned long)policy);
  227. policy->flo.ops = &xfrm_policy_fc_ops;
  228. }
  229. return policy;
  230. }
  231. EXPORT_SYMBOL(xfrm_policy_alloc);
  232. /* Destroy xfrm_policy: descendant resources must have been released by this point. */
  233. void xfrm_policy_destroy(struct xfrm_policy *policy)
  234. {
  235. BUG_ON(!policy->walk.dead);
  236. if (del_timer(&policy->timer))
  237. BUG();
  238. security_xfrm_policy_free(policy->security);
  239. kfree(policy);
  240. }
  241. EXPORT_SYMBOL(xfrm_policy_destroy);
  242. /* Rule must be locked. Release descendant resources, announce
  243. * the entry dead. The rule must already be unlinked from all lists.
  244. */
  245. static void xfrm_policy_kill(struct xfrm_policy *policy)
  246. {
  247. policy->walk.dead = 1;
  248. atomic_inc(&policy->genid);
  249. if (del_timer(&policy->timer))
  250. xfrm_pol_put(policy);
  251. xfrm_pol_put(policy);
  252. }
  253. static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
  254. static inline unsigned int idx_hash(struct net *net, u32 index)
  255. {
  256. return __idx_hash(index, net->xfrm.policy_idx_hmask);
  257. }
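/* Choose the bydst hash chain for a selector. Selectors whose addresses are
 * not full host prefixes cannot be hashed by address and are kept on the
 * per-direction inexact list instead.
 */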
  258. static struct hlist_head *policy_hash_bysel(struct net *net, struct xfrm_selector *sel, unsigned short family, int dir)
  259. {
  260. unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
  261. unsigned int hash = __sel_hash(sel, family, hmask);
  262. return (hash == hmask + 1 ?
  263. &net->xfrm.policy_inexact[dir] :
  264. net->xfrm.policy_bydst[dir].table + hash);
  265. }
  266. static struct hlist_head *policy_hash_direct(struct net *net, xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, int dir)
  267. {
  268. unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
  269. unsigned int hash = __addr_hash(daddr, saddr, family, hmask);
  270. return net->xfrm.policy_bydst[dir].table + hash;
  271. }
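/* Move every policy on one bydst chain into the new hash table, preserving
 * the relative order of entries that land in the same new bucket.
 */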
  272. static void xfrm_dst_hash_transfer(struct hlist_head *list,
  273. struct hlist_head *ndsttable,
  274. unsigned int nhashmask)
  275. {
  276. struct hlist_node *entry, *tmp, *entry0 = NULL;
  277. struct xfrm_policy *pol;
  278. unsigned int h0 = 0;
  279. redo:
  280. hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
  281. unsigned int h;
  282. h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
  283. pol->family, nhashmask);
  284. if (!entry0) {
  285. hlist_del(entry);
  286. hlist_add_head(&pol->bydst, ndsttable+h);
  287. h0 = h;
  288. } else {
  289. if (h != h0)
  290. continue;
  291. hlist_del(entry);
  292. hlist_add_after(entry0, &pol->bydst);
  293. }
  294. entry0 = entry;
  295. }
  296. if (!hlist_empty(list)) {
  297. entry0 = NULL;
  298. goto redo;
  299. }
  300. }
  301. static void xfrm_idx_hash_transfer(struct hlist_head *list,
  302. struct hlist_head *nidxtable,
  303. unsigned int nhashmask)
  304. {
  305. struct hlist_node *entry, *tmp;
  306. struct xfrm_policy *pol;
  307. hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
  308. unsigned int h;
  309. h = __idx_hash(pol->index, nhashmask);
  310. hlist_add_head(&pol->byidx, nidxtable+h);
  311. }
  312. }
  313. static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
  314. {
  315. return ((old_hmask + 1) << 1) - 1;
  316. }
  317. static void xfrm_bydst_resize(struct net *net, int dir)
  318. {
  319. unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
  320. unsigned int nhashmask = xfrm_new_hash_mask(hmask);
  321. unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
  322. struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
  323. struct hlist_head *ndst = xfrm_hash_alloc(nsize);
  324. int i;
  325. if (!ndst)
  326. return;
  327. write_lock_bh(&xfrm_policy_lock);
  328. for (i = hmask; i >= 0; i--)
  329. xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);
  330. net->xfrm.policy_bydst[dir].table = ndst;
  331. net->xfrm.policy_bydst[dir].hmask = nhashmask;
  332. write_unlock_bh(&xfrm_policy_lock);
  333. xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
  334. }
  335. static void xfrm_byidx_resize(struct net *net, int total)
  336. {
  337. unsigned int hmask = net->xfrm.policy_idx_hmask;
  338. unsigned int nhashmask = xfrm_new_hash_mask(hmask);
  339. unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
  340. struct hlist_head *oidx = net->xfrm.policy_byidx;
  341. struct hlist_head *nidx = xfrm_hash_alloc(nsize);
  342. int i;
  343. if (!nidx)
  344. return;
  345. write_lock_bh(&xfrm_policy_lock);
  346. for (i = hmask; i >= 0; i--)
  347. xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
  348. net->xfrm.policy_byidx = nidx;
  349. net->xfrm.policy_idx_hmask = nhashmask;
  350. write_unlock_bh(&xfrm_policy_lock);
  351. xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
  352. }
  353. static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
  354. {
  355. unsigned int cnt = net->xfrm.policy_count[dir];
  356. unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
  357. if (total)
  358. *total += cnt;
  359. if ((hmask + 1) < xfrm_policy_hashmax &&
  360. cnt > hmask)
  361. return 1;
  362. return 0;
  363. }
  364. static inline int xfrm_byidx_should_resize(struct net *net, int total)
  365. {
  366. unsigned int hmask = net->xfrm.policy_idx_hmask;
  367. if ((hmask + 1) < xfrm_policy_hashmax &&
  368. total > hmask)
  369. return 1;
  370. return 0;
  371. }
  372. void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
  373. {
  374. read_lock_bh(&xfrm_policy_lock);
  375. si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
  376. si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
  377. si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
  378. si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
  379. si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
  380. si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
  381. si->spdhcnt = net->xfrm.policy_idx_hmask;
  382. si->spdhmcnt = xfrm_policy_hashmax;
  383. read_unlock_bh(&xfrm_policy_lock);
  384. }
  385. EXPORT_SYMBOL(xfrm_spd_getinfo);
  386. static DEFINE_MUTEX(hash_resize_mutex);
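/* Worker that grows the per-direction bydst tables and the byidx table
 * (roughly doubling them) once the policy count exceeds the current mask.
 */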
  387. static void xfrm_hash_resize(struct work_struct *work)
  388. {
  389. struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
  390. int dir, total;
  391. mutex_lock(&hash_resize_mutex);
  392. total = 0;
  393. for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
  394. if (xfrm_bydst_should_resize(net, dir, &total))
  395. xfrm_bydst_resize(net, dir);
  396. }
  397. if (xfrm_byidx_should_resize(net, total))
  398. xfrm_byidx_resize(net, total);
  399. mutex_unlock(&hash_resize_mutex);
  400. }
  401. /* Generate a new index... KAME seems to generate them ordered by cost
  402. * of an absolute unpredictability of ordering of rules. This will not pass. */
  403. static u32 xfrm_gen_index(struct net *net, int dir)
  404. {
  405. static u32 idx_generator;
  406. for (;;) {
  407. struct hlist_node *entry;
  408. struct hlist_head *list;
  409. struct xfrm_policy *p;
  410. u32 idx;
  411. int found;
  412. idx = (idx_generator | dir);
  413. idx_generator += 8;
  414. if (idx == 0)
  415. idx = 8;
  416. list = net->xfrm.policy_byidx + idx_hash(net, idx);
  417. found = 0;
  418. hlist_for_each_entry(p, entry, list, byidx) {
  419. if (p->index == idx) {
  420. found = 1;
  421. break;
  422. }
  423. }
  424. if (!found)
  425. return idx;
  426. }
  427. }
  428. static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
  429. {
  430. u32 *p1 = (u32 *) s1;
  431. u32 *p2 = (u32 *) s2;
  432. int len = sizeof(struct xfrm_selector) / sizeof(u32);
  433. int i;
  434. for (i = 0; i < len; i++) {
  435. if (p1[i] != p2[i])
  436. return 1;
  437. }
  438. return 0;
  439. }
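/* Insert a policy into the SPD for the given direction. The hash chain is
 * kept ordered by priority; an existing policy with the same type, selector,
 * mark and security context is replaced, unless 'excl' is set, in which case
 * -EEXIST is returned.
 */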
  440. int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
  441. {
  442. struct net *net = xp_net(policy);
  443. struct xfrm_policy *pol;
  444. struct xfrm_policy *delpol;
  445. struct hlist_head *chain;
  446. struct hlist_node *entry, *newpos;
  447. u32 mark = policy->mark.v & policy->mark.m;
  448. write_lock_bh(&xfrm_policy_lock);
  449. chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
  450. delpol = NULL;
  451. newpos = NULL;
  452. hlist_for_each_entry(pol, entry, chain, bydst) {
  453. if (pol->type == policy->type &&
  454. !selector_cmp(&pol->selector, &policy->selector) &&
  455. (mark & pol->mark.m) == pol->mark.v &&
  456. xfrm_sec_ctx_match(pol->security, policy->security) &&
  457. !WARN_ON(delpol)) {
  458. if (excl) {
  459. write_unlock_bh(&xfrm_policy_lock);
  460. return -EEXIST;
  461. }
  462. delpol = pol;
  463. if (policy->priority > pol->priority)
  464. continue;
  465. } else if (policy->priority >= pol->priority) {
  466. newpos = &pol->bydst;
  467. continue;
  468. }
  469. if (delpol)
  470. break;
  471. }
  472. if (newpos)
  473. hlist_add_after(newpos, &policy->bydst);
  474. else
  475. hlist_add_head(&policy->bydst, chain);
  476. xfrm_pol_hold(policy);
  477. net->xfrm.policy_count[dir]++;
  478. atomic_inc(&flow_cache_genid);
  479. if (delpol)
  480. __xfrm_policy_unlink(delpol, dir);
  481. policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
  482. hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
  483. policy->curlft.add_time = get_seconds();
  484. policy->curlft.use_time = 0;
  485. if (!mod_timer(&policy->timer, jiffies + HZ))
  486. xfrm_pol_hold(policy);
  487. list_add(&policy->walk.all, &net->xfrm.policy_all);
  488. write_unlock_bh(&xfrm_policy_lock);
  489. if (delpol)
  490. xfrm_policy_kill(delpol);
  491. else if (xfrm_bydst_should_resize(net, dir, NULL))
  492. schedule_work(&net->xfrm.policy_hash_work);
  493. return 0;
  494. }
  495. EXPORT_SYMBOL(xfrm_policy_insert);
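/* Find a policy by type, mark, selector and security context. When 'delete'
 * is set, the policy is unlinked and later killed once the security hook
 * approves the deletion.
 */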
  496. struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
  497. int dir, struct xfrm_selector *sel,
  498. struct xfrm_sec_ctx *ctx, int delete,
  499. int *err)
  500. {
  501. struct xfrm_policy *pol, *ret;
  502. struct hlist_head *chain;
  503. struct hlist_node *entry;
  504. *err = 0;
  505. write_lock_bh(&xfrm_policy_lock);
  506. chain = policy_hash_bysel(net, sel, sel->family, dir);
  507. ret = NULL;
  508. hlist_for_each_entry(pol, entry, chain, bydst) {
  509. if (pol->type == type &&
  510. (mark & pol->mark.m) == pol->mark.v &&
  511. !selector_cmp(sel, &pol->selector) &&
  512. xfrm_sec_ctx_match(ctx, pol->security)) {
  513. xfrm_pol_hold(pol);
  514. if (delete) {
  515. *err = security_xfrm_policy_delete(
  516. pol->security);
  517. if (*err) {
  518. write_unlock_bh(&xfrm_policy_lock);
  519. return pol;
  520. }
  521. __xfrm_policy_unlink(pol, dir);
  522. }
  523. ret = pol;
  524. break;
  525. }
  526. }
  527. write_unlock_bh(&xfrm_policy_lock);
  528. if (ret && delete)
  529. xfrm_policy_kill(ret);
  530. return ret;
  531. }
  532. EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
  533. struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
  534. int dir, u32 id, int delete, int *err)
  535. {
  536. struct xfrm_policy *pol, *ret;
  537. struct hlist_head *chain;
  538. struct hlist_node *entry;
  539. *err = -ENOENT;
  540. if (xfrm_policy_id2dir(id) != dir)
  541. return NULL;
  542. *err = 0;
  543. write_lock_bh(&xfrm_policy_lock);
  544. chain = net->xfrm.policy_byidx + idx_hash(net, id);
  545. ret = NULL;
  546. hlist_for_each_entry(pol, entry, chain, byidx) {
  547. if (pol->type == type && pol->index == id &&
  548. (mark & pol->mark.m) == pol->mark.v) {
  549. xfrm_pol_hold(pol);
  550. if (delete) {
  551. *err = security_xfrm_policy_delete(
  552. pol->security);
  553. if (*err) {
  554. write_unlock_bh(&xfrm_policy_lock);
  555. return pol;
  556. }
  557. __xfrm_policy_unlink(pol, dir);
  558. }
  559. ret = pol;
  560. break;
  561. }
  562. }
  563. write_unlock_bh(&xfrm_policy_lock);
  564. if (ret && delete)
  565. xfrm_policy_kill(ret);
  566. return ret;
  567. }
  568. EXPORT_SYMBOL(xfrm_policy_byid);
  569. #ifdef CONFIG_SECURITY_NETWORK_XFRM
  570. static inline int
  571. xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
  572. {
  573. int dir, err = 0;
  574. for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
  575. struct xfrm_policy *pol;
  576. struct hlist_node *entry;
  577. int i;
  578. hlist_for_each_entry(pol, entry,
  579. &net->xfrm.policy_inexact[dir], bydst) {
  580. if (pol->type != type)
  581. continue;
  582. err = security_xfrm_policy_delete(pol->security);
  583. if (err) {
  584. xfrm_audit_policy_delete(pol, 0,
  585. audit_info->loginuid,
  586. audit_info->sessionid,
  587. audit_info->secid);
  588. return err;
  589. }
  590. }
  591. for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
  592. hlist_for_each_entry(pol, entry,
  593. net->xfrm.policy_bydst[dir].table + i,
  594. bydst) {
  595. if (pol->type != type)
  596. continue;
  597. err = security_xfrm_policy_delete(
  598. pol->security);
  599. if (err) {
  600. xfrm_audit_policy_delete(pol, 0,
  601. audit_info->loginuid,
  602. audit_info->sessionid,
  603. audit_info->secid);
  604. return err;
  605. }
  606. }
  607. }
  608. }
  609. return err;
  610. }
  611. #else
  612. static inline int
  613. xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
  614. {
  615. return 0;
  616. }
  617. #endif
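/* Delete every policy of the given type in this namespace. The write lock is
 * dropped around each audit/kill step and the scan is restarted afterwards;
 * -ESRCH is returned if nothing was flushed.
 */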
  618. int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
  619. {
  620. int dir, err = 0, cnt = 0;
  621. write_lock_bh(&xfrm_policy_lock);
  622. err = xfrm_policy_flush_secctx_check(net, type, audit_info);
  623. if (err)
  624. goto out;
  625. for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
  626. struct xfrm_policy *pol;
  627. struct hlist_node *entry;
  628. int i;
  629. again1:
  630. hlist_for_each_entry(pol, entry,
  631. &net->xfrm.policy_inexact[dir], bydst) {
  632. if (pol->type != type)
  633. continue;
  634. __xfrm_policy_unlink(pol, dir);
  635. write_unlock_bh(&xfrm_policy_lock);
  636. cnt++;
  637. xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
  638. audit_info->sessionid,
  639. audit_info->secid);
  640. xfrm_policy_kill(pol);
  641. write_lock_bh(&xfrm_policy_lock);
  642. goto again1;
  643. }
  644. for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
  645. again2:
  646. hlist_for_each_entry(pol, entry,
  647. net->xfrm.policy_bydst[dir].table + i,
  648. bydst) {
  649. if (pol->type != type)
  650. continue;
  651. __xfrm_policy_unlink(pol, dir);
  652. write_unlock_bh(&xfrm_policy_lock);
  653. cnt++;
  654. xfrm_audit_policy_delete(pol, 1,
  655. audit_info->loginuid,
  656. audit_info->sessionid,
  657. audit_info->secid);
  658. xfrm_policy_kill(pol);
  659. write_lock_bh(&xfrm_policy_lock);
  660. goto again2;
  661. }
  662. }
  663. }
  664. if (!cnt)
  665. err = -ESRCH;
  666. out:
  667. write_unlock_bh(&xfrm_policy_lock);
  668. return err;
  669. }
  670. EXPORT_SYMBOL(xfrm_policy_flush);
  671. int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
  672. int (*func)(struct xfrm_policy *, int, int, void*),
  673. void *data)
  674. {
  675. struct xfrm_policy *pol;
  676. struct xfrm_policy_walk_entry *x;
  677. int error = 0;
  678. if (walk->type >= XFRM_POLICY_TYPE_MAX &&
  679. walk->type != XFRM_POLICY_TYPE_ANY)
  680. return -EINVAL;
  681. if (list_empty(&walk->walk.all) && walk->seq != 0)
  682. return 0;
  683. write_lock_bh(&xfrm_policy_lock);
  684. if (list_empty(&walk->walk.all))
  685. x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
  686. else
  687. x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
  688. list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
  689. if (x->dead)
  690. continue;
  691. pol = container_of(x, struct xfrm_policy, walk);
  692. if (walk->type != XFRM_POLICY_TYPE_ANY &&
  693. walk->type != pol->type)
  694. continue;
  695. error = func(pol, xfrm_policy_id2dir(pol->index),
  696. walk->seq, data);
  697. if (error) {
  698. list_move_tail(&walk->walk.all, &x->all);
  699. goto out;
  700. }
  701. walk->seq++;
  702. }
  703. if (walk->seq == 0) {
  704. error = -ENOENT;
  705. goto out;
  706. }
  707. list_del_init(&walk->walk.all);
  708. out:
  709. write_unlock_bh(&xfrm_policy_lock);
  710. return error;
  711. }
  712. EXPORT_SYMBOL(xfrm_policy_walk);
  713. void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
  714. {
  715. INIT_LIST_HEAD(&walk->walk.all);
  716. walk->walk.dead = 1;
  717. walk->type = type;
  718. walk->seq = 0;
  719. }
  720. EXPORT_SYMBOL(xfrm_policy_walk_init);
  721. void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
  722. {
  723. if (list_empty(&walk->walk.all))
  724. return;
  725. write_lock_bh(&xfrm_policy_lock);
  726. list_del(&walk->walk.all);
  727. write_unlock_bh(&xfrm_policy_lock);
  728. }
  729. EXPORT_SYMBOL(xfrm_policy_walk_done);
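/* A typical caller sequence for the walk API (illustrative sketch only;
 * dump_one_policy() stands in for the caller's callback):
 *
 *	struct xfrm_policy_walk walk;
 *	int err;
 *
 *	xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_ANY);
 *	err = xfrm_policy_walk(net, &walk, dump_one_policy, data);
 *	xfrm_policy_walk_done(&walk);
 */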
  730. /*
  731. * Find the policy to apply to this flow.
  732. *
  733. * Returns 0 if a policy is found, otherwise a negative errno.
  734. */
  735. static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
  736. u8 type, u16 family, int dir)
  737. {
  738. struct xfrm_selector *sel = &pol->selector;
  739. int match, ret = -ESRCH;
  740. if (pol->family != family ||
  741. (fl->mark & pol->mark.m) != pol->mark.v ||
  742. pol->type != type)
  743. return ret;
  744. match = xfrm_selector_match(sel, fl, family);
  745. if (match)
  746. ret = security_xfrm_policy_lookup(pol->security, fl->secid,
  747. dir);
  748. return ret;
  749. }
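/* Find the best matching policy of the given type for a flow: scan the
 * exact-address hash chain first, then the inexact chain, preferring the
 * match with the lowest priority value (an exact-chain match wins a tie).
 */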
  750. static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
  751. struct flowi *fl,
  752. u16 family, u8 dir)
  753. {
  754. int err;
  755. struct xfrm_policy *pol, *ret;
  756. xfrm_address_t *daddr, *saddr;
  757. struct hlist_node *entry;
  758. struct hlist_head *chain;
  759. u32 priority = ~0U;
  760. daddr = xfrm_flowi_daddr(fl, family);
  761. saddr = xfrm_flowi_saddr(fl, family);
  762. if (unlikely(!daddr || !saddr))
  763. return NULL;
  764. read_lock_bh(&xfrm_policy_lock);
  765. chain = policy_hash_direct(net, daddr, saddr, family, dir);
  766. ret = NULL;
  767. hlist_for_each_entry(pol, entry, chain, bydst) {
  768. err = xfrm_policy_match(pol, fl, type, family, dir);
  769. if (err) {
  770. if (err == -ESRCH)
  771. continue;
  772. else {
  773. ret = ERR_PTR(err);
  774. goto fail;
  775. }
  776. } else {
  777. ret = pol;
  778. priority = ret->priority;
  779. break;
  780. }
  781. }
  782. chain = &net->xfrm.policy_inexact[dir];
  783. hlist_for_each_entry(pol, entry, chain, bydst) {
  784. err = xfrm_policy_match(pol, fl, type, family, dir);
  785. if (err) {
  786. if (err == -ESRCH)
  787. continue;
  788. else {
  789. ret = ERR_PTR(err);
  790. goto fail;
  791. }
  792. } else if (pol->priority < priority) {
  793. ret = pol;
  794. break;
  795. }
  796. }
  797. if (ret)
  798. xfrm_pol_hold(ret);
  799. fail:
  800. read_unlock_bh(&xfrm_policy_lock);
  801. return ret;
  802. }
  803. static struct xfrm_policy *
  804. __xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir)
  805. {
  806. #ifdef CONFIG_XFRM_SUB_POLICY
  807. struct xfrm_policy *pol;
  808. pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
  809. if (pol != NULL)
  810. return pol;
  811. #endif
  812. return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
  813. }
  814. static struct flow_cache_object *
  815. xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family,
  816. u8 dir, struct flow_cache_object *old_obj, void *ctx)
  817. {
  818. struct xfrm_policy *pol;
  819. if (old_obj)
  820. xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));
  821. pol = __xfrm_policy_lookup(net, fl, family, dir);
  822. if (IS_ERR_OR_NULL(pol))
  823. return ERR_CAST(pol);
  824. /* Resolver returns two references:
  825. * one for the cache and one for the caller of flow_cache_lookup() */
  826. xfrm_pol_hold(pol);
  827. return &pol->flo;
  828. }
  829. static inline int policy_to_flow_dir(int dir)
  830. {
  831. if (XFRM_POLICY_IN == FLOW_DIR_IN &&
  832. XFRM_POLICY_OUT == FLOW_DIR_OUT &&
  833. XFRM_POLICY_FWD == FLOW_DIR_FWD)
  834. return dir;
  835. switch (dir) {
  836. default:
  837. case XFRM_POLICY_IN:
  838. return FLOW_DIR_IN;
  839. case XFRM_POLICY_OUT:
  840. return FLOW_DIR_OUT;
  841. case XFRM_POLICY_FWD:
  842. return FLOW_DIR_FWD;
  843. }
  844. }
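/* Look up the per-socket policy for this direction: the selector, the socket
 * mark and the security context must all match before a reference is
 * returned.
 */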
  845. static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
  846. {
  847. struct xfrm_policy *pol;
  848. read_lock_bh(&xfrm_policy_lock);
  849. if ((pol = sk->sk_policy[dir]) != NULL) {
  850. int match = xfrm_selector_match(&pol->selector, fl,
  851. sk->sk_family);
  852. int err = 0;
  853. if (match) {
  854. if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
  855. pol = NULL;
  856. goto out;
  857. }
  858. err = security_xfrm_policy_lookup(pol->security,
  859. fl->secid,
  860. policy_to_flow_dir(dir));
  861. if (!err)
  862. xfrm_pol_hold(pol);
  863. else if (err == -ESRCH)
  864. pol = NULL;
  865. else
  866. pol = ERR_PTR(err);
  867. } else
  868. pol = NULL;
  869. }
  870. out:
  871. read_unlock_bh(&xfrm_policy_lock);
  872. return pol;
  873. }
  874. static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
  875. {
  876. struct net *net = xp_net(pol);
  877. struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
  878. pol->family, dir);
  879. list_add(&pol->walk.all, &net->xfrm.policy_all);
  880. hlist_add_head(&pol->bydst, chain);
  881. hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
  882. net->xfrm.policy_count[dir]++;
  883. xfrm_pol_hold(pol);
  884. if (xfrm_bydst_should_resize(net, dir, NULL))
  885. schedule_work(&net->xfrm.policy_hash_work);
  886. }
  887. static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
  888. int dir)
  889. {
  890. struct net *net = xp_net(pol);
  891. if (hlist_unhashed(&pol->bydst))
  892. return NULL;
  893. hlist_del(&pol->bydst);
  894. hlist_del(&pol->byidx);
  895. list_del(&pol->walk.all);
  896. net->xfrm.policy_count[dir]--;
  897. return pol;
  898. }
  899. int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
  900. {
  901. write_lock_bh(&xfrm_policy_lock);
  902. pol = __xfrm_policy_unlink(pol, dir);
  903. write_unlock_bh(&xfrm_policy_lock);
  904. if (pol) {
  905. xfrm_policy_kill(pol);
  906. return 0;
  907. }
  908. return -ENOENT;
  909. }
  910. EXPORT_SYMBOL(xfrm_policy_delete);
  911. int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
  912. {
  913. struct net *net = xp_net(pol);
  914. struct xfrm_policy *old_pol;
  915. #ifdef CONFIG_XFRM_SUB_POLICY
  916. if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
  917. return -EINVAL;
  918. #endif
  919. write_lock_bh(&xfrm_policy_lock);
  920. old_pol = sk->sk_policy[dir];
  921. sk->sk_policy[dir] = pol;
  922. if (pol) {
  923. pol->curlft.add_time = get_seconds();
  924. pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
  925. __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
  926. }
  927. if (old_pol)
  928. /* Unlinking always succeeds. This is the only function
  929. * allowed to delete or replace a socket policy.
  930. */
  931. __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
  932. write_unlock_bh(&xfrm_policy_lock);
  933. if (old_pol) {
  934. xfrm_policy_kill(old_pol);
  935. }
  936. return 0;
  937. }
  938. static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
  939. {
  940. struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
  941. if (newp) {
  942. newp->selector = old->selector;
  943. if (security_xfrm_policy_clone(old->security,
  944. &newp->security)) {
  945. kfree(newp);
  946. return NULL; /* ENOMEM */
  947. }
  948. newp->lft = old->lft;
  949. newp->curlft = old->curlft;
  950. newp->mark = old->mark;
  951. newp->action = old->action;
  952. newp->flags = old->flags;
  953. newp->xfrm_nr = old->xfrm_nr;
  954. newp->index = old->index;
  955. newp->type = old->type;
  956. memcpy(newp->xfrm_vec, old->xfrm_vec,
  957. newp->xfrm_nr*sizeof(struct xfrm_tmpl));
  958. write_lock_bh(&xfrm_policy_lock);
  959. __xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
  960. write_unlock_bh(&xfrm_policy_lock);
  961. xfrm_pol_put(newp);
  962. }
  963. return newp;
  964. }
  965. int __xfrm_sk_clone_policy(struct sock *sk)
  966. {
  967. struct xfrm_policy *p0 = sk->sk_policy[0],
  968. *p1 = sk->sk_policy[1];
  969. sk->sk_policy[0] = sk->sk_policy[1] = NULL;
  970. if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
  971. return -ENOMEM;
  972. if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
  973. return -ENOMEM;
  974. return 0;
  975. }
  976. static int
  977. xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
  978. unsigned short family)
  979. {
  980. int err;
  981. struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  982. if (unlikely(afinfo == NULL))
  983. return -EINVAL;
  984. err = afinfo->get_saddr(net, local, remote);
  985. xfrm_policy_put_afinfo(afinfo);
  986. return err;
  987. }
  988. /* Resolve list of templates for the flow, given policy. */
  989. static int
  990. xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
  991. struct xfrm_state **xfrm,
  992. unsigned short family)
  993. {
  994. struct net *net = xp_net(policy);
  995. int nx;
  996. int i, error;
  997. xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
  998. xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
  999. xfrm_address_t tmp;
  1000. for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
  1001. struct xfrm_state *x;
  1002. xfrm_address_t *remote = daddr;
  1003. xfrm_address_t *local = saddr;
  1004. struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
  1005. if (tmpl->mode == XFRM_MODE_TUNNEL ||
  1006. tmpl->mode == XFRM_MODE_BEET) {
  1007. remote = &tmpl->id.daddr;
  1008. local = &tmpl->saddr;
  1009. family = tmpl->encap_family;
  1010. if (xfrm_addr_any(local, family)) {
  1011. error = xfrm_get_saddr(net, &tmp, remote, family);
  1012. if (error)
  1013. goto fail;
  1014. local = &tmp;
  1015. }
  1016. }
  1017. x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
  1018. if (x && x->km.state == XFRM_STATE_VALID) {
  1019. xfrm[nx++] = x;
  1020. daddr = remote;
  1021. saddr = local;
  1022. continue;
  1023. }
  1024. if (x) {
  1025. error = (x->km.state == XFRM_STATE_ERROR ?
  1026. -EINVAL : -EAGAIN);
  1027. xfrm_state_put(x);
  1028. }
  1029. else if (error == -ESRCH)
  1030. error = -EAGAIN;
  1031. if (!tmpl->optional)
  1032. goto fail;
  1033. }
  1034. return nx;
  1035. fail:
  1036. for (nx--; nx>=0; nx--)
  1037. xfrm_state_put(xfrm[nx]);
  1038. return error;
  1039. }
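/* Resolve templates for all policies feeding one bundle; when more than one
 * policy is involved, the collected states are sorted into outbound
 * processing order.
 */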
  1040. static int
  1041. xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
  1042. struct xfrm_state **xfrm,
  1043. unsigned short family)
  1044. {
  1045. struct xfrm_state *tp[XFRM_MAX_DEPTH];
  1046. struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
  1047. int cnx = 0;
  1048. int error;
  1049. int ret;
  1050. int i;
  1051. for (i = 0; i < npols; i++) {
  1052. if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
  1053. error = -ENOBUFS;
  1054. goto fail;
  1055. }
  1056. ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
  1057. if (ret < 0) {
  1058. error = ret;
  1059. goto fail;
  1060. } else
  1061. cnx += ret;
  1062. }
  1063. /* found states are sorted for outbound processing */
  1064. if (npols > 1)
  1065. xfrm_state_sort(xfrm, tpp, cnx, family);
  1066. return cnx;
  1067. fail:
  1068. for (cnx--; cnx>=0; cnx--)
  1069. xfrm_state_put(tpp[cnx]);
  1070. return error;
  1071. }
  1072. /* Check that the bundle accepts the flow and its components are
  1073. * still valid.
  1074. */
  1075. static inline int xfrm_get_tos(struct flowi *fl, int family)
  1076. {
  1077. struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  1078. int tos;
  1079. if (!afinfo)
  1080. return -EINVAL;
  1081. tos = afinfo->get_tos(fl);
  1082. xfrm_policy_put_afinfo(afinfo);
  1083. return tos;
  1084. }
  1085. static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
  1086. {
  1087. struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
  1088. struct dst_entry *dst = &xdst->u.dst;
  1089. if (xdst->route == NULL) {
  1090. /* Dummy bundle - if it has xfrms, we were not
  1091. * able to build a bundle because template resolution failed.
  1092. * It means we need to retry resolution. */
  1093. if (xdst->num_xfrms > 0)
  1094. return NULL;
  1095. } else {
  1096. /* Real bundle */
  1097. if (stale_bundle(dst))
  1098. return NULL;
  1099. }
  1100. dst_hold(dst);
  1101. return flo;
  1102. }
  1103. static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
  1104. {
  1105. struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
  1106. struct dst_entry *dst = &xdst->u.dst;
  1107. if (!xdst->route)
  1108. return 0;
  1109. if (stale_bundle(dst))
  1110. return 0;
  1111. return 1;
  1112. }
  1113. static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
  1114. {
  1115. struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
  1116. struct dst_entry *dst = &xdst->u.dst;
  1117. dst_free(dst);
  1118. }
  1119. static const struct flow_cache_ops xfrm_bundle_fc_ops = {
  1120. .get = xfrm_bundle_flo_get,
  1121. .check = xfrm_bundle_flo_check,
  1122. .delete = xfrm_bundle_flo_delete,
  1123. };
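/* Allocate an xfrm_dst from the per-family dst_ops of this namespace and
 * attach the bundle flow-cache operations.
 */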
  1124. static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
  1125. {
  1126. struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  1127. struct dst_ops *dst_ops;
  1128. struct xfrm_dst *xdst;
  1129. if (!afinfo)
  1130. return ERR_PTR(-EINVAL);
  1131. switch (family) {
  1132. case AF_INET:
  1133. dst_ops = &net->xfrm.xfrm4_dst_ops;
  1134. break;
  1135. #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
  1136. case AF_INET6:
  1137. dst_ops = &net->xfrm.xfrm6_dst_ops;
  1138. break;
  1139. #endif
  1140. default:
  1141. BUG();
  1142. }
  1143. xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS);
  1144. xfrm_policy_put_afinfo(afinfo);
  1145. xdst->flo.ops = &xfrm_bundle_fc_ops;
  1146. return xdst;
  1147. }
  1148. static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
  1149. int nfheader_len)
  1150. {
  1151. struct xfrm_policy_afinfo *afinfo =
  1152. xfrm_policy_get_afinfo(dst->ops->family);
  1153. int err;
  1154. if (!afinfo)
  1155. return -EINVAL;
  1156. err = afinfo->init_path(path, dst, nfheader_len);
  1157. xfrm_policy_put_afinfo(afinfo);
  1158. return err;
  1159. }
  1160. static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
  1161. struct flowi *fl)
  1162. {
  1163. struct xfrm_policy_afinfo *afinfo =
  1164. xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
  1165. int err;
  1166. if (!afinfo)
  1167. return -EINVAL;
  1168. err = afinfo->fill_dst(xdst, dev, fl);
  1169. xfrm_policy_put_afinfo(afinfo);
  1170. return err;
  1171. }
  1172. /* Allocate a chain of dst_entry's, attach the known xfrm's, calculate
  1173. * all the metrics... In short, bundle a bundle.
  1174. */
  1175. static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
  1176. struct xfrm_state **xfrm, int nx,
  1177. struct flowi *fl,
  1178. struct dst_entry *dst)
  1179. {
  1180. struct net *net = xp_net(policy);
  1181. unsigned long now = jiffies;
  1182. struct net_device *dev;
  1183. struct dst_entry *dst_prev = NULL;
  1184. struct dst_entry *dst0 = NULL;
  1185. int i = 0;
  1186. int err;
  1187. int header_len = 0;
  1188. int nfheader_len = 0;
  1189. int trailer_len = 0;
  1190. int tos;
  1191. int family = policy->selector.family;
  1192. xfrm_address_t saddr, daddr;
  1193. xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
  1194. tos = xfrm_get_tos(fl, family);
  1195. err = tos;
  1196. if (tos < 0)
  1197. goto put_states;
  1198. dst_hold(dst);
  1199. for (; i < nx; i++) {
  1200. struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
  1201. struct dst_entry *dst1 = &xdst->u.dst;
  1202. err = PTR_ERR(xdst);
  1203. if (IS_ERR(xdst)) {
  1204. dst_release(dst);
  1205. goto put_states;
  1206. }
  1207. if (!dst_prev)
  1208. dst0 = dst1;
  1209. else {
  1210. dst_prev->child = dst_clone(dst1);
  1211. dst1->flags |= DST_NOHASH;
  1212. }
  1213. xdst->route = dst;
  1214. memcpy(&dst1->metrics, &dst->metrics, sizeof(dst->metrics));
  1215. if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
  1216. family = xfrm[i]->props.family;
  1217. dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
  1218. family);
  1219. err = PTR_ERR(dst);
  1220. if (IS_ERR(dst))
  1221. goto put_states;
  1222. } else
  1223. dst_hold(dst);
  1224. dst1->xfrm = xfrm[i];
  1225. xdst->xfrm_genid = xfrm[i]->genid;
  1226. dst1->obsolete = -1;
  1227. dst1->flags |= DST_HOST;
  1228. dst1->lastuse = now;
  1229. dst1->input = dst_discard;
  1230. dst1->output = xfrm[i]->outer_mode->afinfo->output;
  1231. dst1->next = dst_prev;
  1232. dst_prev = dst1;
  1233. header_len += xfrm[i]->props.header_len;
  1234. if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
  1235. nfheader_len += xfrm[i]->props.header_len;
  1236. trailer_len += xfrm[i]->props.trailer_len;
  1237. }
  1238. dst_prev->child = dst;
  1239. dst0->path = dst;
  1240. err = -ENODEV;
  1241. dev = dst->dev;
  1242. if (!dev)
  1243. goto free_dst;
  1244. /* Copy neighbour for reachability confirmation */
  1245. dst0->neighbour = neigh_clone(dst->neighbour);
  1246. xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
  1247. xfrm_init_pmtu(dst_prev);
  1248. for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
  1249. struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
  1250. err = xfrm_fill_dst(xdst, dev, fl);
  1251. if (err)
  1252. goto free_dst;
  1253. dst_prev->header_len = header_len;
  1254. dst_prev->trailer_len = trailer_len;
  1255. header_len -= xdst->u.dst.xfrm->props.header_len;
  1256. trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
  1257. }
  1258. out:
  1259. return dst0;
  1260. put_states:
  1261. for (; i < nx; i++)
  1262. xfrm_state_put(xfrm[i]);
  1263. free_dst:
  1264. if (dst0)
  1265. dst_free(dst0);
  1266. dst0 = ERR_PTR(err);
  1267. goto out;
  1268. }
  1269. static int inline
  1270. xfrm_dst_alloc_copy(void **target, void *src, int size)
  1271. {
  1272. if (!*target) {
  1273. *target = kmalloc(size, GFP_ATOMIC);
  1274. if (!*target)
  1275. return -ENOMEM;
  1276. }
  1277. memcpy(*target, src, size);
  1278. return 0;
  1279. }
  1280. static int inline
  1281. xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
  1282. {
  1283. #ifdef CONFIG_XFRM_SUB_POLICY
  1284. struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
  1285. return xfrm_dst_alloc_copy((void **)&(xdst->partner),
  1286. sel, sizeof(*sel));
  1287. #else
  1288. return 0;
  1289. #endif
  1290. }
  1291. static int inline
  1292. xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
  1293. {
  1294. #ifdef CONFIG_XFRM_SUB_POLICY
  1295. struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
  1296. return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
  1297. #else
  1298. return 0;
  1299. #endif
  1300. }
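/* Starting from the first matched policy, pull in the main policy when a
 * sub-policy matched, add up how many transforms the bundle will need, and
 * set *num_xfrms to -1 if any policy blocks the flow.
 */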
  1301. static int xfrm_expand_policies(struct flowi *fl, u16 family,
  1302. struct xfrm_policy **pols,
  1303. int *num_pols, int *num_xfrms)
  1304. {
  1305. int i;
  1306. if (*num_pols == 0 || !pols[0]) {
  1307. *num_pols = 0;
  1308. *num_xfrms = 0;
  1309. return 0;
  1310. }
  1311. if (IS_ERR(pols[0]))
  1312. return PTR_ERR(pols[0]);
  1313. *num_xfrms = pols[0]->xfrm_nr;
  1314. #ifdef CONFIG_XFRM_SUB_POLICY
  1315. if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
  1316. pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
  1317. pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
  1318. XFRM_POLICY_TYPE_MAIN,
  1319. fl, family,
  1320. XFRM_POLICY_OUT);
  1321. if (pols[1]) {
  1322. if (IS_ERR(pols[1])) {
  1323. xfrm_pols_put(pols, *num_pols);
  1324. return PTR_ERR(pols[1]);
  1325. }
  1326. (*num_pols) ++;
  1327. (*num_xfrms) += pols[1]->xfrm_nr;
  1328. }
  1329. }
  1330. #endif
  1331. for (i = 0; i < *num_pols; i++) {
  1332. if (pols[i]->action != XFRM_POLICY_ALLOW) {
  1333. *num_xfrms = -1;
  1334. break;
  1335. }
  1336. }
  1337. return 0;
  1338. }
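/* Resolve states for the given policies and build a bundle rooted at
 * dst_orig, recording the policies and the generation id of the first one in
 * the resulting xfrm_dst.
 */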
  1339. static struct xfrm_dst *
  1340. xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
  1341. struct flowi *fl, u16 family,
  1342. struct dst_entry *dst_orig)
  1343. {
  1344. struct net *net = xp_net(pols[0]);
  1345. struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
  1346. struct dst_entry *dst;
  1347. struct xfrm_dst *xdst;
  1348. int err;
  1349. /* Try to instantiate a bundle */
  1350. err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
  1351. if (err < 0) {
  1352. if (err != -EAGAIN)
  1353. XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
  1354. return ERR_PTR(err);
  1355. }
  1356. dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
  1357. if (IS_ERR(dst)) {
  1358. XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
  1359. return ERR_CAST(dst);
  1360. }
  1361. xdst = (struct xfrm_dst *)dst;
  1362. xdst->num_xfrms = err;
  1363. if (num_pols > 1)
  1364. err = xfrm_dst_update_parent(dst, &pols[1]->selector);
  1365. else
  1366. err = xfrm_dst_update_origin(dst, fl);
  1367. if (unlikely(err)) {
  1368. dst_free(dst);
  1369. XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
  1370. return ERR_PTR(err);
  1371. }
  1372. xdst->num_pols = num_pols;
  1373. memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
  1374. xdst->policy_genid = atomic_read(&pols[0]->genid);
  1375. return xdst;
  1376. }
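/* Flow-cache resolver for bundles: reuse the cached bundle while its
 * policies are still alive; otherwise look the policies up again and build a
 * new bundle, falling back to a dummy bundle when the states cannot be
 * resolved yet.
 */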
static struct flow_cache_object *
xfrm_bundle_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir,
                   struct flow_cache_object *oldflo, void *ctx)
{
        struct dst_entry *dst_orig = (struct dst_entry *)ctx;
        struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
        struct xfrm_dst *xdst, *new_xdst;
        int num_pols = 0, num_xfrms = 0, i, err, pol_dead;

        /* Check if the policies from the old bundle are usable */
        xdst = NULL;
        if (oldflo) {
                xdst = container_of(oldflo, struct xfrm_dst, flo);
                num_pols = xdst->num_pols;
                num_xfrms = xdst->num_xfrms;
                pol_dead = 0;
                for (i = 0; i < num_pols; i++) {
                        pols[i] = xdst->pols[i];
                        pol_dead |= pols[i]->walk.dead;
                }
                if (pol_dead) {
                        dst_free(&xdst->u.dst);
                        xdst = NULL;
                        num_pols = 0;
                        num_xfrms = 0;
                        oldflo = NULL;
                }
        }

        /* Resolve policies to use if we couldn't get them from
         * the previous cache entry */
        if (xdst == NULL) {
                num_pols = 1;
                pols[0] = __xfrm_policy_lookup(net, fl, family, dir);
                err = xfrm_expand_policies(fl, family, pols,
                                           &num_pols, &num_xfrms);
                if (err < 0)
                        goto inc_error;
                if (num_pols == 0)
                        return NULL;
                if (num_xfrms <= 0)
                        goto make_dummy_bundle;
        }

        new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
        if (IS_ERR(new_xdst)) {
                err = PTR_ERR(new_xdst);
                if (err != -EAGAIN)
                        goto error;
                if (oldflo == NULL)
                        goto make_dummy_bundle;
                dst_hold(&xdst->u.dst);
                return oldflo;
        }

        /* Kill the previous bundle */
        if (xdst) {
                /* The policies were stolen for the newly generated bundle */
                xdst->num_pols = 0;
                dst_free(&xdst->u.dst);
        }

        /* The flow cache does not hold a reference (it dst_free()'s),
         * but we do need to return one reference for the original caller */
        dst_hold(&new_xdst->u.dst);
        return &new_xdst->flo;

make_dummy_bundle:
        /* We found policies, but there are no bundles to instantiate:
         * either because the policy blocks, has no transformations, or
         * we could not build a template (no xfrm_states). */
        xdst = xfrm_alloc_dst(net, family);
        if (IS_ERR(xdst)) {
                xfrm_pols_put(pols, num_pols);
                return ERR_CAST(xdst);
        }
        xdst->num_pols = num_pols;
        xdst->num_xfrms = num_xfrms;
        memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);

        dst_hold(&xdst->u.dst);
        return &xdst->flo;

inc_error:
        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
error:
        if (xdst != NULL)
                dst_free(&xdst->u.dst);
        else
                xfrm_pols_put(pols, num_pols);
        return ERR_PTR(err);
}

/* Main function: finds/creates a bundle for the given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
                  struct sock *sk, int flags)
{
        struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
        struct flow_cache_object *flo;
        struct xfrm_dst *xdst;
        struct dst_entry *dst, *dst_orig = *dst_p, *route;
        u16 family = dst_orig->ops->family;
        u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
        int i, err, num_pols, num_xfrms = 0, drop_pols = 0;

restart:
        dst = NULL;
        xdst = NULL;
        route = NULL;

        if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
                num_pols = 1;
                pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
                err = xfrm_expand_policies(fl, family, pols,
                                           &num_pols, &num_xfrms);
                if (err < 0)
                        goto dropdst;

                if (num_pols) {
                        if (num_xfrms <= 0) {
                                drop_pols = num_pols;
                                goto no_transform;
                        }

                        xdst = xfrm_resolve_and_create_bundle(
                                        pols, num_pols, fl,
                                        family, dst_orig);
                        if (IS_ERR(xdst)) {
                                xfrm_pols_put(pols, num_pols);
                                err = PTR_ERR(xdst);
                                goto dropdst;
                        }

                        spin_lock_bh(&xfrm_policy_sk_bundle_lock);
                        xdst->u.dst.next = xfrm_policy_sk_bundles;
                        xfrm_policy_sk_bundles = &xdst->u.dst;
                        spin_unlock_bh(&xfrm_policy_sk_bundle_lock);

                        route = xdst->route;
                }
        }

        if (xdst == NULL) {
                /* To accelerate a bit... */
                if ((dst_orig->flags & DST_NOXFRM) ||
                    !net->xfrm.policy_count[XFRM_POLICY_OUT])
                        goto nopol;

                flo = flow_cache_lookup(net, fl, family, dir,
                                        xfrm_bundle_lookup, dst_orig);
                if (flo == NULL)
                        goto nopol;
                if (IS_ERR(flo)) {
                        err = PTR_ERR(flo);
                        goto dropdst;
                }
                xdst = container_of(flo, struct xfrm_dst, flo);

                num_pols = xdst->num_pols;
                num_xfrms = xdst->num_xfrms;
                memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
                route = xdst->route;
        }

        dst = &xdst->u.dst;
        if (route == NULL && num_xfrms > 0) {
                /* The only case in which xfrm_bundle_lookup() returns a
                 * bundle with a null route is when the template could
                 * not be resolved.  It means policies are there, but
                 * the bundle could not be created, since we don't yet
                 * have the xfrm_states.  We need to wait for the KM to
                 * negotiate new SAs or bail out with an error. */
                if (net->xfrm.sysctl_larval_drop) {
                        /* EREMOTE tells the caller to generate
                         * a one-shot blackhole route. */
                        dst_release(dst);
                        xfrm_pols_put(pols, drop_pols);
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
                        return -EREMOTE;
                }
                if (flags & XFRM_LOOKUP_WAIT) {
                        DECLARE_WAITQUEUE(wait, current);

                        add_wait_queue(&net->xfrm.km_waitq, &wait);
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule();
                        set_current_state(TASK_RUNNING);
                        remove_wait_queue(&net->xfrm.km_waitq, &wait);

                        if (!signal_pending(current)) {
                                dst_release(dst);
                                goto restart;
                        }

                        err = -ERESTART;
                } else
                        err = -EAGAIN;

                XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
                goto error;
        }

no_transform:
        if (num_pols == 0)
                goto nopol;

        if ((flags & XFRM_LOOKUP_ICMP) &&
            !(pols[0]->flags & XFRM_POLICY_ICMP)) {
                err = -ENOENT;
                goto error;
        }

        for (i = 0; i < num_pols; i++)
                pols[i]->curlft.use_time = get_seconds();

        if (num_xfrms < 0) {
                /* Prohibit the flow */
                XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
                err = -EPERM;
                goto error;
        } else if (num_xfrms > 0) {
                /* Flow transformed */
                *dst_p = dst;
                dst_release(dst_orig);
        } else {
                /* Flow passes untransformed */
                dst_release(dst);
        }
ok:
        xfrm_pols_put(pols, drop_pols);
        return 0;

nopol:
        if (!(flags & XFRM_LOOKUP_ICMP))
                goto ok;
        err = -ENOENT;
error:
        dst_release(dst);
dropdst:
        dst_release(dst_orig);
        *dst_p = NULL;
        xfrm_pols_put(pols, drop_pols);
        return err;
}
EXPORT_SYMBOL(__xfrm_lookup);
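
/* Wrapper for callers that cannot handle the -EREMOTE "generate a
 * one-shot blackhole route" hint from __xfrm_lookup(): the original
 * route is released and a plain -EAGAIN is returned instead.
 */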
int xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
                struct sock *sk, int flags)
{
        int err = __xfrm_lookup(net, dst_p, fl, sk, flags);

        if (err == -EREMOTE) {
                dst_release(*dst_p);
                *dst_p = NULL;
                err = -EAGAIN;
        }

        return err;
}
EXPORT_SYMBOL(xfrm_lookup);
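
/* Give the state at secpath index idx a chance to signal the policy
 * failure back to the sender via its type's ->reject() handler, where
 * the state type provides one.
 */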
static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
{
        struct xfrm_state *x;

        if (!skb->sp || idx < 0 || idx >= skb->sp->len)
                return 0;
        x = skb->sp->xvec[idx];
        if (!x->type->reject)
                return 0;
        return x->type->reject(x, skb, fl);
}

/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we do this in a maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have the policy cached on them.
 */
static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
              unsigned short family)
{
        if (xfrm_state_kern(x))
                return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
        return  x->id.proto == tmpl->id.proto &&
                (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
                (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
                x->props.mode == tmpl->mode &&
                (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
                 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
                !(x->props.mode != XFRM_MODE_TRANSPORT &&
                  xfrm_state_addr_cmp(tmpl, x, family));
}

/*
 * 0 or more than 0 is returned when validation succeeds (either a bypass
 * because of an optional transport-mode template, or the next index of
 * the secpath state matched against the template).
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
               unsigned short family)
{
        int idx = start;

        if (tmpl->optional) {
                if (tmpl->mode == XFRM_MODE_TRANSPORT)
                        return start;
        } else
                start = -1;
        for (; idx < sp->len; idx++) {
                if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
                        return ++idx;
                if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
                        if (start == -1)
                                start = -2-idx;
                        break;
                }
        }
        return start;
}
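
/* Decode the flow key for this packet into fl using the per-family
 * decode_session handler; "reverse" asks for the return direction of
 * the flow (used e.g. when validating ICMP errors).  The security label
 * is decoded as well.
 */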
int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
                          unsigned int family, int reverse)
{
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
        int err;

        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;

        afinfo->decode_session(skb, fl, reverse);
        err = security_xfrm_decode_session(skb, &fl->secid);
        xfrm_policy_put_afinfo(afinfo);
        return err;
}
EXPORT_SYMBOL(__xfrm_decode_session);
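
/* Scan the secpath from index k onwards for a state that is not in
 * transport mode; returns 1 and stores its index in *idxp when one is
 * found.
 */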
static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp)
{
        for (; k < sp->len; k++) {
                if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
                        *idxp = k;
                        return 1;
                }
        }

        return 0;
}
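
/* Inbound policy verdict for skb: returns 1 when the packet's secpath
 * satisfies the applicable policies (the socket policy first, then the
 * main/sub policies from the flow cache), and 0 when the packet must be
 * dropped.
 */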
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
                        unsigned short family)
{
        struct net *net = dev_net(skb->dev);
        struct xfrm_policy *pol;
        struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
        int npols = 0;
        int xfrm_nr;
        int pi;
        int reverse;
        struct flowi fl;
        u8 fl_dir;
        int xerr_idx = -1;

        reverse = dir & ~XFRM_POLICY_MASK;
        dir &= XFRM_POLICY_MASK;
        fl_dir = policy_to_flow_dir(dir);

        if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
                XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
                return 0;
        }

        nf_nat_decode_session(skb, &fl, family);

        /* First, check the used SAs against their selectors. */
        if (skb->sp) {
                int i;

                for (i = skb->sp->len - 1; i >= 0; i--) {
                        struct xfrm_state *x = skb->sp->xvec[i];
                        if (!xfrm_selector_match(&x->sel, &fl, family)) {
                                XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
                                return 0;
                        }
                }
        }

        pol = NULL;
        if (sk && sk->sk_policy[dir]) {
                pol = xfrm_sk_policy_lookup(sk, dir, &fl);
                if (IS_ERR(pol)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
                        return 0;
                }
        }

        if (!pol) {
                struct flow_cache_object *flo;

                flo = flow_cache_lookup(net, &fl, family, fl_dir,
                                        xfrm_policy_lookup, NULL);
                if (IS_ERR_OR_NULL(flo))
                        pol = ERR_CAST(flo);
                else
                        pol = container_of(flo, struct xfrm_policy, flo);
        }

        if (IS_ERR(pol)) {
                XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
                return 0;
        }

        if (!pol) {
                if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
                        xfrm_secpath_reject(xerr_idx, skb, &fl);
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
                        return 0;
                }
                return 1;
        }

        pol->curlft.use_time = get_seconds();

        pols[0] = pol;
        npols++;
#ifdef CONFIG_XFRM_SUB_POLICY
        if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
                pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
                                                    &fl, family,
                                                    XFRM_POLICY_IN);
                if (pols[1]) {
                        if (IS_ERR(pols[1])) {
                                XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
                                return 0;
                        }
                        pols[1]->curlft.use_time = get_seconds();
                        npols++;
                }
        }
#endif

        if (pol->action == XFRM_POLICY_ALLOW) {
                struct sec_path *sp;
                static struct sec_path dummy;
                struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
                struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
                struct xfrm_tmpl **tpp = tp;
                int ti = 0;
                int i, k;

                if ((sp = skb->sp) == NULL)
                        sp = &dummy;

                for (pi = 0; pi < npols; pi++) {
                        if (pols[pi] != pol &&
                            pols[pi]->action != XFRM_POLICY_ALLOW) {
                                XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
                                goto reject;
                        }
                        if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
                                XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
                                goto reject_error;
                        }
                        for (i = 0; i < pols[pi]->xfrm_nr; i++)
                                tpp[ti++] = &pols[pi]->xfrm_vec[i];
                }
                xfrm_nr = ti;
                if (npols > 1) {
                        xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
                        tpp = stp;
                }

                /* For each tunnel xfrm, find the first matching tmpl.
                 * For each tmpl before that, find the corresponding xfrm.
                 * Order is _important_. Later we will implement some
                 * barriers, but at the moment barriers are implied
                 * between each two transformations.
                 */
                for (i = xfrm_nr - 1, k = 0; i >= 0; i--) {
                        k = xfrm_policy_ok(tpp[i], sp, k, family);
                        if (k < 0) {
                                if (k < -1)
                                        /* "-2 - errored_index" returned */
                                        xerr_idx = -(2 + k);
                                XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
                                goto reject;
                        }
                }

                if (secpath_has_nontransport(sp, k, &xerr_idx)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
                        goto reject;
                }

                xfrm_pols_put(pols, npols);
                return 1;
        }
        XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);

reject:
        xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
        xfrm_pols_put(pols, npols);
        return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);

int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
        struct net *net = dev_net(skb->dev);
        struct flowi fl;
        struct dst_entry *dst;
        int res;

        if (xfrm_decode_session(skb, &fl, family) < 0) {
                XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
                return 0;
        }

        skb_dst_force(skb);

        dst = skb_dst(skb);
        res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0;
        skb_dst_set(skb, dst);
        return res;
}
EXPORT_SYMBOL(__xfrm_route_forward);

/* Optimize later using cookies and generation ids. */
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
        /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
         * to "-1" to force all XFRM destinations to get validated by
         * dst_ops->check on every use.  We do this because when a
         * normal route referenced by an XFRM dst is obsoleted we do
         * not go looking around for all parent referencing XFRM dsts
         * so that we can invalidate them.  It is just too much work.
         * Instead we make the checks here on every use.  For example:
         *
         *      XFRM dst A --> IPv4 dst X
         *
         * X is the "xdst->route" of A (X is also the "dst->path" of A
         * in this example).  If X is marked obsolete, "A" will not
         * notice.  That's what we are validating here via the
         * stale_bundle() check.
         *
         * When a policy's bundle is pruned, we dst_free() the XFRM
         * dst which causes its ->obsolete field to be set to a
         * positive non-zero integer.  If an XFRM dst has been pruned
         * like this, we want to force a new route lookup.
         */
        if (dst->obsolete < 0 && !stale_bundle(dst))
                return dst;

        return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
        return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
}

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
        while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
                dst->dev = dev_net(dev)->loopback_dev;
                dev_hold(dst->dev);
                dev_put(dev);
        }
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
        /* Impossible. Such dst must be popped before it reaches the
         * point of failure. */
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
        if (dst) {
                if (dst->obsolete) {
                        dst_release(dst);
                        dst = NULL;
                }
        }
        return dst;
}

static void __xfrm_garbage_collect(struct net *net)
{
        struct dst_entry *head, *next;

        flow_cache_flush();

        spin_lock_bh(&xfrm_policy_sk_bundle_lock);
        head = xfrm_policy_sk_bundles;
        xfrm_policy_sk_bundles = NULL;
        spin_unlock_bh(&xfrm_policy_sk_bundle_lock);

        while (head) {
                next = head->next;
                dst_free(head);
                head = next;
        }
}
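
/* Walk the chain of xfrm_dsts making up a bundle, caching the child and
 * route MTUs at each level and clamping the level's RTAX_MTU metric to
 * the smaller of the transformed child MTU and the route MTU.
 */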
static void xfrm_init_pmtu(struct dst_entry *dst)
{
        do {
                struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
                u32 pmtu, route_mtu_cached;

                pmtu = dst_mtu(dst->child);
                xdst->child_mtu_cached = pmtu;

                pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

                route_mtu_cached = dst_mtu(xdst->route);
                xdst->route_mtu_cached = route_mtu_cached;

                if (pmtu > route_mtu_cached)
                        pmtu = route_mtu_cached;

                dst->metrics[RTAX_MTU-1] = pmtu;
        } while ((dst = dst->next));
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */
int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
                   struct flowi *fl, int family, int strict)
{
        struct dst_entry *dst = &first->u.dst;
        struct xfrm_dst *last;
        u32 mtu;

        if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
            (dst->dev && !netif_running(dst->dev)))
                return 0;
#ifdef CONFIG_XFRM_SUB_POLICY
        if (fl) {
                if (first->origin && !flow_cache_uli_match(first->origin, fl))
                        return 0;
                if (first->partner &&
                    !xfrm_selector_match(first->partner, fl, family))
                        return 0;
        }
#endif

        last = NULL;

        do {
                struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

                if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
                        return 0;
                if (fl && pol &&
                    !security_xfrm_state_pol_flow_match(dst->xfrm, pol, fl))
                        return 0;
                if (dst->xfrm->km.state != XFRM_STATE_VALID)
                        return 0;
                if (xdst->xfrm_genid != dst->xfrm->genid)
                        return 0;
                if (xdst->num_pols > 0 &&
                    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
                        return 0;

                if (strict && fl &&
                    !(dst->xfrm->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
                    !xfrm_state_addr_flow_check(dst->xfrm, fl, family))
                        return 0;

                mtu = dst_mtu(dst->child);
                if (xdst->child_mtu_cached != mtu) {
                        last = xdst;
                        xdst->child_mtu_cached = mtu;
                }

                if (!dst_check(xdst->route, xdst->route_cookie))
                        return 0;
                mtu = dst_mtu(xdst->route);
                if (xdst->route_mtu_cached != mtu) {
                        last = xdst;
                        xdst->route_mtu_cached = mtu;
                }

                dst = dst->child;
        } while (dst->xfrm);

        if (likely(!last))
                return 1;

        mtu = last->child_mtu_cached;
        for (;;) {
                dst = &last->u.dst;

                mtu = xfrm_state_mtu(dst->xfrm, mtu);
                if (mtu > last->route_mtu_cached)
                        mtu = last->route_mtu_cached;
                dst->metrics[RTAX_MTU-1] = mtu;

                if (last == first)
                        break;

                last = (struct xfrm_dst *)last->u.dst.next;
                last->child_mtu_cached = mtu;
        }

        return 1;
}
EXPORT_SYMBOL(xfrm_bundle_ok);
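
/* Register the per-address-family policy ops.  dst_ops hooks that the
 * caller left NULL (check, negative_advice, link_failure, ...) are
 * filled in with the generic xfrm implementations, and the resulting
 * dst_ops template is copied into every network namespace.
 */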
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
        struct net *net;
        int err = 0;

        if (unlikely(afinfo == NULL))
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
        write_lock_bh(&xfrm_policy_afinfo_lock);
        if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
                err = -ENOBUFS;
        else {
                struct dst_ops *dst_ops = afinfo->dst_ops;
                if (likely(dst_ops->kmem_cachep == NULL))
                        dst_ops->kmem_cachep = xfrm_dst_cache;
                if (likely(dst_ops->check == NULL))
                        dst_ops->check = xfrm_dst_check;
                if (likely(dst_ops->negative_advice == NULL))
                        dst_ops->negative_advice = xfrm_negative_advice;
                if (likely(dst_ops->link_failure == NULL))
                        dst_ops->link_failure = xfrm_link_failure;
                if (likely(afinfo->garbage_collect == NULL))
                        afinfo->garbage_collect = __xfrm_garbage_collect;
                xfrm_policy_afinfo[afinfo->family] = afinfo;
        }
        write_unlock_bh(&xfrm_policy_afinfo_lock);

        rtnl_lock();
        for_each_net(net) {
                struct dst_ops *xfrm_dst_ops;

                switch (afinfo->family) {
                case AF_INET:
                        xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
                        break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                case AF_INET6:
                        xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
                        break;
#endif
                default:
                        BUG();
                }
                *xfrm_dst_ops = *afinfo->dst_ops;
        }
        rtnl_unlock();

        return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
        int err = 0;

        if (unlikely(afinfo == NULL))
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
        write_lock_bh(&xfrm_policy_afinfo_lock);
        if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
                if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
                        err = -EINVAL;
                else {
                        struct dst_ops *dst_ops = afinfo->dst_ops;
                        xfrm_policy_afinfo[afinfo->family] = NULL;
                        dst_ops->kmem_cachep = NULL;
                        dst_ops->check = NULL;
                        dst_ops->negative_advice = NULL;
                        dst_ops->link_failure = NULL;
                        afinfo->garbage_collect = NULL;
                }
        }
        write_unlock_bh(&xfrm_policy_afinfo_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

static void __net_init xfrm_dst_ops_init(struct net *net)
{
        struct xfrm_policy_afinfo *afinfo;

        read_lock_bh(&xfrm_policy_afinfo_lock);
        afinfo = xfrm_policy_afinfo[AF_INET];
        if (afinfo)
                net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        afinfo = xfrm_policy_afinfo[AF_INET6];
        if (afinfo)
                net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
#endif
        read_unlock_bh(&xfrm_policy_afinfo_lock);
}
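
/* Look up the afinfo for a family.  Note the asymmetric locking: on
 * success xfrm_policy_afinfo_lock is kept read-held and must be dropped
 * via xfrm_policy_put_afinfo(); on failure it is released here.
 */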
static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
        struct xfrm_policy_afinfo *afinfo;

        if (unlikely(family >= NPROTO))
                return NULL;
        read_lock(&xfrm_policy_afinfo_lock);
        afinfo = xfrm_policy_afinfo[family];
        if (unlikely(!afinfo))
                read_unlock(&xfrm_policy_afinfo_lock);
        return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
        read_unlock(&xfrm_policy_afinfo_lock);
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        switch (event) {
        case NETDEV_DOWN:
                __xfrm_garbage_collect(dev_net(dev));
        }
        return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
        .notifier_call = xfrm_dev_event,
};

#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
        int rv;

        if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
                          sizeof(struct linux_xfrm_mib),
                          __alignof__(struct linux_xfrm_mib)) < 0)
                return -ENOMEM;
        rv = xfrm_proc_init(net);
        if (rv < 0)
                snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
        return rv;
}

static void xfrm_statistics_fini(struct net *net)
{
        xfrm_proc_fini(net);
        snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
        return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif

static int __net_init xfrm_policy_init(struct net *net)
{
        unsigned int hmask, sz;
        int dir;

        if (net_eq(net, &init_net))
                xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
                                           sizeof(struct xfrm_dst),
                                           0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
                                           NULL);

        hmask = 8 - 1;
        sz = (hmask+1) * sizeof(struct hlist_head);

        net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
        if (!net->xfrm.policy_byidx)
                goto out_byidx;
        net->xfrm.policy_idx_hmask = hmask;

        for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
                struct xfrm_policy_hash *htab;

                net->xfrm.policy_count[dir] = 0;
                INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

                htab = &net->xfrm.policy_bydst[dir];
                htab->table = xfrm_hash_alloc(sz);
                if (!htab->table)
                        goto out_bydst;
                htab->hmask = hmask;
        }

        INIT_LIST_HEAD(&net->xfrm.policy_all);
        INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
        if (net_eq(net, &init_net))
                register_netdevice_notifier(&xfrm_dev_notifier);
        return 0;

out_bydst:
        for (dir--; dir >= 0; dir--) {
                struct xfrm_policy_hash *htab;

                htab = &net->xfrm.policy_bydst[dir];
                xfrm_hash_free(htab->table, sz);
        }
        xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
        return -ENOMEM;
}

static void xfrm_policy_fini(struct net *net)
{
        struct xfrm_audit audit_info;
        unsigned int sz;
        int dir;

        flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
        audit_info.loginuid = -1;
        audit_info.sessionid = -1;
        audit_info.secid = 0;
        xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
#endif
        audit_info.loginuid = -1;
        audit_info.sessionid = -1;
        audit_info.secid = 0;
        xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);

        WARN_ON(!list_empty(&net->xfrm.policy_all));

        for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
                struct xfrm_policy_hash *htab;

                WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

                htab = &net->xfrm.policy_bydst[dir];
                /* free the full byte size the table was allocated with,
                 * matching the allocation in xfrm_policy_init() */
                sz = (htab->hmask + 1) * sizeof(struct hlist_head);
                WARN_ON(!hlist_empty(htab->table));
                xfrm_hash_free(htab->table, sz);
        }

        sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
        WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
        xfrm_hash_free(net->xfrm.policy_byidx, sz);
}

static int __net_init xfrm_net_init(struct net *net)
{
        int rv;

        rv = xfrm_statistics_init(net);
        if (rv < 0)
                goto out_statistics;
        rv = xfrm_state_init(net);
        if (rv < 0)
                goto out_state;
        rv = xfrm_policy_init(net);
        if (rv < 0)
                goto out_policy;
        xfrm_dst_ops_init(net);
        rv = xfrm_sysctl_init(net);
        if (rv < 0)
                goto out_sysctl;
        return 0;

out_sysctl:
        xfrm_policy_fini(net);
out_policy:
        xfrm_state_fini(net);
out_state:
        xfrm_statistics_fini(net);
out_statistics:
        return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
        xfrm_sysctl_fini(net);
        xfrm_policy_fini(net);
        xfrm_state_fini(net);
        xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
        .init = xfrm_net_init,
        .exit = xfrm_net_exit,
};

void __init xfrm_init(void)
{
        register_pernet_subsys(&xfrm_net_ops);
        xfrm_input_init();
}

#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
                                         struct audit_buffer *audit_buf)
{
        struct xfrm_sec_ctx *ctx = xp->security;
        struct xfrm_selector *sel = &xp->selector;

        if (ctx)
                audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
                                 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

        switch (sel->family) {
        case AF_INET:
                audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
                if (sel->prefixlen_s != 32)
                        audit_log_format(audit_buf, " src_prefixlen=%d",
                                         sel->prefixlen_s);
                audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
                if (sel->prefixlen_d != 32)
                        audit_log_format(audit_buf, " dst_prefixlen=%d",
                                         sel->prefixlen_d);
                break;
        case AF_INET6:
                audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
                if (sel->prefixlen_s != 128)
                        audit_log_format(audit_buf, " src_prefixlen=%d",
                                         sel->prefixlen_s);
                audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
                if (sel->prefixlen_d != 128)
                        audit_log_format(audit_buf, " dst_prefixlen=%d",
                                         sel->prefixlen_d);
                break;
        }
}

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
                           uid_t auid, u32 sessionid, u32 secid)
{
        struct audit_buffer *audit_buf;

        audit_buf = xfrm_audit_start("SPD-add");
        if (audit_buf == NULL)
                return;
        xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
        audit_log_format(audit_buf, " res=%u", result);
        xfrm_audit_common_policyinfo(xp, audit_buf);
        audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
                              uid_t auid, u32 sessionid, u32 secid)
{
        struct audit_buffer *audit_buf;

        audit_buf = xfrm_audit_start("SPD-delete");
        if (audit_buf == NULL)
                return;
        xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
        audit_log_format(audit_buf, " res=%u", result);
        xfrm_audit_common_policyinfo(xp, audit_buf);
        audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif

#ifdef CONFIG_XFRM_MIGRATE
static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
                                       struct xfrm_selector *sel_tgt)
{
        if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
                if (sel_tgt->family == sel_cmp->family &&
                    xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
                                  sel_cmp->family) == 0 &&
                    xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
                                  sel_cmp->family) == 0 &&
                    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
                    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
                        return 1;
                }
        } else {
                if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
                        return 1;
                }
        }
        return 0;
}
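
/* Find the policy to migrate: first the exact-match hash chain, then
 * the inexact list, where an inexact policy is preferred only if its
 * priority value is strictly lower (i.e. better).  A reference is taken
 * on the returned policy.
 */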
static struct xfrm_policy *xfrm_migrate_policy_find(struct xfrm_selector *sel,
                                                    u8 dir, u8 type)
{
        struct xfrm_policy *pol, *ret = NULL;
        struct hlist_node *entry;
        struct hlist_head *chain;
        u32 priority = ~0U;

        read_lock_bh(&xfrm_policy_lock);
        chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
        hlist_for_each_entry(pol, entry, chain, bydst) {
                if (xfrm_migrate_selector_match(sel, &pol->selector) &&
                    pol->type == type) {
                        ret = pol;
                        priority = ret->priority;
                        break;
                }
        }
        chain = &init_net.xfrm.policy_inexact[dir];
        hlist_for_each_entry(pol, entry, chain, bydst) {
                if (xfrm_migrate_selector_match(sel, &pol->selector) &&
                    pol->type == type &&
                    pol->priority < priority) {
                        ret = pol;
                        break;
                }
        }

        if (ret)
                xfrm_pol_hold(ret);

        read_unlock_bh(&xfrm_policy_lock);

        return ret;
}

static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
{
        int match = 0;

        if (t->mode == m->mode && t->id.proto == m->proto &&
            (m->reqid == 0 || t->reqid == m->reqid)) {
                switch (t->mode) {
                case XFRM_MODE_TUNNEL:
                case XFRM_MODE_BEET:
                        if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
                                          m->old_family) == 0 &&
                            xfrm_addr_cmp(&t->saddr, &m->old_saddr,
                                          m->old_family) == 0) {
                                match = 1;
                        }
                        break;
                case XFRM_MODE_TRANSPORT:
                        /* In transport mode, the template does not store
                         * any IP addresses; comparing mode and protocol
                         * is sufficient. */
                        match = 1;
                        break;
                default:
                        break;
                }
        }
        return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
                               struct xfrm_migrate *m, int num_migrate)
{
        struct xfrm_migrate *mp;
        int i, j, n = 0;

        write_lock_bh(&pol->lock);
        if (unlikely(pol->walk.dead)) {
                /* target policy has been deleted */
                write_unlock_bh(&pol->lock);
                return -ENOENT;
        }

        for (i = 0; i < pol->xfrm_nr; i++) {
                for (j = 0, mp = m; j < num_migrate; j++, mp++) {
                        if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
                                continue;
                        n++;
                        if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
                            pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
                                continue;
                        /* update endpoints */
                        memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
                               sizeof(pol->xfrm_vec[i].id.daddr));
                        memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
                               sizeof(pol->xfrm_vec[i].saddr));
                        pol->xfrm_vec[i].encap_family = mp->new_family;
                        /* flush bundles */
                        atomic_inc(&pol->genid);
                }
        }

        write_unlock_bh(&pol->lock);

        if (!n)
                return -ENODATA;

        return 0;
}
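
/* Sanity-check a migrate request: 1..XFRM_MAX_DEPTH entries, where each
 * entry must actually change at least one endpoint address, must not
 * use a wildcard (all-zero) new address, and must not duplicate another
 * entry's old addresses, proto, mode, reqid and family.
 */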
static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
{
        int i, j;

        if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
                return -EINVAL;

        for (i = 0; i < num_migrate; i++) {
                if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
                                   m[i].old_family) == 0) &&
                    (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
                                   m[i].old_family) == 0))
                        return -EINVAL;
                if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
                    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
                        return -EINVAL;

                /* check if there is any duplicated entry */
                for (j = i + 1; j < num_migrate; j++) {
                        if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
                                    sizeof(m[i].old_daddr)) &&
                            !memcmp(&m[i].old_saddr, &m[j].old_saddr,
                                    sizeof(m[i].old_saddr)) &&
                            m[i].proto == m[j].proto &&
                            m[i].mode == m[j].mode &&
                            m[i].reqid == m[j].reqid &&
                            m[i].old_family == m[j].old_family)
                                return -EINVAL;
                }
        }

        return 0;
}
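
/* Migrate matching states and the policy's templates to new endpoint
 * addresses, in five stages: find the policy, clone and update each
 * matching state, rewrite the policy templates, delete the old states,
 * and announce the change to the key managers.  If anything fails after
 * states have been cloned, the clones are deleted again and the old SAs
 * stay in place.
 */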
int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
                 struct xfrm_migrate *m, int num_migrate,
                 struct xfrm_kmaddress *k)
{
        int i, err, nx_cur = 0, nx_new = 0;
        struct xfrm_policy *pol = NULL;
        struct xfrm_state *x, *xc;
        struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
        struct xfrm_state *x_new[XFRM_MAX_DEPTH];
        struct xfrm_migrate *mp;

        if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
                goto out;

        /* Stage 1 - find policy */
        if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
                err = -ENOENT;
                goto out;
        }

        /* Stage 2 - find and update state(s) */
        for (i = 0, mp = m; i < num_migrate; i++, mp++) {
                if ((x = xfrm_migrate_state_find(mp))) {
                        x_cur[nx_cur] = x;
                        nx_cur++;
                        if ((xc = xfrm_state_migrate(x, mp))) {
                                x_new[nx_new] = xc;
                                nx_new++;
                        } else {
                                err = -ENODATA;
                                goto restore_state;
                        }
                }
        }

        /* Stage 3 - update policy */
        if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
                goto restore_state;

        /* Stage 4 - delete old state(s) */
        if (nx_cur) {
                xfrm_states_put(x_cur, nx_cur);
                xfrm_states_delete(x_cur, nx_cur);
        }

        /* Stage 5 - announce */
        km_migrate(sel, dir, type, m, num_migrate, k);

        xfrm_pol_put(pol);

        return 0;

out:
        return err;

restore_state:
        if (pol)
                xfrm_pol_put(pol);
        if (nx_cur)
                xfrm_states_put(x_cur, nx_cur);
        if (nx_new)
                xfrm_states_delete(x_new, nx_new);

        return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif