xfrm_policy.c

/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/xfrm.h>
#include <net/ip.h>
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif

#include "xfrm_hash.h"

DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
static struct dst_entry *xfrm_policy_sk_bundles;
static DEFINE_RWLOCK(xfrm_policy_lock);

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __read_mostly;

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static inline void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static void xfrm_init_pmtu(struct dst_entry *dst);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}

static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}

bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}

static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
						  const xfrm_address_t *saddr,
						  const xfrm_address_t *daddr,
						  int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, saddr, daddr);

	xfrm_policy_put_afinfo(afinfo);

	return dst;
}

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
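
/* Per-policy lifetime timer: on soft expiry notify key managers via
 * km_policy_expired() and keep the policy alive; on hard expiry delete
 * the policy outright. Otherwise the timer is re-armed for the nearest
 * remaining deadline.
 */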
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy *)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}

static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
{
	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);

	if (unlikely(pol->walk.dead))
		flo = NULL;
	else
		xfrm_pol_hold(pol);

	return flo;
}

static int xfrm_policy_flo_check(struct flow_cache_object *flo)
{
	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);

	return !pol->walk.dead;
}

static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
{
	xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
}

static const struct flow_cache_ops xfrm_policy_fc_ops = {
	.get = xfrm_policy_flo_get,
	.check = xfrm_policy_flo_check,
	.delete = xfrm_policy_flo_delete,
};

/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		atomic_set(&policy->refcnt, 1);
		setup_timer(&policy->timer, xfrm_policy_timer,
			    (unsigned long)policy);
		policy->flo.ops = &xfrm_policy_fc_ops;
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy xfrm_policy: descendant resources must be released by this moment. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer))
		BUG();

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}
EXPORT_SYMBOL(xfrm_policy_destroy);
/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must already be unlinked from lists.
 */
static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	policy->walk.dead = 1;

	atomic_inc(&policy->genid);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}
static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash = __sel_hash(sel, family, hmask);

	return (hash == hmask + 1 ?
		&net->xfrm.policy_inexact[dir] :
		net->xfrm.policy_bydst[dir].table + hash);
}

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash = __addr_hash(daddr, saddr, family, hmask);

	return net->xfrm.policy_bydst[dir].table + hash;
}
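
/* Rehash one bydst chain into the new table. Entries that land in the
 * same new bucket are re-added behind each other, so their original
 * relative order (and thus lookup precedence) is preserved.
 */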
static void xfrm_dst_hash_transfer(struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;

redo:
	hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
		unsigned int h;

		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask);
		if (!entry0) {
			hlist_del(entry);
			hlist_add_head(&pol->bydst, ndsttable+h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del(entry);
			hlist_add_after(entry0, &pol->bydst);
		}
		entry0 = entry;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}

static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	int i;

	if (!ndst)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);

	net->xfrm.policy_bydst[dir].table = ndst;
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	read_lock_bh(&xfrm_policy_lock);
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
	read_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net, total);

	mutex_unlock(&hash_resize_mutex);
}
/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(struct net *net, int dir)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_node *entry;
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, entry, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}
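
/* Compare two selectors word by word; returns 0 on an exact match,
 * 1 otherwise.
 */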
static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}
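
/* Insert a policy into its bydst chain in priority order. A policy with
 * the same selector, mark and security context replaces the old one
 * (inheriting its index), or fails with -EEXIST when excl is set.
 */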
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *pol;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct hlist_node *entry, *newpos;
	u32 mark = policy->mark.v & policy->mark.m;

	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	delpol = NULL;
	newpos = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == policy->type &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    (mark & pol->mark.m) == pol->mark.v &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst;
			continue;
		}
		if (delpol)
			break;
	}
	if (newpos)
		hlist_add_after(newpos, &policy->bydst);
	else
		hlist_add_head(&policy->bydst, chain);
	xfrm_pol_hold(policy);
	net->xfrm.policy_count[dir]++;
	atomic_inc(&flow_cache_genid);
	if (delpol)
		__xfrm_policy_unlink(delpol, dir);
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = get_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	list_add(&policy->walk.all, &net->xfrm.policy_all);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
					  int dir, struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == type &&
		    (mark & pol->mark.m) == pol->mark.v &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
				     int dir, u32 id, int delete, int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, byidx) {
		if (pol->type == type && pol->index == id &&
		    (mark & pol->mark.m) == pol->mark.v) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i;

		hlist_for_each_entry(pol, entry,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			err = security_xfrm_policy_delete(pol->security);
			if (err) {
				xfrm_audit_policy_delete(pol, 0,
							 audit_info->loginuid,
							 audit_info->sessionid,
							 audit_info->secid);
				return err;
			}
		}
		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				err = security_xfrm_policy_delete(
								pol->security);
				if (err) {
					xfrm_audit_policy_delete(pol, 0,
							audit_info->loginuid,
							audit_info->sessionid,
							audit_info->secid);
					return err;
				}
			}
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif

int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0, cnt = 0;

	write_lock_bh(&xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, audit_info);
	if (err)
		goto out;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i;

	again1:
		hlist_for_each_entry(pol, entry,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			__xfrm_policy_unlink(pol, dir);
			write_unlock_bh(&xfrm_policy_lock);
			cnt++;

			xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
						 audit_info->sessionid,
						 audit_info->secid);

			xfrm_policy_kill(pol);

			write_lock_bh(&xfrm_policy_lock);
			goto again1;
		}

		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
	again2:
			hlist_for_each_entry(pol, entry,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				__xfrm_policy_unlink(pol, dir);
				write_unlock_bh(&xfrm_policy_lock);
				cnt++;

				xfrm_audit_policy_delete(pol, 1,
							 audit_info->loginuid,
							 audit_info->sessionid,
							 audit_info->secid);
				xfrm_policy_kill(pol);

				write_lock_bh(&xfrm_policy_lock);
				goto again2;
			}
		}

	}
	if (!cnt)
		err = -ESRCH;
out:
	write_unlock_bh(&xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	write_lock_bh(&xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	write_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	INIT_LIST_HEAD(&walk->walk.all);
	walk->walk.dead = 1;
	walk->type = type;
	walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
{
	if (list_empty(&walk->walk.all))
		return;

	write_lock_bh(&xfrm_policy_lock);
	list_del(&walk->walk.all);
	write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);

/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(const struct xfrm_policy *pol,
			     const struct flowi *fl,
			     u8 type, u16 family, int dir)
{
	const struct xfrm_selector *sel = &pol->selector;
	int ret = -ESRCH;
	bool match;

	if (pol->family != family ||
	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
						  dir);

	return ret;
}
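
/* Type-specific policy lookup: scan the exact daddr/saddr hash chain
 * first, then the inexact list, preferring whichever match carries the
 * numerically lower (i.e. better) priority.
 */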
static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
						     const struct flowi *fl,
						     u16 family, u8 dir)
{
	int err;
	struct xfrm_policy *pol, *ret;
	const xfrm_address_t *daddr, *saddr;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(net, daddr, saddr, family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else if (pol->priority < priority) {
			ret = pol;
			break;
		}
	}
	if (ret)
		xfrm_pol_hold(ret);
fail:
	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static struct xfrm_policy *
__xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_policy *pol;

	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
	if (pol != NULL)
		return pol;
#endif
	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
}

static struct flow_cache_object *
xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
		   u8 dir, struct flow_cache_object *old_obj, void *ctx)
{
	struct xfrm_policy *pol;

	if (old_obj)
		xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));

	pol = __xfrm_policy_lookup(net, fl, family, dir);
	if (IS_ERR_OR_NULL(pol))
		return ERR_CAST(pol);

	/* Resolver returns two references:
	 * one for cache and one for caller of flow_cache_lookup() */
	xfrm_pol_hold(pol);

	return &pol->flo;
}

static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	}
}

static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
						 const struct flowi *fl)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		bool match = xfrm_selector_match(&pol->selector, fl,
						 sk->sk_family);
		int err = 0;

		if (match) {
			if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
				pol = NULL;
				goto out;
			}
			err = security_xfrm_policy_lookup(pol->security,
						      fl->flowi_secid,
						      policy_to_flow_dir(dir));
			if (!err)
				xfrm_pol_hold(pol);
			else if (err == -ESRCH)
				pol = NULL;
			else
				pol = ERR_PTR(err);
		} else
			pol = NULL;
	}
out:
	read_unlock_bh(&xfrm_policy_lock);
	return pol;
}
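
/* Link a policy into the walk list and both hash tables. The caller is
 * expected to hold xfrm_policy_lock for writing.
 */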
static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	struct net *net = xp_net(pol);
	struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
						     pol->family, dir);

	list_add(&pol->walk.all, &net->xfrm.policy_all);
	hlist_add_head(&pol->bydst, chain);
	hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
	net->xfrm.policy_count[dir]++;
	xfrm_pol_hold(pol);

	if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct net *net = xp_net(pol);

	if (hlist_unhashed(&pol->bydst))
		return NULL;

	hlist_del(&pol->bydst);
	hlist_del(&pol->byidx);
	list_del(&pol->walk.all);
	net->xfrm.policy_count[dir]--;

	return pol;
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct net *net = xp_net(pol);
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = get_seconds();
		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		/* Unlinking succeeds always. This is the only function
		 * allowed to delete or replace socket policy.
		 */
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}

static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old->security,
					       &newp->security)) {
			kfree(newp);
			return NULL;  /* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->mark = old->mark;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}
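
/* Duplicate both per-socket policies onto a newly cloned socket;
 * returns -ENOMEM if either clone fails.
 */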
int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}

static int
xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
	       unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->get_saddr(net, local, remote);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
		      struct xfrm_state **xfrm, unsigned short family)
{
	struct net *net = xp_net(policy);
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL ||
		    tmpl->mode == XFRM_MODE_BEET) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			if (xfrm_addr_any(local, tmpl->encap_family)) {
				error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}
		else if (error == -ESRCH)
			error = -EAGAIN;

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx>=0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
		  struct xfrm_state **xfrm, unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

fail:
	for (cnx--; cnx>=0; cnx--)
		xfrm_state_put(tpp[cnx]);
	return error;
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static inline int xfrm_get_tos(const struct flowi *fl, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int tos;

	if (!afinfo)
		return -EINVAL;

	tos = afinfo->get_tos(fl);

	xfrm_policy_put_afinfo(afinfo);

	return tos;
}

static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	if (xdst->route == NULL) {
		/* Dummy bundle - if it has xfrms we were not
		 * able to build a bundle because template resolution
		 * failed. It means we need to retry resolving. */
		if (xdst->num_xfrms > 0)
			return NULL;
	} else {
		/* Real bundle */
		if (stale_bundle(dst))
			return NULL;
	}

	dst_hold(dst);
	return flo;
}

static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	if (!xdst->route)
		return 0;
	if (stale_bundle(dst))
		return 0;

	return 1;
}

static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	dst_free(dst);
}

static const struct flow_cache_ops xfrm_bundle_fc_ops = {
	.get = xfrm_bundle_flo_get,
	.check = xfrm_bundle_flo_check,
	.delete = xfrm_bundle_flo_delete,
};

static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_ops *dst_ops;
	struct xfrm_dst *xdst;

	if (!afinfo)
		return ERR_PTR(-EINVAL);

	switch (family) {
	case AF_INET:
		dst_ops = &net->xfrm.xfrm4_dst_ops;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		dst_ops = &net->xfrm.xfrm6_dst_ops;
		break;
#endif
	default:
		BUG();
	}
	xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0);

	if (likely(xdst)) {
		struct dst_entry *dst = &xdst->u.dst;

		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
		xdst->flo.ops = &xfrm_bundle_fc_ops;
	} else
		xdst = ERR_PTR(-ENOBUFS);

	xfrm_policy_put_afinfo(afinfo);

	return xdst;
}

static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
				 int nfheader_len)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(dst->ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->init_path(path, dst, nfheader_len);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
				const struct flowi *fl)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->fill_dst(xdst, dev, fl);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */

static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
					    struct xfrm_state **xfrm, int nx,
					    const struct flowi *fl,
					    struct dst_entry *dst)
{
	struct net *net = xp_net(policy);
	unsigned long now = jiffies;
	struct net_device *dev;
	struct xfrm_mode *inner_mode;
	struct dst_entry *dst_prev = NULL;
	struct dst_entry *dst0 = NULL;
	int i = 0;
	int err;
	int header_len = 0;
	int nfheader_len = 0;
	int trailer_len = 0;
	int tos;
	int family = policy->selector.family;
	xfrm_address_t saddr, daddr;

	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);

	tos = xfrm_get_tos(fl, family);
	err = tos;
	if (tos < 0)
		goto put_states;

	dst_hold(dst);

	for (; i < nx; i++) {
		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
		struct dst_entry *dst1 = &xdst->u.dst;

		err = PTR_ERR(xdst);
		if (IS_ERR(xdst)) {
			dst_release(dst);
			goto put_states;
		}

		if (xfrm[i]->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(xfrm[i],
							xfrm_af2proto(family));
			if (!inner_mode) {
				err = -EAFNOSUPPORT;
				dst_release(dst);
				goto put_states;
			}
		} else
			inner_mode = xfrm[i]->inner_mode;

		if (!dst_prev)
			dst0 = dst1;
		else {
			dst_prev->child = dst_clone(dst1);
			dst1->flags |= DST_NOHASH;
		}

		xdst->route = dst;
		dst_copy_metrics(dst1, dst);

		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
			family = xfrm[i]->props.family;
			dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
					      family);
			err = PTR_ERR(dst);
			if (IS_ERR(dst))
				goto put_states;
		} else
			dst_hold(dst);

		dst1->xfrm = xfrm[i];
		xdst->xfrm_genid = xfrm[i]->genid;

		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
		dst1->flags |= DST_HOST;
		dst1->lastuse = now;

		dst1->input = dst_discard;
		dst1->output = inner_mode->afinfo->output;

		dst1->next = dst_prev;
		dst_prev = dst1;

		header_len += xfrm[i]->props.header_len;
		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
			nfheader_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;
	}

	dst_prev->child = dst;
	dst0->path = dst;

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
	xfrm_init_pmtu(dst_prev);

	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;

		err = xfrm_fill_dst(xdst, dev, fl);
		if (err)
			goto free_dst;

		dst_prev->header_len = header_len;
		dst_prev->trailer_len = trailer_len;
		header_len -= xdst->u.dst.xfrm->props.header_len;
		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
	}

out:
	return dst0;

put_states:
	for (; i < nx; i++)
		xfrm_state_put(xfrm[i]);
free_dst:
	if (dst0)
		dst_free(dst0);
	dst0 = ERR_PTR(err);
	goto out;
}

static inline int
xfrm_dst_alloc_copy(void **target, const void *src, int size)
{
	if (!*target) {
		*target = kmalloc(size, GFP_ATOMIC);
		if (!*target)
			return -ENOMEM;
	}
	memcpy(*target, src, size);
	return 0;
}

static inline int
xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
				   sel, sizeof(*sel));
#else
	return 0;
#endif
}

static inline int
xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
#else
	return 0;
#endif
}
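
/* Expand the first lookup result: when pols[0] is a sub-policy, pull in
 * the matching main policy as pols[1] and accumulate the template count.
 * *num_xfrms is set to -1 if any policy does not allow the flow.
 */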
  1325. static int xfrm_expand_policies(const struct flowi *fl, u16 family,
  1326. struct xfrm_policy **pols,
  1327. int *num_pols, int *num_xfrms)
  1328. {
  1329. int i;
  1330. if (*num_pols == 0 || !pols[0]) {
  1331. *num_pols = 0;
  1332. *num_xfrms = 0;
  1333. return 0;
  1334. }
  1335. if (IS_ERR(pols[0]))
  1336. return PTR_ERR(pols[0]);
  1337. *num_xfrms = pols[0]->xfrm_nr;
  1338. #ifdef CONFIG_XFRM_SUB_POLICY
  1339. if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
  1340. pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
  1341. pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
  1342. XFRM_POLICY_TYPE_MAIN,
  1343. fl, family,
  1344. XFRM_POLICY_OUT);
  1345. if (pols[1]) {
  1346. if (IS_ERR(pols[1])) {
  1347. xfrm_pols_put(pols, *num_pols);
  1348. return PTR_ERR(pols[1]);
  1349. }
  1350. (*num_pols) ++;
  1351. (*num_xfrms) += pols[1]->xfrm_nr;
  1352. }
  1353. }
  1354. #endif
  1355. for (i = 0; i < *num_pols; i++) {
  1356. if (pols[i]->action != XFRM_POLICY_ALLOW) {
  1357. *num_xfrms = -1;
  1358. break;
  1359. }
  1360. }
  1361. return 0;
  1362. }
  1363. static struct xfrm_dst *
  1364. xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
  1365. const struct flowi *fl, u16 family,
  1366. struct dst_entry *dst_orig)
  1367. {
  1368. struct net *net = xp_net(pols[0]);
  1369. struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
  1370. struct dst_entry *dst;
  1371. struct xfrm_dst *xdst;
  1372. int err;
  1373. /* Try to instantiate a bundle */
  1374. err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
  1375. if (err <= 0) {
  1376. if (err != 0 && err != -EAGAIN)
  1377. XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
  1378. return ERR_PTR(err);
  1379. }
  1380. dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
  1381. if (IS_ERR(dst)) {
  1382. XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
  1383. return ERR_CAST(dst);
  1384. }
  1385. xdst = (struct xfrm_dst *)dst;
  1386. xdst->num_xfrms = err;
  1387. if (num_pols > 1)
  1388. err = xfrm_dst_update_parent(dst, &pols[1]->selector);
  1389. else
  1390. err = xfrm_dst_update_origin(dst, fl);
  1391. if (unlikely(err)) {
  1392. dst_free(dst);
  1393. XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
  1394. return ERR_PTR(err);
  1395. }
  1396. xdst->num_pols = num_pols;
  1397. memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
  1398. xdst->policy_genid = atomic_read(&pols[0]->genid);
  1399. return xdst;
  1400. }
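/* Flow cache resolver for output bundles.  Reuse the old cache entry if its
 * policies are still alive; otherwise look the policies up again and build a
 * new bundle.  When policies exist but no bundle can be built yet (blocked
 * policy, no transformations, or missing states), cache a dummy bundle. */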
  1401. static struct flow_cache_object *
  1402. xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
  1403. struct flow_cache_object *oldflo, void *ctx)
  1404. {
  1405. struct dst_entry *dst_orig = (struct dst_entry *)ctx;
  1406. struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
  1407. struct xfrm_dst *xdst, *new_xdst;
  1408. int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
  1409. /* Check if the policies from old bundle are usable */
  1410. xdst = NULL;
  1411. if (oldflo) {
  1412. xdst = container_of(oldflo, struct xfrm_dst, flo);
  1413. num_pols = xdst->num_pols;
  1414. num_xfrms = xdst->num_xfrms;
  1415. pol_dead = 0;
  1416. for (i = 0; i < num_pols; i++) {
  1417. pols[i] = xdst->pols[i];
  1418. pol_dead |= pols[i]->walk.dead;
  1419. }
  1420. if (pol_dead) {
  1421. dst_free(&xdst->u.dst);
  1422. xdst = NULL;
  1423. num_pols = 0;
  1424. num_xfrms = 0;
  1425. oldflo = NULL;
  1426. }
  1427. }
  1428. /* Resolve policies to use if we couldn't get them from
  1429. * previous cache entry */
  1430. if (xdst == NULL) {
  1431. num_pols = 1;
  1432. pols[0] = __xfrm_policy_lookup(net, fl, family, dir);
  1433. err = xfrm_expand_policies(fl, family, pols,
  1434. &num_pols, &num_xfrms);
  1435. if (err < 0)
  1436. goto inc_error;
  1437. if (num_pols == 0)
  1438. return NULL;
  1439. if (num_xfrms <= 0)
  1440. goto make_dummy_bundle;
  1441. }
  1442. new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
  1443. if (IS_ERR(new_xdst)) {
  1444. err = PTR_ERR(new_xdst);
  1445. if (err != -EAGAIN)
  1446. goto error;
  1447. if (oldflo == NULL)
  1448. goto make_dummy_bundle;
  1449. dst_hold(&xdst->u.dst);
  1450. return oldflo;
  1451. } else if (new_xdst == NULL) {
  1452. num_xfrms = 0;
  1453. if (oldflo == NULL)
  1454. goto make_dummy_bundle;
  1455. xdst->num_xfrms = 0;
  1456. dst_hold(&xdst->u.dst);
  1457. return oldflo;
  1458. }
  1459. /* Kill the previous bundle */
  1460. if (xdst) {
/* The policies were stolen for the newly generated bundle */
  1462. xdst->num_pols = 0;
  1463. dst_free(&xdst->u.dst);
  1464. }
/* The flow cache does not hold a reference; it dst_free()'s,
 * but we do need to return one reference for the original caller */
  1467. dst_hold(&new_xdst->u.dst);
  1468. return &new_xdst->flo;
  1469. make_dummy_bundle:
/* We found policies, but there are no bundles to instantiate:
 * either the policy blocks, it has no transformations, or
 * we could not build a template (no xfrm_states). */
  1473. xdst = xfrm_alloc_dst(net, family);
  1474. if (IS_ERR(xdst)) {
  1475. xfrm_pols_put(pols, num_pols);
  1476. return ERR_CAST(xdst);
  1477. }
  1478. xdst->num_pols = num_pols;
  1479. xdst->num_xfrms = num_xfrms;
  1480. memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
  1481. dst_hold(&xdst->u.dst);
  1482. return &xdst->flo;
  1483. inc_error:
  1484. XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
  1485. error:
  1486. if (xdst != NULL)
  1487. dst_free(&xdst->u.dst);
  1488. else
  1489. xfrm_pols_put(pols, num_pols);
  1490. return ERR_PTR(err);
  1491. }
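/* Ask the address family to turn dst_orig into a blackhole route, used when
 * xfrm states are not yet in place and larval packets must be dropped. */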
  1492. static struct dst_entry *make_blackhole(struct net *net, u16 family,
  1493. struct dst_entry *dst_orig)
  1494. {
  1495. struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  1496. struct dst_entry *ret;
  1497. if (!afinfo) {
  1498. dst_release(dst_orig);
  1499. ret = ERR_PTR(-EINVAL);
  1500. } else {
  1501. ret = afinfo->blackhole_route(net, dst_orig);
  1502. }
  1503. xfrm_policy_put_afinfo(afinfo);
  1504. return ret;
  1505. }
/* Main function: finds/creates a bundle for a given flow.
 *
 * At the moment we consume a raw IP route, mostly to speed up lookups
 * on interfaces with IPsec disabled.
 */
  1511. struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
  1512. const struct flowi *fl,
  1513. struct sock *sk, int flags)
  1514. {
  1515. struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
  1516. struct flow_cache_object *flo;
  1517. struct xfrm_dst *xdst;
  1518. struct dst_entry *dst, *route;
  1519. u16 family = dst_orig->ops->family;
  1520. u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
  1521. int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
  1522. restart:
  1523. dst = NULL;
  1524. xdst = NULL;
  1525. route = NULL;
  1526. if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
  1527. num_pols = 1;
  1528. pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
  1529. err = xfrm_expand_policies(fl, family, pols,
  1530. &num_pols, &num_xfrms);
  1531. if (err < 0)
  1532. goto dropdst;
  1533. if (num_pols) {
  1534. if (num_xfrms <= 0) {
  1535. drop_pols = num_pols;
  1536. goto no_transform;
  1537. }
  1538. xdst = xfrm_resolve_and_create_bundle(
  1539. pols, num_pols, fl,
  1540. family, dst_orig);
  1541. if (IS_ERR(xdst)) {
  1542. xfrm_pols_put(pols, num_pols);
  1543. err = PTR_ERR(xdst);
  1544. goto dropdst;
  1545. } else if (xdst == NULL) {
  1546. num_xfrms = 0;
  1547. drop_pols = num_pols;
  1548. goto no_transform;
  1549. }
  1550. dst_hold(&xdst->u.dst);
  1551. spin_lock_bh(&xfrm_policy_sk_bundle_lock);
  1552. xdst->u.dst.next = xfrm_policy_sk_bundles;
  1553. xfrm_policy_sk_bundles = &xdst->u.dst;
  1554. spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
  1555. route = xdst->route;
  1556. }
  1557. }
  1558. if (xdst == NULL) {
  1559. /* To accelerate a bit... */
  1560. if ((dst_orig->flags & DST_NOXFRM) ||
  1561. !net->xfrm.policy_count[XFRM_POLICY_OUT])
  1562. goto nopol;
  1563. flo = flow_cache_lookup(net, fl, family, dir,
  1564. xfrm_bundle_lookup, dst_orig);
  1565. if (flo == NULL)
  1566. goto nopol;
  1567. if (IS_ERR(flo)) {
  1568. err = PTR_ERR(flo);
  1569. goto dropdst;
  1570. }
  1571. xdst = container_of(flo, struct xfrm_dst, flo);
  1572. num_pols = xdst->num_pols;
  1573. num_xfrms = xdst->num_xfrms;
  1574. memcpy(pols, xdst->pols, sizeof(struct xfrm_policy*) * num_pols);
  1575. route = xdst->route;
  1576. }
  1577. dst = &xdst->u.dst;
  1578. if (route == NULL && num_xfrms > 0) {
/* The only case in which xfrm_bundle_lookup() returns a
 * bundle with a null route is when the template could
 * not be resolved. It means policies are there, but the
 * bundle could not be created, since we don't yet
 * have the xfrm_states. We need to wait for the KM to
 * negotiate new SAs or bail out with an error. */
  1585. if (net->xfrm.sysctl_larval_drop) {
/* Generate a one-shot blackhole route instead of
 * waiting for the states to be negotiated. */
  1588. dst_release(dst);
  1589. xfrm_pols_put(pols, drop_pols);
  1590. XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
  1591. return make_blackhole(net, family, dst_orig);
  1592. }
  1593. if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) {
  1594. DECLARE_WAITQUEUE(wait, current);
  1595. add_wait_queue(&net->xfrm.km_waitq, &wait);
  1596. set_current_state(TASK_INTERRUPTIBLE);
  1597. schedule();
  1598. set_current_state(TASK_RUNNING);
  1599. remove_wait_queue(&net->xfrm.km_waitq, &wait);
  1600. if (!signal_pending(current)) {
  1601. dst_release(dst);
  1602. goto restart;
  1603. }
  1604. err = -ERESTART;
  1605. } else
  1606. err = -EAGAIN;
  1607. XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
  1608. goto error;
  1609. }
  1610. no_transform:
  1611. if (num_pols == 0)
  1612. goto nopol;
  1613. if ((flags & XFRM_LOOKUP_ICMP) &&
  1614. !(pols[0]->flags & XFRM_POLICY_ICMP)) {
  1615. err = -ENOENT;
  1616. goto error;
  1617. }
  1618. for (i = 0; i < num_pols; i++)
  1619. pols[i]->curlft.use_time = get_seconds();
  1620. if (num_xfrms < 0) {
  1621. /* Prohibit the flow */
  1622. XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
  1623. err = -EPERM;
  1624. goto error;
  1625. } else if (num_xfrms > 0) {
  1626. /* Flow transformed */
  1627. dst_release(dst_orig);
  1628. } else {
  1629. /* Flow passes untransformed */
  1630. dst_release(dst);
  1631. dst = dst_orig;
  1632. }
  1633. ok:
  1634. xfrm_pols_put(pols, drop_pols);
  1635. if (dst && dst->xfrm &&
  1636. dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
  1637. dst->flags |= DST_XFRM_TUNNEL;
  1638. return dst;
  1639. nopol:
  1640. if (!(flags & XFRM_LOOKUP_ICMP)) {
  1641. dst = dst_orig;
  1642. goto ok;
  1643. }
  1644. err = -ENOENT;
  1645. error:
  1646. dst_release(dst);
  1647. dropdst:
  1648. dst_release(dst_orig);
  1649. xfrm_pols_put(pols, drop_pols);
  1650. return ERR_PTR(err);
  1651. }
  1652. EXPORT_SYMBOL(xfrm_lookup);
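/* Let the state's type send a reject notification for the secpath entry
 * at idx, if the type implements one. */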
  1653. static inline int
  1654. xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
  1655. {
  1656. struct xfrm_state *x;
  1657. if (!skb->sp || idx < 0 || idx >= skb->sp->len)
  1658. return 0;
  1659. x = skb->sp->xvec[idx];
  1660. if (!x->type->reject)
  1661. return 0;
  1662. return x->type->reject(x, skb, fl);
  1663. }
/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we do this in a maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have the policy cached at them.
 */
  1669. static inline int
  1670. xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
  1671. unsigned short family)
  1672. {
  1673. if (xfrm_state_kern(x))
  1674. return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
  1675. return x->id.proto == tmpl->id.proto &&
  1676. (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
  1677. (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
  1678. x->props.mode == tmpl->mode &&
  1679. (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
  1680. !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
  1681. !(x->props.mode != XFRM_MODE_TRANSPORT &&
  1682. xfrm_state_addr_cmp(tmpl, x, family));
  1683. }
/*
 * 0 or more than 0 is returned when validation succeeds (either a bypass
 * because of optional transport mode, or the next index of the matched
 * secpath state with the template).
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
  1691. static inline int
  1692. xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
  1693. unsigned short family)
  1694. {
  1695. int idx = start;
  1696. if (tmpl->optional) {
  1697. if (tmpl->mode == XFRM_MODE_TRANSPORT)
  1698. return start;
  1699. } else
  1700. start = -1;
  1701. for (; idx < sp->len; idx++) {
  1702. if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
  1703. return ++idx;
  1704. if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
  1705. if (start == -1)
  1706. start = -2-idx;
  1707. break;
  1708. }
  1709. }
  1710. return start;
  1711. }
  1712. int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
  1713. unsigned int family, int reverse)
  1714. {
  1715. struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  1716. int err;
  1717. if (unlikely(afinfo == NULL))
  1718. return -EAFNOSUPPORT;
  1719. afinfo->decode_session(skb, fl, reverse);
  1720. err = security_xfrm_decode_session(skb, &fl->flowi_secid);
  1721. xfrm_policy_put_afinfo(afinfo);
  1722. return err;
  1723. }
  1724. EXPORT_SYMBOL(__xfrm_decode_session);
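/* Return 1 if any secpath entry from index k onwards uses a non-transport
 * mode, storing that index in *idxp. */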
  1725. static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
  1726. {
  1727. for (; k < sp->len; k++) {
  1728. if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
  1729. *idxp = k;
  1730. return 1;
  1731. }
  1732. }
  1733. return 0;
  1734. }
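/* Inbound policy check: decode the flow from the skb, verify the SAs that
 * were actually used against their selectors, find the applicable policy
 * (socket policy first, then main/sub policies) and make sure the secpath
 * satisfies every template of the selected policies. */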
  1735. int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
  1736. unsigned short family)
  1737. {
  1738. struct net *net = dev_net(skb->dev);
  1739. struct xfrm_policy *pol;
  1740. struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
  1741. int npols = 0;
  1742. int xfrm_nr;
  1743. int pi;
  1744. int reverse;
  1745. struct flowi fl;
  1746. u8 fl_dir;
  1747. int xerr_idx = -1;
  1748. reverse = dir & ~XFRM_POLICY_MASK;
  1749. dir &= XFRM_POLICY_MASK;
  1750. fl_dir = policy_to_flow_dir(dir);
  1751. if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
  1752. XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
  1753. return 0;
  1754. }
  1755. nf_nat_decode_session(skb, &fl, family);
/* First, check the used SAs against their selectors. */
  1757. if (skb->sp) {
  1758. int i;
  1759. for (i=skb->sp->len-1; i>=0; i--) {
  1760. struct xfrm_state *x = skb->sp->xvec[i];
  1761. if (!xfrm_selector_match(&x->sel, &fl, family)) {
  1762. XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
  1763. return 0;
  1764. }
  1765. }
  1766. }
  1767. pol = NULL;
  1768. if (sk && sk->sk_policy[dir]) {
  1769. pol = xfrm_sk_policy_lookup(sk, dir, &fl);
  1770. if (IS_ERR(pol)) {
  1771. XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
  1772. return 0;
  1773. }
  1774. }
  1775. if (!pol) {
  1776. struct flow_cache_object *flo;
  1777. flo = flow_cache_lookup(net, &fl, family, fl_dir,
  1778. xfrm_policy_lookup, NULL);
  1779. if (IS_ERR_OR_NULL(flo))
  1780. pol = ERR_CAST(flo);
  1781. else
  1782. pol = container_of(flo, struct xfrm_policy, flo);
  1783. }
  1784. if (IS_ERR(pol)) {
  1785. XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
  1786. return 0;
  1787. }
  1788. if (!pol) {
  1789. if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
  1790. xfrm_secpath_reject(xerr_idx, skb, &fl);
  1791. XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
  1792. return 0;
  1793. }
  1794. return 1;
  1795. }
  1796. pol->curlft.use_time = get_seconds();
  1797. pols[0] = pol;
npols++;
  1799. #ifdef CONFIG_XFRM_SUB_POLICY
  1800. if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
  1801. pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
  1802. &fl, family,
  1803. XFRM_POLICY_IN);
  1804. if (pols[1]) {
  1805. if (IS_ERR(pols[1])) {
  1806. XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
  1807. return 0;
  1808. }
  1809. pols[1]->curlft.use_time = get_seconds();
npols++;
  1811. }
  1812. }
  1813. #endif
  1814. if (pol->action == XFRM_POLICY_ALLOW) {
  1815. struct sec_path *sp;
  1816. static struct sec_path dummy;
  1817. struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
  1818. struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
  1819. struct xfrm_tmpl **tpp = tp;
  1820. int ti = 0;
  1821. int i, k;
  1822. if ((sp = skb->sp) == NULL)
  1823. sp = &dummy;
  1824. for (pi = 0; pi < npols; pi++) {
  1825. if (pols[pi] != pol &&
  1826. pols[pi]->action != XFRM_POLICY_ALLOW) {
  1827. XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
  1828. goto reject;
  1829. }
  1830. if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
  1831. XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
  1832. goto reject_error;
  1833. }
  1834. for (i = 0; i < pols[pi]->xfrm_nr; i++)
  1835. tpp[ti++] = &pols[pi]->xfrm_vec[i];
  1836. }
  1837. xfrm_nr = ti;
  1838. if (npols > 1) {
  1839. xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
  1840. tpp = stp;
  1841. }
/* For each tunnel xfrm, find the first matching tmpl.
 * For each tmpl before that, find the corresponding xfrm.
 * Order is _important_. Later we will implement
 * some barriers, but at the moment barriers
 * are implied between every two transformations.
 */
  1848. for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
  1849. k = xfrm_policy_ok(tpp[i], sp, k, family);
  1850. if (k < 0) {
  1851. if (k < -1)
  1852. /* "-2 - errored_index" returned */
  1853. xerr_idx = -(2+k);
  1854. XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
  1855. goto reject;
  1856. }
  1857. }
  1858. if (secpath_has_nontransport(sp, k, &xerr_idx)) {
  1859. XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
  1860. goto reject;
  1861. }
  1862. xfrm_pols_put(pols, npols);
  1863. return 1;
  1864. }
  1865. XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
  1866. reject:
  1867. xfrm_secpath_reject(xerr_idx, skb, &fl);
  1868. reject_error:
  1869. xfrm_pols_put(pols, npols);
  1870. return 0;
  1871. }
  1872. EXPORT_SYMBOL(__xfrm_policy_check);
  1873. int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
  1874. {
  1875. struct net *net = dev_net(skb->dev);
  1876. struct flowi fl;
  1877. struct dst_entry *dst;
  1878. int res = 1;
  1879. if (xfrm_decode_session(skb, &fl, family) < 0) {
  1880. XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
  1881. return 0;
  1882. }
  1883. skb_dst_force(skb);
  1884. dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
  1885. if (IS_ERR(dst)) {
  1886. res = 0;
  1887. dst = NULL;
  1888. }
  1889. skb_dst_set(skb, dst);
  1890. return res;
  1891. }
  1892. EXPORT_SYMBOL(__xfrm_route_forward);
  1893. /* Optimize later using cookies and generation ids. */
  1894. static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
  1895. {
  1896. /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
  1897. * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
  1898. * get validated by dst_ops->check on every use. We do this
  1899. * because when a normal route referenced by an XFRM dst is
  1900. * obsoleted we do not go looking around for all parent
  1901. * referencing XFRM dsts so that we can invalidate them. It
  1902. * is just too much work. Instead we make the checks here on
  1903. * every use. For example:
  1904. *
  1905. * XFRM dst A --> IPv4 dst X
  1906. *
  1907. * X is the "xdst->route" of A (X is also the "dst->path" of A
  1908. * in this example). If X is marked obsolete, "A" will not
  1909. * notice. That's what we are validating here via the
  1910. * stale_bundle() check.
  1911. *
 * When a policy's bundle is pruned, we dst_free() the XFRM
 * dst which causes its ->obsolete field to be set to
 * DST_OBSOLETE_DEAD. If an XFRM dst has been pruned like
 * this, we want to force a new route lookup.
  1916. */
  1917. if (dst->obsolete < 0 && !stale_bundle(dst))
  1918. return dst;
  1919. return NULL;
  1920. }
  1921. static int stale_bundle(struct dst_entry *dst)
  1922. {
  1923. return !xfrm_bundle_ok((struct xfrm_dst *)dst);
  1924. }
  1925. void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
  1926. {
  1927. while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
  1928. dst->dev = dev_net(dev)->loopback_dev;
  1929. dev_hold(dst->dev);
  1930. dev_put(dev);
  1931. }
  1932. }
  1933. EXPORT_SYMBOL(xfrm_dst_ifdown);
  1934. static void xfrm_link_failure(struct sk_buff *skb)
  1935. {
/* Impossible. Such dst must be popped before it reaches the point of failure. */
  1937. }
  1938. static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
  1939. {
  1940. if (dst) {
  1941. if (dst->obsolete) {
  1942. dst_release(dst);
  1943. dst = NULL;
  1944. }
  1945. }
  1946. return dst;
  1947. }
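/* Free all per-socket bundles that have been queued on the
 * xfrm_policy_sk_bundles list. */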
  1948. static void __xfrm_garbage_collect(struct net *net)
  1949. {
  1950. struct dst_entry *head, *next;
  1951. spin_lock_bh(&xfrm_policy_sk_bundle_lock);
  1952. head = xfrm_policy_sk_bundles;
  1953. xfrm_policy_sk_bundles = NULL;
  1954. spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
  1955. while (head) {
  1956. next = head->next;
  1957. dst_free(head);
  1958. head = next;
  1959. }
  1960. }
  1961. static void xfrm_garbage_collect(struct net *net)
  1962. {
  1963. flow_cache_flush();
  1964. __xfrm_garbage_collect(net);
  1965. }
  1966. static void xfrm_garbage_collect_deferred(struct net *net)
  1967. {
  1968. flow_cache_flush_deferred();
  1969. __xfrm_garbage_collect(net);
  1970. }
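/* Seed each xfrm_dst in the bundle: cache the child and route MTUs and set
 * RTAX_MTU to the smaller of the transformed child MTU and the route MTU. */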
  1971. static void xfrm_init_pmtu(struct dst_entry *dst)
  1972. {
  1973. do {
  1974. struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
  1975. u32 pmtu, route_mtu_cached;
  1976. pmtu = dst_mtu(dst->child);
  1977. xdst->child_mtu_cached = pmtu;
  1978. pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
  1979. route_mtu_cached = dst_mtu(xdst->route);
  1980. xdst->route_mtu_cached = route_mtu_cached;
  1981. if (pmtu > route_mtu_cached)
  1982. pmtu = route_mtu_cached;
  1983. dst_metric_set(dst, RTAX_MTU, pmtu);
  1984. } while ((dst = dst->next));
  1985. }
  1986. /* Check that the bundle accepts the flow and its components are
  1987. * still valid.
  1988. */
  1989. static int xfrm_bundle_ok(struct xfrm_dst *first)
  1990. {
  1991. struct dst_entry *dst = &first->u.dst;
  1992. struct xfrm_dst *last;
  1993. u32 mtu;
  1994. if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
  1995. (dst->dev && !netif_running(dst->dev)))
  1996. return 0;
  1997. last = NULL;
  1998. do {
  1999. struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
  2000. if (dst->xfrm->km.state != XFRM_STATE_VALID)
  2001. return 0;
  2002. if (xdst->xfrm_genid != dst->xfrm->genid)
  2003. return 0;
  2004. if (xdst->num_pols > 0 &&
  2005. xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
  2006. return 0;
  2007. mtu = dst_mtu(dst->child);
  2008. if (xdst->child_mtu_cached != mtu) {
  2009. last = xdst;
  2010. xdst->child_mtu_cached = mtu;
  2011. }
  2012. if (!dst_check(xdst->route, xdst->route_cookie))
  2013. return 0;
  2014. mtu = dst_mtu(xdst->route);
  2015. if (xdst->route_mtu_cached != mtu) {
  2016. last = xdst;
  2017. xdst->route_mtu_cached = mtu;
  2018. }
  2019. dst = dst->child;
  2020. } while (dst->xfrm);
  2021. if (likely(!last))
  2022. return 1;
  2023. mtu = last->child_mtu_cached;
  2024. for (;;) {
  2025. dst = &last->u.dst;
  2026. mtu = xfrm_state_mtu(dst->xfrm, mtu);
  2027. if (mtu > last->route_mtu_cached)
  2028. mtu = last->route_mtu_cached;
  2029. dst_metric_set(dst, RTAX_MTU, mtu);
  2030. if (last == first)
  2031. break;
  2032. last = (struct xfrm_dst *)last->u.dst.next;
  2033. last->child_mtu_cached = mtu;
  2034. }
  2035. return 1;
  2036. }
  2037. static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
  2038. {
  2039. return dst_metric_advmss(dst->path);
  2040. }
  2041. static unsigned int xfrm_mtu(const struct dst_entry *dst)
  2042. {
  2043. unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
  2044. return mtu ? : dst_mtu(dst->path);
  2045. }
  2046. static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
  2047. struct sk_buff *skb,
  2048. const void *daddr)
  2049. {
  2050. return dst->path->ops->neigh_lookup(dst, skb, daddr);
  2051. }
  2052. int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
  2053. {
  2054. struct net *net;
  2055. int err = 0;
  2056. if (unlikely(afinfo == NULL))
  2057. return -EINVAL;
  2058. if (unlikely(afinfo->family >= NPROTO))
  2059. return -EAFNOSUPPORT;
  2060. spin_lock_bh(&xfrm_policy_afinfo_lock);
  2061. if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
  2062. err = -ENOBUFS;
  2063. else {
  2064. struct dst_ops *dst_ops = afinfo->dst_ops;
  2065. if (likely(dst_ops->kmem_cachep == NULL))
  2066. dst_ops->kmem_cachep = xfrm_dst_cache;
  2067. if (likely(dst_ops->check == NULL))
  2068. dst_ops->check = xfrm_dst_check;
  2069. if (likely(dst_ops->default_advmss == NULL))
  2070. dst_ops->default_advmss = xfrm_default_advmss;
  2071. if (likely(dst_ops->mtu == NULL))
  2072. dst_ops->mtu = xfrm_mtu;
  2073. if (likely(dst_ops->negative_advice == NULL))
  2074. dst_ops->negative_advice = xfrm_negative_advice;
  2075. if (likely(dst_ops->link_failure == NULL))
  2076. dst_ops->link_failure = xfrm_link_failure;
  2077. if (likely(dst_ops->neigh_lookup == NULL))
  2078. dst_ops->neigh_lookup = xfrm_neigh_lookup;
  2079. if (likely(afinfo->garbage_collect == NULL))
  2080. afinfo->garbage_collect = xfrm_garbage_collect_deferred;
  2081. rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
  2082. }
  2083. spin_unlock_bh(&xfrm_policy_afinfo_lock);
  2084. rtnl_lock();
  2085. for_each_net(net) {
  2086. struct dst_ops *xfrm_dst_ops;
  2087. switch (afinfo->family) {
  2088. case AF_INET:
  2089. xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
  2090. break;
  2091. #if IS_ENABLED(CONFIG_IPV6)
  2092. case AF_INET6:
  2093. xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
  2094. break;
  2095. #endif
  2096. default:
  2097. BUG();
  2098. }
  2099. *xfrm_dst_ops = *afinfo->dst_ops;
  2100. }
  2101. rtnl_unlock();
  2102. return err;
  2103. }
  2104. EXPORT_SYMBOL(xfrm_policy_register_afinfo);
  2105. int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
  2106. {
  2107. int err = 0;
  2108. if (unlikely(afinfo == NULL))
  2109. return -EINVAL;
  2110. if (unlikely(afinfo->family >= NPROTO))
  2111. return -EAFNOSUPPORT;
  2112. spin_lock_bh(&xfrm_policy_afinfo_lock);
  2113. if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
  2114. if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
  2115. err = -EINVAL;
  2116. else {
  2117. struct dst_ops *dst_ops = afinfo->dst_ops;
  2118. rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family],
  2119. NULL);
  2120. dst_ops->kmem_cachep = NULL;
  2121. dst_ops->check = NULL;
  2122. dst_ops->negative_advice = NULL;
  2123. dst_ops->link_failure = NULL;
  2124. afinfo->garbage_collect = NULL;
  2125. }
  2126. }
  2127. spin_unlock_bh(&xfrm_policy_afinfo_lock);
  2128. synchronize_rcu();
  2129. return err;
  2130. }
  2131. EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
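/* Copy the dst_ops templates of the currently registered address families
 * into this network namespace. */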
  2132. static void __net_init xfrm_dst_ops_init(struct net *net)
  2133. {
  2134. struct xfrm_policy_afinfo *afinfo;
  2135. rcu_read_lock_bh();
  2136. afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]);
  2137. if (afinfo)
  2138. net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
  2139. #if IS_ENABLED(CONFIG_IPV6)
  2140. afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]);
  2141. if (afinfo)
  2142. net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
  2143. #endif
  2144. rcu_read_unlock_bh();
  2145. }
  2146. static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
  2147. {
  2148. struct xfrm_policy_afinfo *afinfo;
  2149. if (unlikely(family >= NPROTO))
  2150. return NULL;
  2151. rcu_read_lock();
  2152. afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
  2153. if (unlikely(!afinfo))
  2154. rcu_read_unlock();
  2155. return afinfo;
  2156. }
  2157. static inline void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
  2158. {
  2159. rcu_read_unlock();
  2160. }
  2161. static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
  2162. {
  2163. struct net_device *dev = ptr;
  2164. switch (event) {
  2165. case NETDEV_DOWN:
  2166. xfrm_garbage_collect(dev_net(dev));
  2167. }
  2168. return NOTIFY_DONE;
  2169. }
  2170. static struct notifier_block xfrm_dev_notifier = {
  2171. .notifier_call = xfrm_dev_event,
  2172. };
  2173. #ifdef CONFIG_XFRM_STATISTICS
  2174. static int __net_init xfrm_statistics_init(struct net *net)
  2175. {
  2176. int rv;
  2177. if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
  2178. sizeof(struct linux_xfrm_mib),
  2179. __alignof__(struct linux_xfrm_mib)) < 0)
  2180. return -ENOMEM;
  2181. rv = xfrm_proc_init(net);
  2182. if (rv < 0)
  2183. snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
  2184. return rv;
  2185. }
  2186. static void xfrm_statistics_fini(struct net *net)
  2187. {
  2188. xfrm_proc_fini(net);
  2189. snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
  2190. }
  2191. #else
  2192. static int __net_init xfrm_statistics_init(struct net *net)
  2193. {
  2194. return 0;
  2195. }
  2196. static void xfrm_statistics_fini(struct net *net)
  2197. {
  2198. }
  2199. #endif
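/* Per-namespace policy setup: create the xfrm_dst slab cache (init_net only),
 * allocate the by-index and per-direction by-destination hash tables,
 * initialize the inexact lists and the hash resize work, and register the
 * device notifier for init_net. */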
  2200. static int __net_init xfrm_policy_init(struct net *net)
  2201. {
  2202. unsigned int hmask, sz;
  2203. int dir;
  2204. if (net_eq(net, &init_net))
  2205. xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
  2206. sizeof(struct xfrm_dst),
  2207. 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
  2208. NULL);
  2209. hmask = 8 - 1;
  2210. sz = (hmask+1) * sizeof(struct hlist_head);
  2211. net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
  2212. if (!net->xfrm.policy_byidx)
  2213. goto out_byidx;
  2214. net->xfrm.policy_idx_hmask = hmask;
  2215. for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
  2216. struct xfrm_policy_hash *htab;
  2217. net->xfrm.policy_count[dir] = 0;
  2218. INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
  2219. htab = &net->xfrm.policy_bydst[dir];
  2220. htab->table = xfrm_hash_alloc(sz);
  2221. if (!htab->table)
  2222. goto out_bydst;
  2223. htab->hmask = hmask;
  2224. }
  2225. INIT_LIST_HEAD(&net->xfrm.policy_all);
  2226. INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
  2227. if (net_eq(net, &init_net))
  2228. register_netdevice_notifier(&xfrm_dev_notifier);
  2229. return 0;
  2230. out_bydst:
  2231. for (dir--; dir >= 0; dir--) {
  2232. struct xfrm_policy_hash *htab;
  2233. htab = &net->xfrm.policy_bydst[dir];
  2234. xfrm_hash_free(htab->table, sz);
  2235. }
  2236. xfrm_hash_free(net->xfrm.policy_byidx, sz);
  2237. out_byidx:
  2238. return -ENOMEM;
  2239. }
  2240. static void xfrm_policy_fini(struct net *net)
  2241. {
  2242. struct xfrm_audit audit_info;
  2243. unsigned int sz;
  2244. int dir;
  2245. flush_work(&net->xfrm.policy_hash_work);
  2246. #ifdef CONFIG_XFRM_SUB_POLICY
  2247. audit_info.loginuid = -1;
  2248. audit_info.sessionid = -1;
  2249. audit_info.secid = 0;
  2250. xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
  2251. #endif
  2252. audit_info.loginuid = -1;
  2253. audit_info.sessionid = -1;
  2254. audit_info.secid = 0;
  2255. xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
  2256. WARN_ON(!list_empty(&net->xfrm.policy_all));
  2257. for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
  2258. struct xfrm_policy_hash *htab;
  2259. WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
  2260. htab = &net->xfrm.policy_bydst[dir];
  2261. sz = (htab->hmask + 1);
  2262. WARN_ON(!hlist_empty(htab->table));
  2263. xfrm_hash_free(htab->table, sz);
  2264. }
  2265. sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
  2266. WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
  2267. xfrm_hash_free(net->xfrm.policy_byidx, sz);
  2268. }
  2269. static int __net_init xfrm_net_init(struct net *net)
  2270. {
  2271. int rv;
  2272. rv = xfrm_statistics_init(net);
  2273. if (rv < 0)
  2274. goto out_statistics;
  2275. rv = xfrm_state_init(net);
  2276. if (rv < 0)
  2277. goto out_state;
  2278. rv = xfrm_policy_init(net);
  2279. if (rv < 0)
  2280. goto out_policy;
  2281. xfrm_dst_ops_init(net);
  2282. rv = xfrm_sysctl_init(net);
  2283. if (rv < 0)
  2284. goto out_sysctl;
  2285. return 0;
  2286. out_sysctl:
  2287. xfrm_policy_fini(net);
  2288. out_policy:
  2289. xfrm_state_fini(net);
  2290. out_state:
  2291. xfrm_statistics_fini(net);
  2292. out_statistics:
  2293. return rv;
  2294. }
  2295. static void __net_exit xfrm_net_exit(struct net *net)
  2296. {
  2297. xfrm_sysctl_fini(net);
  2298. xfrm_policy_fini(net);
  2299. xfrm_state_fini(net);
  2300. xfrm_statistics_fini(net);
  2301. }
  2302. static struct pernet_operations __net_initdata xfrm_net_ops = {
  2303. .init = xfrm_net_init,
  2304. .exit = xfrm_net_exit,
  2305. };
  2306. void __init xfrm_init(void)
  2307. {
  2308. register_pernet_subsys(&xfrm_net_ops);
  2309. xfrm_input_init();
  2310. }
  2311. #ifdef CONFIG_AUDITSYSCALL
  2312. static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
  2313. struct audit_buffer *audit_buf)
  2314. {
  2315. struct xfrm_sec_ctx *ctx = xp->security;
  2316. struct xfrm_selector *sel = &xp->selector;
  2317. if (ctx)
  2318. audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
  2319. ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
switch (sel->family) {
  2321. case AF_INET:
  2322. audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
  2323. if (sel->prefixlen_s != 32)
  2324. audit_log_format(audit_buf, " src_prefixlen=%d",
  2325. sel->prefixlen_s);
  2326. audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
  2327. if (sel->prefixlen_d != 32)
  2328. audit_log_format(audit_buf, " dst_prefixlen=%d",
  2329. sel->prefixlen_d);
  2330. break;
  2331. case AF_INET6:
  2332. audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
  2333. if (sel->prefixlen_s != 128)
  2334. audit_log_format(audit_buf, " src_prefixlen=%d",
  2335. sel->prefixlen_s);
  2336. audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
  2337. if (sel->prefixlen_d != 128)
  2338. audit_log_format(audit_buf, " dst_prefixlen=%d",
  2339. sel->prefixlen_d);
  2340. break;
  2341. }
  2342. }
  2343. void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
  2344. uid_t auid, u32 sessionid, u32 secid)
  2345. {
  2346. struct audit_buffer *audit_buf;
  2347. audit_buf = xfrm_audit_start("SPD-add");
  2348. if (audit_buf == NULL)
  2349. return;
  2350. xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
  2351. audit_log_format(audit_buf, " res=%u", result);
  2352. xfrm_audit_common_policyinfo(xp, audit_buf);
  2353. audit_log_end(audit_buf);
  2354. }
  2355. EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
  2356. void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
  2357. uid_t auid, u32 sessionid, u32 secid)
  2358. {
  2359. struct audit_buffer *audit_buf;
  2360. audit_buf = xfrm_audit_start("SPD-delete");
  2361. if (audit_buf == NULL)
  2362. return;
  2363. xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
  2364. audit_log_format(audit_buf, " res=%u", result);
  2365. xfrm_audit_common_policyinfo(xp, audit_buf);
  2366. audit_log_end(audit_buf);
  2367. }
  2368. EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
  2369. #endif
  2370. #ifdef CONFIG_XFRM_MIGRATE
  2371. static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
  2372. const struct xfrm_selector *sel_tgt)
  2373. {
  2374. if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
  2375. if (sel_tgt->family == sel_cmp->family &&
  2376. xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
  2377. sel_cmp->family) == 0 &&
  2378. xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
  2379. sel_cmp->family) == 0 &&
  2380. sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
  2381. sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
  2382. return true;
  2383. }
  2384. } else {
  2385. if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
  2386. return true;
  2387. }
  2388. }
  2389. return false;
  2390. }
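/* Find the policy to migrate: search the exact hash chain for the selector
 * and type first, then the inexact list for a match with better (lower)
 * priority, and return it with a reference held. */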
  2391. static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector *sel,
  2392. u8 dir, u8 type)
  2393. {
  2394. struct xfrm_policy *pol, *ret = NULL;
  2395. struct hlist_node *entry;
  2396. struct hlist_head *chain;
  2397. u32 priority = ~0U;
  2398. read_lock_bh(&xfrm_policy_lock);
  2399. chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
  2400. hlist_for_each_entry(pol, entry, chain, bydst) {
  2401. if (xfrm_migrate_selector_match(sel, &pol->selector) &&
  2402. pol->type == type) {
  2403. ret = pol;
  2404. priority = ret->priority;
  2405. break;
  2406. }
  2407. }
  2408. chain = &init_net.xfrm.policy_inexact[dir];
  2409. hlist_for_each_entry(pol, entry, chain, bydst) {
  2410. if (xfrm_migrate_selector_match(sel, &pol->selector) &&
  2411. pol->type == type &&
  2412. pol->priority < priority) {
  2413. ret = pol;
  2414. break;
  2415. }
  2416. }
  2417. if (ret)
  2418. xfrm_pol_hold(ret);
  2419. read_unlock_bh(&xfrm_policy_lock);
  2420. return ret;
  2421. }
  2422. static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
  2423. {
  2424. int match = 0;
  2425. if (t->mode == m->mode && t->id.proto == m->proto &&
  2426. (m->reqid == 0 || t->reqid == m->reqid)) {
  2427. switch (t->mode) {
  2428. case XFRM_MODE_TUNNEL:
  2429. case XFRM_MODE_BEET:
  2430. if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
  2431. m->old_family) == 0 &&
  2432. xfrm_addr_cmp(&t->saddr, &m->old_saddr,
  2433. m->old_family) == 0) {
  2434. match = 1;
  2435. }
  2436. break;
  2437. case XFRM_MODE_TRANSPORT:
/* In transport mode the template does not store
 * any IP addresses, hence we just compare mode
 * and protocol. */
  2441. match = 1;
  2442. break;
  2443. default:
  2444. break;
  2445. }
  2446. }
  2447. return match;
  2448. }
  2449. /* update endpoint address(es) of template(s) */
  2450. static int xfrm_policy_migrate(struct xfrm_policy *pol,
  2451. struct xfrm_migrate *m, int num_migrate)
  2452. {
  2453. struct xfrm_migrate *mp;
  2454. int i, j, n = 0;
  2455. write_lock_bh(&pol->lock);
  2456. if (unlikely(pol->walk.dead)) {
  2457. /* target policy has been deleted */
  2458. write_unlock_bh(&pol->lock);
  2459. return -ENOENT;
  2460. }
  2461. for (i = 0; i < pol->xfrm_nr; i++) {
  2462. for (j = 0, mp = m; j < num_migrate; j++, mp++) {
  2463. if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
  2464. continue;
  2465. n++;
  2466. if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
  2467. pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
  2468. continue;
  2469. /* update endpoints */
  2470. memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
  2471. sizeof(pol->xfrm_vec[i].id.daddr));
  2472. memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
  2473. sizeof(pol->xfrm_vec[i].saddr));
  2474. pol->xfrm_vec[i].encap_family = mp->new_family;
  2475. /* flush bundles */
  2476. atomic_inc(&pol->genid);
  2477. }
  2478. }
  2479. write_unlock_bh(&pol->lock);
  2480. if (!n)
  2481. return -ENODATA;
  2482. return 0;
  2483. }
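/* Validate a migrate request: 1..XFRM_MAX_DEPTH entries, each entry must
 * actually change an address, must not use unspecified new addresses, and
 * must not duplicate another entry. */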
  2484. static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
  2485. {
  2486. int i, j;
  2487. if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
  2488. return -EINVAL;
  2489. for (i = 0; i < num_migrate; i++) {
  2490. if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
  2491. m[i].old_family) == 0) &&
  2492. (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
  2493. m[i].old_family) == 0))
  2494. return -EINVAL;
  2495. if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
  2496. xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
  2497. return -EINVAL;
  2498. /* check if there is any duplicated entry */
  2499. for (j = i + 1; j < num_migrate; j++) {
  2500. if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
  2501. sizeof(m[i].old_daddr)) &&
  2502. !memcmp(&m[i].old_saddr, &m[j].old_saddr,
  2503. sizeof(m[i].old_saddr)) &&
  2504. m[i].proto == m[j].proto &&
  2505. m[i].mode == m[j].mode &&
  2506. m[i].reqid == m[j].reqid &&
  2507. m[i].old_family == m[j].old_family)
  2508. return -EINVAL;
  2509. }
  2510. }
  2511. return 0;
  2512. }
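/* Migrate policy and states to new endpoint addresses: validate the request,
 * find the target policy, clone matching states to the new addresses, update
 * the policy templates, delete the old states and announce the migration to
 * the key manager. */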
  2513. int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
  2514. struct xfrm_migrate *m, int num_migrate,
  2515. struct xfrm_kmaddress *k)
  2516. {
  2517. int i, err, nx_cur = 0, nx_new = 0;
  2518. struct xfrm_policy *pol = NULL;
  2519. struct xfrm_state *x, *xc;
  2520. struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
  2521. struct xfrm_state *x_new[XFRM_MAX_DEPTH];
  2522. struct xfrm_migrate *mp;
  2523. if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
  2524. goto out;
  2525. /* Stage 1 - find policy */
  2526. if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
  2527. err = -ENOENT;
  2528. goto out;
  2529. }
  2530. /* Stage 2 - find and update state(s) */
  2531. for (i = 0, mp = m; i < num_migrate; i++, mp++) {
  2532. if ((x = xfrm_migrate_state_find(mp))) {
  2533. x_cur[nx_cur] = x;
  2534. nx_cur++;
  2535. if ((xc = xfrm_state_migrate(x, mp))) {
  2536. x_new[nx_new] = xc;
  2537. nx_new++;
  2538. } else {
  2539. err = -ENODATA;
  2540. goto restore_state;
  2541. }
  2542. }
  2543. }
  2544. /* Stage 3 - update policy */
  2545. if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
  2546. goto restore_state;
  2547. /* Stage 4 - delete old state(s) */
  2548. if (nx_cur) {
  2549. xfrm_states_put(x_cur, nx_cur);
  2550. xfrm_states_delete(x_cur, nx_cur);
  2551. }
  2552. /* Stage 5 - announce */
  2553. km_migrate(sel, dir, type, m, num_migrate, k);
  2554. xfrm_pol_put(pol);
  2555. return 0;
  2556. out:
  2557. return err;
  2558. restore_state:
  2559. if (pol)
  2560. xfrm_pol_put(pol);
  2561. if (nx_cur)
  2562. xfrm_states_put(x_cur, nx_cur);
  2563. if (nx_new)
  2564. xfrm_states_delete(x_new, nx_new);
  2565. return err;
  2566. }
  2567. EXPORT_SYMBOL(xfrm_migrate);
  2568. #endif