/* xfrm_user.c: User interface to configure xfrm engine.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *
 */

#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/init.h>
#include <linux/security.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <net/netlink.h>
#include <asm/uaccess.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <linux/in6.h>
#endif
#include <linux/audit.h>

static inline int alg_len(struct xfrm_algo *alg)
{
        return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
}
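
/*
 * Validation helpers for the optional rtattrs that accompany SA requests.
 * verify_one_alg() checks that an algorithm attribute is large enough for
 * the key length it claims and that only the NULL algorithms may carry an
 * empty key; it also forcibly NUL-terminates the algorithm name.
 */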
static int verify_one_alg(struct rtattr **attrs, enum xfrm_attr_type_t type)
{
        struct rtattr *rt = attrs[type];
        struct xfrm_algo *algp;

        if (!rt)
                return 0;

        algp = RTA_DATA(rt);
        if (RTA_PAYLOAD(rt) < alg_len(algp))
                return -EINVAL;

        switch (type) {
        case XFRMA_ALG_AUTH:
                if (!algp->alg_key_len &&
                    strcmp(algp->alg_name, "digest_null") != 0)
                        return -EINVAL;
                break;

        case XFRMA_ALG_CRYPT:
                if (!algp->alg_key_len &&
                    strcmp(algp->alg_name, "cipher_null") != 0)
                        return -EINVAL;
                break;

        case XFRMA_ALG_COMP:
                /* Zero length keys are legal. */
                break;

        default:
                return -EINVAL;
        }

        algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
        return 0;
}

static void verify_one_addr(struct rtattr **attrs, enum xfrm_attr_type_t type,
                            xfrm_address_t **addrp)
{
        struct rtattr *rt = attrs[type];

        if (rt && addrp)
                *addrp = RTA_DATA(rt);
}

static inline int verify_sec_ctx_len(struct rtattr **attrs)
{
        struct rtattr *rt = attrs[XFRMA_SEC_CTX];
        struct xfrm_user_sec_ctx *uctx;

        if (!rt)
                return 0;
        uctx = RTA_DATA(rt);
        if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
                return -EINVAL;
        return 0;
}
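
/*
 * Sanity-check a new-SA request: the address family must be supported,
 * the protocol must be accompanied by exactly the attribute set it needs
 * (e.g. ESP requires an auth or crypt algorithm, IPcomp a compression
 * algorithm), and the mode must be one the stack understands.
 */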
static int verify_newsa_info(struct xfrm_usersa_info *p,
                             struct rtattr **attrs)
{
        int err;

        err = -EINVAL;
        switch (p->family) {
        case AF_INET:
                break;

        case AF_INET6:
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                break;
#else
                err = -EAFNOSUPPORT;
                goto out;
#endif

        default:
                goto out;
        }

        err = -EINVAL;
        switch (p->id.proto) {
        case IPPROTO_AH:
                if (!attrs[XFRMA_ALG_AUTH] ||
                    attrs[XFRMA_ALG_CRYPT] ||
                    attrs[XFRMA_ALG_COMP])
                        goto out;
                break;

        case IPPROTO_ESP:
                if ((!attrs[XFRMA_ALG_AUTH] &&
                     !attrs[XFRMA_ALG_CRYPT]) ||
                    attrs[XFRMA_ALG_COMP])
                        goto out;
                break;

        case IPPROTO_COMP:
                if (!attrs[XFRMA_ALG_COMP] ||
                    attrs[XFRMA_ALG_AUTH] ||
                    attrs[XFRMA_ALG_CRYPT])
                        goto out;
                break;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        case IPPROTO_DSTOPTS:
        case IPPROTO_ROUTING:
                if (attrs[XFRMA_ALG_COMP] ||
                    attrs[XFRMA_ALG_AUTH] ||
                    attrs[XFRMA_ALG_CRYPT] ||
                    attrs[XFRMA_ENCAP] ||
                    attrs[XFRMA_SEC_CTX] ||
                    !attrs[XFRMA_COADDR])
                        goto out;
                break;
#endif

        default:
                goto out;
        }

        if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
                goto out;
        if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
                goto out;
        if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP)))
                goto out;
        if ((err = verify_sec_ctx_len(attrs)))
                goto out;

        err = -EINVAL;
        switch (p->mode) {
        case XFRM_MODE_TRANSPORT:
        case XFRM_MODE_TUNNEL:
        case XFRM_MODE_ROUTEOPTIMIZATION:
        case XFRM_MODE_BEET:
                break;

        default:
                goto out;
        }

        err = 0;

out:
        return err;
}
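
/*
 * The attach_* helpers duplicate user-supplied attributes into freshly
 * allocated kernel copies hanging off the xfrm_state being constructed.
 */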
static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
                           struct xfrm_algo_desc *(*get_byname)(char *, int),
                           struct rtattr *u_arg)
{
        struct rtattr *rta = u_arg;
        struct xfrm_algo *p, *ualg;
        struct xfrm_algo_desc *algo;

        if (!rta)
                return 0;

        ualg = RTA_DATA(rta);

        algo = get_byname(ualg->alg_name, 1);
        if (!algo)
                return -ENOSYS;
        *props = algo->desc.sadb_alg_id;

        p = kmemdup(ualg, alg_len(ualg), GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        strcpy(p->alg_name, algo->name);
        *algpp = p;
        return 0;
}

static int attach_encap_tmpl(struct xfrm_encap_tmpl **encapp, struct rtattr *u_arg)
{
        struct rtattr *rta = u_arg;
        struct xfrm_encap_tmpl *p, *uencap;

        if (!rta)
                return 0;

        uencap = RTA_DATA(rta);
        p = kmemdup(uencap, sizeof(*p), GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        *encapp = p;
        return 0;
}

static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
{
        int len = 0;

        if (xfrm_ctx) {
                len += sizeof(struct xfrm_user_sec_ctx);
                len += xfrm_ctx->ctx_len;
        }
        return len;
}

static int attach_sec_ctx(struct xfrm_state *x, struct rtattr *u_arg)
{
        struct xfrm_user_sec_ctx *uctx;

        if (!u_arg)
                return 0;

        uctx = RTA_DATA(u_arg);
        return security_xfrm_state_alloc(x, uctx);
}

static int attach_one_addr(xfrm_address_t **addrpp, struct rtattr *u_arg)
{
        struct rtattr *rta = u_arg;
        xfrm_address_t *p, *uaddrp;

        if (!rta)
                return 0;

        uaddrp = RTA_DATA(rta);
        p = kmemdup(uaddrp, sizeof(*p), GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        *addrpp = p;
        return 0;
}
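
/* Copy the user-visible SA parameters into the kernel xfrm_state. */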
static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
        memcpy(&x->id, &p->id, sizeof(x->id));
        memcpy(&x->sel, &p->sel, sizeof(x->sel));
        memcpy(&x->lft, &p->lft, sizeof(x->lft));
        x->props.mode = p->mode;
        x->props.replay_window = p->replay_window;
        x->props.reqid = p->reqid;
        x->props.family = p->family;
        memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
        x->props.flags = p->flags;

        /*
         * Set inner address family if the KM left it as zero.
         * See comment in validate_tmpl.
         */
        if (!x->sel.family)
                x->sel.family = p->family;
}

/*
 * someday when pfkey also has support, we could have the code
 * somehow made shareable and move it to xfrm_state.c - JHS
 *
 */
static void xfrm_update_ae_params(struct xfrm_state *x, struct rtattr **attrs)
{
        struct rtattr *rp = attrs[XFRMA_REPLAY_VAL];
        struct rtattr *lt = attrs[XFRMA_LTIME_VAL];
        struct rtattr *et = attrs[XFRMA_ETIMER_THRESH];
        struct rtattr *rt = attrs[XFRMA_REPLAY_THRESH];

        if (rp) {
                struct xfrm_replay_state *replay;
                replay = RTA_DATA(rp);
                memcpy(&x->replay, replay, sizeof(*replay));
                memcpy(&x->preplay, replay, sizeof(*replay));
        }

        if (lt) {
                struct xfrm_lifetime_cur *ltime;
                ltime = RTA_DATA(lt);
                x->curlft.bytes = ltime->bytes;
                x->curlft.packets = ltime->packets;
                x->curlft.add_time = ltime->add_time;
                x->curlft.use_time = ltime->use_time;
        }

        if (et)
                x->replay_maxage = *(u32 *)RTA_DATA(et);

        if (rt)
                x->replay_maxdiff = *(u32 *)RTA_DATA(rt);
}
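
/*
 * Build a fully initialised xfrm_state from an XFRM_MSG_NEWSA/UPDSA
 * request: copy the base parameters, attach algorithms, encapsulation
 * template, care-of address and security context, then apply any
 * replay/lifetime overrides supplied via async-event attributes.
 */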
static struct xfrm_state *xfrm_state_construct(struct xfrm_usersa_info *p,
                                               struct rtattr **attrs,
                                               int *errp)
{
        struct xfrm_state *x = xfrm_state_alloc();
        int err = -ENOMEM;

        if (!x)
                goto error_no_put;

        copy_from_user_state(x, p);

        if ((err = attach_one_algo(&x->aalg, &x->props.aalgo,
                                   xfrm_aalg_get_byname,
                                   attrs[XFRMA_ALG_AUTH])))
                goto error;
        if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
                                   xfrm_ealg_get_byname,
                                   attrs[XFRMA_ALG_CRYPT])))
                goto error;
        if ((err = attach_one_algo(&x->calg, &x->props.calgo,
                                   xfrm_calg_get_byname,
                                   attrs[XFRMA_ALG_COMP])))
                goto error;
        if ((err = attach_encap_tmpl(&x->encap, attrs[XFRMA_ENCAP])))
                goto error;
        if ((err = attach_one_addr(&x->coaddr, attrs[XFRMA_COADDR])))
                goto error;
        err = xfrm_init_state(x);
        if (err)
                goto error;

        if ((err = attach_sec_ctx(x, attrs[XFRMA_SEC_CTX])))
                goto error;

        x->km.seq = p->seq;
        x->replay_maxdiff = sysctl_xfrm_aevent_rseqth;
        /* sysctl_xfrm_aevent_etime is in 100ms units */
        x->replay_maxage = (sysctl_xfrm_aevent_etime * HZ) / XFRM_AE_ETH_M;
        x->preplay.bitmap = 0;
        x->preplay.seq = x->replay.seq + x->replay_maxdiff;
        x->preplay.oseq = x->replay.oseq + x->replay_maxdiff;

        /* override default values from above */
        xfrm_update_ae_params(x, (struct rtattr **)attrs);

        return x;

error:
        x->km.state = XFRM_STATE_DEAD;
        xfrm_state_put(x);
error_no_put:
        *errp = err;
        return NULL;
}

static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
                       struct rtattr **attrs)
{
        struct xfrm_usersa_info *p = nlmsg_data(nlh);
        struct xfrm_state *x;
        int err;
        struct km_event c;

        err = verify_newsa_info(p, attrs);
        if (err)
                return err;

        x = xfrm_state_construct(p, attrs, &err);
        if (!x)
                return err;

        xfrm_state_hold(x);
        if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
                err = xfrm_state_add(x);
        else
                err = xfrm_state_update(x);

        xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
                       AUDIT_MAC_IPSEC_ADDSA, err ? 0 : 1, NULL, x);

        if (err < 0) {
                x->km.state = XFRM_STATE_DEAD;
                __xfrm_state_put(x);
                goto out;
        }

        c.seq = nlh->nlmsg_seq;
        c.pid = nlh->nlmsg_pid;
        c.event = nlh->nlmsg_type;

        km_state_notify(x, &c);
out:
        xfrm_state_put(x);
        return err;
}
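
/*
 * Look up an SA either by (daddr, spi, proto) or, for protocols without
 * an SPI of their own, by source and destination address.
 */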
static struct xfrm_state *xfrm_user_state_lookup(struct xfrm_usersa_id *p,
                                                 struct rtattr **attrs,
                                                 int *errp)
{
        struct xfrm_state *x = NULL;
        int err;

        if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
                err = -ESRCH;
                x = xfrm_state_lookup(&p->daddr, p->spi, p->proto, p->family);
        } else {
                xfrm_address_t *saddr = NULL;

                verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
                if (!saddr) {
                        err = -EINVAL;
                        goto out;
                }

                err = -ESRCH;
                x = xfrm_state_lookup_byaddr(&p->daddr, saddr, p->proto,
                                             p->family);
        }

out:
        if (!x && errp)
                *errp = err;
        return x;
}

static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
                       struct rtattr **attrs)
{
        struct xfrm_state *x;
        int err = -ESRCH;
        struct km_event c;
        struct xfrm_usersa_id *p = nlmsg_data(nlh);

        x = xfrm_user_state_lookup(p, attrs, &err);
        if (x == NULL)
                return err;

        if ((err = security_xfrm_state_delete(x)) != 0)
                goto out;

        if (xfrm_state_kern(x)) {
                err = -EPERM;
                goto out;
        }

        err = xfrm_state_delete(x);
        if (err < 0)
                goto out;

        c.seq = nlh->nlmsg_seq;
        c.pid = nlh->nlmsg_pid;
        c.event = nlh->nlmsg_type;
        km_state_notify(x, &c);

out:
        xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
                       AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x);
        xfrm_state_put(x);
        return err;
}

static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
        memcpy(&p->id, &x->id, sizeof(p->id));
        memcpy(&p->sel, &x->sel, sizeof(p->sel));
        memcpy(&p->lft, &x->lft, sizeof(p->lft));
        memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
        memcpy(&p->stats, &x->stats, sizeof(p->stats));
        memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
        p->mode = x->props.mode;
        p->replay_window = x->props.replay_window;
        p->reqid = x->props.reqid;
        p->family = x->props.family;
        p->flags = x->props.flags;
        p->seq = x->km.seq;
}

struct xfrm_dump_info {
        struct sk_buff *in_skb;
        struct sk_buff *out_skb;
        u32 nlmsg_seq;
        u16 nlmsg_flags;
        int start_idx;
        int this_idx;
};
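
/*
 * Dump helpers: serialise one xfrm_state (and its optional attributes)
 * into an XFRM_MSG_NEWSA netlink message.
 */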
static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
{
        int ctx_size = sizeof(struct xfrm_sec_ctx) + s->ctx_len;
        struct xfrm_user_sec_ctx *uctx;
        struct nlattr *attr;

        attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
        if (attr == NULL)
                return -EMSGSIZE;

        uctx = nla_data(attr);
        uctx->exttype = XFRMA_SEC_CTX;
        uctx->len = ctx_size;
        uctx->ctx_doi = s->ctx_doi;
        uctx->ctx_alg = s->ctx_alg;
        uctx->ctx_len = s->ctx_len;
        memcpy(uctx + 1, s->ctx_str, s->ctx_len);

        return 0;
}

static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
{
        struct xfrm_dump_info *sp = ptr;
        struct sk_buff *in_skb = sp->in_skb;
        struct sk_buff *skb = sp->out_skb;
        struct xfrm_usersa_info *p;
        struct nlmsghdr *nlh;

        if (sp->this_idx < sp->start_idx)
                goto out;

        nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
                        XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
        if (nlh == NULL)
                return -EMSGSIZE;

        p = nlmsg_data(nlh);
        copy_to_user_state(x, p);

        if (x->aalg)
                NLA_PUT(skb, XFRMA_ALG_AUTH, alg_len(x->aalg), x->aalg);
        if (x->ealg)
                NLA_PUT(skb, XFRMA_ALG_CRYPT, alg_len(x->ealg), x->ealg);
        if (x->calg)
                NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);

        if (x->encap)
                NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);

        if (x->security && copy_sec_ctx(x->security, skb) < 0)
                goto nla_put_failure;

        if (x->coaddr)
                NLA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);

        if (x->lastused)
                NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused);

        nlmsg_end(skb, nlh);
out:
        sp->this_idx++;
        return 0;

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct xfrm_dump_info info;

        info.in_skb = cb->skb;
        info.out_skb = skb;
        info.nlmsg_seq = cb->nlh->nlmsg_seq;
        info.nlmsg_flags = NLM_F_MULTI;
        info.this_idx = 0;
        info.start_idx = cb->args[0];
        (void) xfrm_state_walk(0, dump_one_state, &info);
        cb->args[0] = info.this_idx;

        return skb->len;
}

static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
                                          struct xfrm_state *x, u32 seq)
{
        struct xfrm_dump_info info;
        struct sk_buff *skb;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        info.in_skb = in_skb;
        info.out_skb = skb;
        info.nlmsg_seq = seq;
        info.nlmsg_flags = 0;
        info.this_idx = info.start_idx = 0;

        if (dump_one_state(x, 0, &info)) {
                kfree_skb(skb);
                return NULL;
        }

        return skb;
}
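
/*
 * SPD/SAD statistics reporting: build XFRM_MSG_NEWSPDINFO and
 * XFRM_MSG_NEWSADINFO replies and unicast them to the requester.
 */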
static inline size_t xfrm_spdinfo_msgsize(void)
{
        return NLMSG_ALIGN(4)
               + nla_total_size(sizeof(struct xfrmu_spdinfo))
               + nla_total_size(sizeof(struct xfrmu_spdhinfo));
}

static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
{
        struct xfrmk_spdinfo si;
        struct xfrmu_spdinfo spc;
        struct xfrmu_spdhinfo sph;
        struct nlmsghdr *nlh;
        u32 *f;

        nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
        if (nlh == NULL) /* shouldnt really happen ... */
                return -EMSGSIZE;

        f = nlmsg_data(nlh);
        *f = flags;
        xfrm_spd_getinfo(&si);
        spc.incnt = si.incnt;
        spc.outcnt = si.outcnt;
        spc.fwdcnt = si.fwdcnt;
        spc.inscnt = si.inscnt;
        spc.outscnt = si.outscnt;
        spc.fwdscnt = si.fwdscnt;
        sph.spdhcnt = si.spdhcnt;
        sph.spdhmcnt = si.spdhmcnt;

        NLA_PUT(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
        NLA_PUT(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);

        return nlmsg_end(skb, nlh);

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
                            struct rtattr **attrs)
{
        struct sk_buff *r_skb;
        u32 *flags = nlmsg_data(nlh);
        u32 spid = NETLINK_CB(skb).pid;
        u32 seq = nlh->nlmsg_seq;

        r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
        if (r_skb == NULL)
                return -ENOMEM;

        if (build_spdinfo(r_skb, spid, seq, *flags) < 0)
                BUG();

        return nlmsg_unicast(xfrm_nl, r_skb, spid);
}

static inline size_t xfrm_sadinfo_msgsize(void)
{
        return NLMSG_ALIGN(4)
               + nla_total_size(sizeof(struct xfrmu_sadhinfo))
               + nla_total_size(4); /* XFRMA_SAD_CNT */
}

static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
{
        struct xfrmk_sadinfo si;
        struct xfrmu_sadhinfo sh;
        struct nlmsghdr *nlh;
        u32 *f;

        nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
        if (nlh == NULL) /* shouldnt really happen ... */
                return -EMSGSIZE;

        f = nlmsg_data(nlh);
        *f = flags;
        xfrm_sad_getinfo(&si);

        sh.sadhmcnt = si.sadhmcnt;
        sh.sadhcnt = si.sadhcnt;

        NLA_PUT_U32(skb, XFRMA_SAD_CNT, si.sadcnt);
        NLA_PUT(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);

        return nlmsg_end(skb, nlh);

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
                            struct rtattr **attrs)
{
        struct sk_buff *r_skb;
        u32 *flags = nlmsg_data(nlh);
        u32 spid = NETLINK_CB(skb).pid;
        u32 seq = nlh->nlmsg_seq;

        r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
        if (r_skb == NULL)
                return -ENOMEM;

        if (build_sadinfo(r_skb, spid, seq, *flags) < 0)
                BUG();

        return nlmsg_unicast(xfrm_nl, r_skb, spid);
}

static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
                       struct rtattr **attrs)
{
        struct xfrm_usersa_id *p = nlmsg_data(nlh);
        struct xfrm_state *x;
        struct sk_buff *resp_skb;
        int err = -ESRCH;

        x = xfrm_user_state_lookup(p, attrs, &err);
        if (x == NULL)
                goto out_noput;

        resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
        if (IS_ERR(resp_skb)) {
                err = PTR_ERR(resp_skb);
        } else {
                err = nlmsg_unicast(xfrm_nl, resp_skb, NETLINK_CB(skb).pid);
        }
        xfrm_state_put(x);
out_noput:
        return err;
}

static int verify_userspi_info(struct xfrm_userspi_info *p)
{
        switch (p->info.id.proto) {
        case IPPROTO_AH:
        case IPPROTO_ESP:
                break;

        case IPPROTO_COMP:
                /* IPCOMP spi is 16-bits. */
                if (p->max >= 0x10000)
                        return -EINVAL;
                break;

        default:
                return -EINVAL;
        }

        if (p->min > p->max)
                return -EINVAL;

        return 0;
}
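
/*
 * Allocate an SPI for a larval SA (XFRM_MSG_ALLOCSPI), reusing a pending
 * acquire if one matches, and report the resulting state back to the
 * caller.
 */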
static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
                              struct rtattr **attrs)
{
        struct xfrm_state *x;
        struct xfrm_userspi_info *p;
        struct sk_buff *resp_skb;
        xfrm_address_t *daddr;
        int family;
        int err;

        p = nlmsg_data(nlh);
        err = verify_userspi_info(p);
        if (err)
                goto out_noput;

        family = p->info.family;
        daddr = &p->info.id.daddr;

        x = NULL;
        if (p->info.seq) {
                x = xfrm_find_acq_byseq(p->info.seq);
                if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) {
                        xfrm_state_put(x);
                        x = NULL;
                }
        }

        if (!x)
                x = xfrm_find_acq(p->info.mode, p->info.reqid,
                                  p->info.id.proto, daddr,
                                  &p->info.saddr, 1,
                                  family);
        err = -ENOENT;
        if (x == NULL)
                goto out_noput;

        resp_skb = ERR_PTR(-ENOENT);

        spin_lock_bh(&x->lock);
        if (x->km.state != XFRM_STATE_DEAD) {
                xfrm_alloc_spi(x, htonl(p->min), htonl(p->max));
                if (x->id.spi)
                        resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
        }
        spin_unlock_bh(&x->lock);

        if (IS_ERR(resp_skb)) {
                err = PTR_ERR(resp_skb);
                goto out;
        }

        err = nlmsg_unicast(xfrm_nl, resp_skb, NETLINK_CB(skb).pid);

out:
        xfrm_state_put(x);
out_noput:
        return err;
}

static int verify_policy_dir(u8 dir)
{
        switch (dir) {
        case XFRM_POLICY_IN:
        case XFRM_POLICY_OUT:
        case XFRM_POLICY_FWD:
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

static int verify_policy_type(u8 type)
{
        switch (type) {
        case XFRM_POLICY_TYPE_MAIN:
#ifdef CONFIG_XFRM_SUB_POLICY
        case XFRM_POLICY_TYPE_SUB:
#endif
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
{
        switch (p->share) {
        case XFRM_SHARE_ANY:
        case XFRM_SHARE_SESSION:
        case XFRM_SHARE_USER:
        case XFRM_SHARE_UNIQUE:
                break;

        default:
                return -EINVAL;
        }

        switch (p->action) {
        case XFRM_POLICY_ALLOW:
        case XFRM_POLICY_BLOCK:
                break;

        default:
                return -EINVAL;
        }

        switch (p->sel.family) {
        case AF_INET:
                break;

        case AF_INET6:
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                break;
#else
                return -EAFNOSUPPORT;
#endif

        default:
                return -EINVAL;
        }

        return verify_policy_dir(p->dir);
}
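
/*
 * Policy construction helpers: copy the security context, templates and
 * policy type from userspace attributes into an xfrm_policy.
 */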
static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct rtattr **attrs)
{
        struct rtattr *rt = attrs[XFRMA_SEC_CTX];
        struct xfrm_user_sec_ctx *uctx;

        if (!rt)
                return 0;

        uctx = RTA_DATA(rt);
        return security_xfrm_policy_alloc(pol, uctx);
}

static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
                           int nr)
{
        int i;

        xp->xfrm_nr = nr;
        for (i = 0; i < nr; i++, ut++) {
                struct xfrm_tmpl *t = &xp->xfrm_vec[i];

                memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
                memcpy(&t->saddr, &ut->saddr,
                       sizeof(xfrm_address_t));
                t->reqid = ut->reqid;
                t->mode = ut->mode;
                t->share = ut->share;
                t->optional = ut->optional;
                t->aalgos = ut->aalgos;
                t->ealgos = ut->ealgos;
                t->calgos = ut->calgos;
                t->encap_family = ut->family;
        }
}

static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
{
        int i;

        if (nr > XFRM_MAX_DEPTH)
                return -EINVAL;

        for (i = 0; i < nr; i++) {
                /* We never validated the ut->family value, so many
                 * applications simply leave it at zero.  The check was
                 * never made and ut->family was ignored because all
                 * templates could be assumed to have the same family as
                 * the policy itself.  Now that we will have ipv4-in-ipv6
                 * and ipv6-in-ipv4 tunnels, this is no longer true.
                 */
                if (!ut[i].family)
                        ut[i].family = family;

                switch (ut[i].family) {
                case AF_INET:
                        break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                case AF_INET6:
                        break;
#endif
                default:
                        return -EINVAL;
                }
        }

        return 0;
}

static int copy_from_user_tmpl(struct xfrm_policy *pol, struct rtattr **attrs)
{
        struct rtattr *rt = attrs[XFRMA_TMPL];

        if (!rt) {
                pol->xfrm_nr = 0;
        } else {
                struct xfrm_user_tmpl *utmpl = RTA_DATA(rt);
                int nr = (rt->rta_len - sizeof(*rt)) / sizeof(*utmpl);
                int err;

                err = validate_tmpl(nr, utmpl, pol->family);
                if (err)
                        return err;

                copy_templates(pol, RTA_DATA(rt), nr);
        }
        return 0;
}

static int copy_from_user_policy_type(u8 *tp, struct rtattr **attrs)
{
        struct rtattr *rt = attrs[XFRMA_POLICY_TYPE];
        struct xfrm_userpolicy_type *upt;
        u8 type = XFRM_POLICY_TYPE_MAIN;
        int err;

        if (rt) {
                upt = RTA_DATA(rt);
                type = upt->type;
        }

        err = verify_policy_type(type);
        if (err)
                return err;

        *tp = type;
        return 0;
}

static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
{
        xp->priority = p->priority;
        xp->index = p->index;
        memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
        memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
        xp->action = p->action;
        xp->flags = p->flags;
        xp->family = p->sel.family;
        /* XXX xp->share = p->share; */
}

static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
{
        memcpy(&p->sel, &xp->selector, sizeof(p->sel));
        memcpy(&p->lft, &xp->lft, sizeof(p->lft));
        memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
        p->priority = xp->priority;
        p->index = xp->index;
        p->sel.family = xp->family;
        p->dir = dir;
        p->action = xp->action;
        p->flags = xp->flags;
        p->share = XFRM_SHARE_ANY; /* XXX xp->share */
}
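
/* Allocate and populate an xfrm_policy from a userspace policy message. */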
static struct xfrm_policy *xfrm_policy_construct(struct xfrm_userpolicy_info *p, struct rtattr **attrs, int *errp)
{
        struct xfrm_policy *xp = xfrm_policy_alloc(GFP_KERNEL);
        int err;

        if (!xp) {
                *errp = -ENOMEM;
                return NULL;
        }

        copy_from_user_policy(xp, p);

        err = copy_from_user_policy_type(&xp->type, attrs);
        if (err)
                goto error;

        if (!(err = copy_from_user_tmpl(xp, attrs)))
                err = copy_from_user_sec_ctx(xp, attrs);
        if (err)
                goto error;

        return xp;
error:
        *errp = err;
        kfree(xp);
        return NULL;
}

static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
                           struct rtattr **attrs)
{
        struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
        struct xfrm_policy *xp;
        struct km_event c;
        int err;
        int excl;

        err = verify_newpolicy_info(p);
        if (err)
                return err;
        err = verify_sec_ctx_len(attrs);
        if (err)
                return err;

        xp = xfrm_policy_construct(p, attrs, &err);
        if (!xp)
                return err;

        /* shouldnt excl be based on nlh flags??
         * Aha! this is anti-netlink really i.e more pfkey derived
         * in netlink excl is a flag and you wouldnt need
         * a type XFRM_MSG_UPDPOLICY - JHS */
        excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
        err = xfrm_policy_insert(p->dir, xp, excl);
        xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
                       AUDIT_MAC_IPSEC_ADDSPD, err ? 0 : 1, xp, NULL);

        if (err) {
                security_xfrm_policy_free(xp);
                kfree(xp);
                return err;
        }

        c.event = nlh->nlmsg_type;
        c.seq = nlh->nlmsg_seq;
        c.pid = nlh->nlmsg_pid;
        km_policy_notify(xp, p->dir, &c);

        xfrm_pol_put(xp);

        return 0;
}

static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
{
        struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
        int i;

        if (xp->xfrm_nr == 0)
                return 0;

        for (i = 0; i < xp->xfrm_nr; i++) {
                struct xfrm_user_tmpl *up = &vec[i];
                struct xfrm_tmpl *kp = &xp->xfrm_vec[i];

                memcpy(&up->id, &kp->id, sizeof(up->id));
                up->family = kp->encap_family;
                memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
                up->reqid = kp->reqid;
                up->mode = kp->mode;
                up->share = kp->share;
                up->optional = kp->optional;
                up->aalgos = kp->aalgos;
                up->ealgos = kp->ealgos;
                up->calgos = kp->calgos;
        }

        return nla_put(skb, XFRMA_TMPL,
                       sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
}

static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
{
        if (x->security) {
                return copy_sec_ctx(x->security, skb);
        }
        return 0;
}

static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
{
        if (xp->security) {
                return copy_sec_ctx(xp->security, skb);
        }
        return 0;
}

static inline size_t userpolicy_type_attrsize(void)
{
#ifdef CONFIG_XFRM_SUB_POLICY
        return nla_total_size(sizeof(struct xfrm_userpolicy_type));
#else
        return 0;
#endif
}

#ifdef CONFIG_XFRM_SUB_POLICY
static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
        struct xfrm_userpolicy_type upt = {
                .type = type,
        };

        return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
}
#else
static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
        return 0;
}
#endif

static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
{
        struct xfrm_dump_info *sp = ptr;
        struct xfrm_userpolicy_info *p;
        struct sk_buff *in_skb = sp->in_skb;
        struct sk_buff *skb = sp->out_skb;
        struct nlmsghdr *nlh;

        if (sp->this_idx < sp->start_idx)
                goto out;

        nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
                        XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
        if (nlh == NULL)
                return -EMSGSIZE;

        p = nlmsg_data(nlh);
        copy_to_user_policy(xp, p, dir);
        if (copy_to_user_tmpl(xp, skb) < 0)
                goto nlmsg_failure;
        if (copy_to_user_sec_ctx(xp, skb))
                goto nlmsg_failure;
        if (copy_to_user_policy_type(xp->type, skb) < 0)
                goto nlmsg_failure;

        nlmsg_end(skb, nlh);
out:
        sp->this_idx++;
        return 0;

nlmsg_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct xfrm_dump_info info;

        info.in_skb = cb->skb;
        info.out_skb = skb;
        info.nlmsg_seq = cb->nlh->nlmsg_seq;
        info.nlmsg_flags = NLM_F_MULTI;
        info.this_idx = 0;
        info.start_idx = cb->args[0];
        (void) xfrm_policy_walk(XFRM_POLICY_TYPE_MAIN, dump_one_policy, &info);
#ifdef CONFIG_XFRM_SUB_POLICY
        (void) xfrm_policy_walk(XFRM_POLICY_TYPE_SUB, dump_one_policy, &info);
#endif
        cb->args[0] = info.this_idx;

        return skb->len;
}

static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
                                           struct xfrm_policy *xp,
                                           int dir, u32 seq)
{
        struct xfrm_dump_info info;
        struct sk_buff *skb;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        info.in_skb = in_skb;
        info.out_skb = skb;
        info.nlmsg_seq = seq;
        info.nlmsg_flags = 0;
        info.this_idx = info.start_idx = 0;

        if (dump_one_policy(xp, dir, 0, &info) < 0) {
                kfree_skb(skb);
                return NULL;
        }

        return skb;
}

static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
                           struct rtattr **attrs)
{
        struct xfrm_policy *xp;
        struct xfrm_userpolicy_id *p;
        u8 type = XFRM_POLICY_TYPE_MAIN;
        int err;
        struct km_event c;
        int delete;

        p = nlmsg_data(nlh);
        delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;

        err = copy_from_user_policy_type(&type, attrs);
        if (err)
                return err;

        err = verify_policy_dir(p->dir);
        if (err)
                return err;

        if (p->index)
                xp = xfrm_policy_byid(type, p->dir, p->index, delete, &err);
        else {
                struct rtattr *rt = attrs[XFRMA_SEC_CTX];
                struct xfrm_policy tmp;

                err = verify_sec_ctx_len(attrs);
                if (err)
                        return err;

                memset(&tmp, 0, sizeof(struct xfrm_policy));
                if (rt) {
                        struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt);

                        if ((err = security_xfrm_policy_alloc(&tmp, uctx)))
                                return err;
                }
                xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security,
                                           delete, &err);
                security_xfrm_policy_free(&tmp);
        }
        if (xp == NULL)
                return -ENOENT;

        if (!delete) {
                struct sk_buff *resp_skb;

                resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
                if (IS_ERR(resp_skb)) {
                        err = PTR_ERR(resp_skb);
                } else {
                        err = nlmsg_unicast(xfrm_nl, resp_skb,
                                            NETLINK_CB(skb).pid);
                }
        } else {
                xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
                               AUDIT_MAC_IPSEC_DELSPD, err ? 0 : 1, xp, NULL);

                if (err != 0)
                        goto out;

                c.data.byid = p->index;
                c.event = nlh->nlmsg_type;
                c.seq = nlh->nlmsg_seq;
                c.pid = nlh->nlmsg_pid;
                km_policy_notify(xp, p->dir, &c);
        }

out:
        xfrm_pol_put(xp);
        return err;
}

static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
                         struct rtattr **attrs)
{
        struct km_event c;
        struct xfrm_usersa_flush *p = nlmsg_data(nlh);
        struct xfrm_audit audit_info;
        int err;

        audit_info.loginuid = NETLINK_CB(skb).loginuid;
        audit_info.secid = NETLINK_CB(skb).sid;
        err = xfrm_state_flush(p->proto, &audit_info);
        if (err)
                return err;
        c.data.proto = p->proto;
        c.event = nlh->nlmsg_type;
        c.seq = nlh->nlmsg_seq;
        c.pid = nlh->nlmsg_pid;
        km_state_notify(NULL, &c);

        return 0;
}
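
/*
 * Async event (AE) support: report and update replay counters, lifetime
 * usage and the replay/expiry notification thresholds of an SA.
 */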
static inline size_t xfrm_aevent_msgsize(void)
{
        return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
               + nla_total_size(sizeof(struct xfrm_replay_state))
               + nla_total_size(sizeof(struct xfrm_lifetime_cur))
               + nla_total_size(4) /* XFRM_AE_RTHR */
               + nla_total_size(4); /* XFRM_AE_ETHR */
}

static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
{
        struct xfrm_aevent_id *id;
        struct nlmsghdr *nlh;

        nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
        if (nlh == NULL)
                return -EMSGSIZE;

        id = nlmsg_data(nlh);
        memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
        id->sa_id.spi = x->id.spi;
        id->sa_id.family = x->props.family;
        id->sa_id.proto = x->id.proto;
        memcpy(&id->saddr, &x->props.saddr, sizeof(x->props.saddr));
        id->reqid = x->props.reqid;
        id->flags = c->data.aevent;

        NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);
        NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);

        if (id->flags & XFRM_AE_RTHR)
                NLA_PUT_U32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);

        if (id->flags & XFRM_AE_ETHR)
                NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH,
                            x->replay_maxage * 10 / HZ);

        return nlmsg_end(skb, nlh);

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
                       struct rtattr **attrs)
{
        struct xfrm_state *x;
        struct sk_buff *r_skb;
        int err;
        struct km_event c;
        struct xfrm_aevent_id *p = nlmsg_data(nlh);
        struct xfrm_usersa_id *id = &p->sa_id;

        r_skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC);
        if (r_skb == NULL)
                return -ENOMEM;

        x = xfrm_state_lookup(&id->daddr, id->spi, id->proto, id->family);
        if (x == NULL) {
                kfree_skb(r_skb);
                return -ESRCH;
        }

        /*
         * XXX: is this lock really needed - none of the other
         * gets lock (the concern is things getting updated
         * while we are still reading) - jhs
         */
        spin_lock_bh(&x->lock);
        c.data.aevent = p->flags;
        c.seq = nlh->nlmsg_seq;
        c.pid = nlh->nlmsg_pid;

        if (build_aevent(r_skb, x, &c) < 0)
                BUG();
        err = nlmsg_unicast(xfrm_nl, r_skb, NETLINK_CB(skb).pid);
        spin_unlock_bh(&x->lock);
        xfrm_state_put(x);
        return err;
}

static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
                       struct rtattr **attrs)
{
        struct xfrm_state *x;
        struct km_event c;
        int err = -EINVAL;
        struct xfrm_aevent_id *p = nlmsg_data(nlh);
        struct rtattr *rp = attrs[XFRMA_REPLAY_VAL];
        struct rtattr *lt = attrs[XFRMA_LTIME_VAL];

        if (!lt && !rp)
                return err;

        /* pedantic mode - thou shalt sayeth replaceth */
        if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
                return err;

        x = xfrm_state_lookup(&p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
        if (x == NULL)
                return -ESRCH;

        if (x->km.state != XFRM_STATE_VALID)
                goto out;

        spin_lock_bh(&x->lock);
        xfrm_update_ae_params(x, attrs);
        spin_unlock_bh(&x->lock);

        c.event = nlh->nlmsg_type;
        c.seq = nlh->nlmsg_seq;
        c.pid = nlh->nlmsg_pid;
        c.data.aevent = XFRM_AE_CU;
        km_state_notify(x, &c);
        err = 0;
out:
        xfrm_state_put(x);
        return err;
}

static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
                             struct rtattr **attrs)
{
        struct km_event c;
        u8 type = XFRM_POLICY_TYPE_MAIN;
        int err;
        struct xfrm_audit audit_info;

        err = copy_from_user_policy_type(&type, attrs);
        if (err)
                return err;

        audit_info.loginuid = NETLINK_CB(skb).loginuid;
        audit_info.secid = NETLINK_CB(skb).sid;
        err = xfrm_policy_flush(type, &audit_info);
        if (err)
                return err;
        c.data.type = type;
        c.event = nlh->nlmsg_type;
        c.seq = nlh->nlmsg_seq;
        c.pid = nlh->nlmsg_pid;
        km_policy_notify(NULL, 0, &c);
        return 0;
}
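
/*
 * Expire handlers: userspace (or an IKE daemon) tells the kernel that a
 * policy or SA has hit a soft or hard limit; hard expiry deletes the
 * object and is audited.
 */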
static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
                               struct rtattr **attrs)
{
        struct xfrm_policy *xp;
        struct xfrm_user_polexpire *up = nlmsg_data(nlh);
        struct xfrm_userpolicy_info *p = &up->pol;
        u8 type = XFRM_POLICY_TYPE_MAIN;
        int err = -ENOENT;

        err = copy_from_user_policy_type(&type, attrs);
        if (err)
                return err;

        if (p->index)
                xp = xfrm_policy_byid(type, p->dir, p->index, 0, &err);
        else {
                struct rtattr *rt = attrs[XFRMA_SEC_CTX];
                struct xfrm_policy tmp;

                err = verify_sec_ctx_len(attrs);
                if (err)
                        return err;

                memset(&tmp, 0, sizeof(struct xfrm_policy));
                if (rt) {
                        struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt);

                        if ((err = security_xfrm_policy_alloc(&tmp, uctx)))
                                return err;
                }
                xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security,
                                           0, &err);
                security_xfrm_policy_free(&tmp);
        }
        if (xp == NULL)
                return -ENOENT;

        read_lock(&xp->lock);
        if (xp->dead) {
                read_unlock(&xp->lock);
                goto out;
        }
        read_unlock(&xp->lock);
        err = 0;
        if (up->hard) {
                xfrm_policy_delete(xp, p->dir);
                xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
                               AUDIT_MAC_IPSEC_DELSPD, 1, xp, NULL);
        } else {
                // reset the timers here?
                printk("Dont know what to do with soft policy expire\n");
        }
        km_policy_expired(xp, p->dir, up->hard, current->pid);

out:
        xfrm_pol_put(xp);
        return err;
}

static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
                              struct rtattr **attrs)
{
        struct xfrm_state *x;
        int err;
        struct xfrm_user_expire *ue = nlmsg_data(nlh);
        struct xfrm_usersa_info *p = &ue->state;

        x = xfrm_state_lookup(&p->id.daddr, p->id.spi, p->id.proto, p->family);

        err = -ENOENT;
        if (x == NULL)
                return err;

        spin_lock_bh(&x->lock);
        err = -EINVAL;
        if (x->km.state != XFRM_STATE_VALID)
                goto out;
        km_state_expired(x, ue->hard, current->pid);

        if (ue->hard) {
                __xfrm_state_delete(x);
                xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
                               AUDIT_MAC_IPSEC_DELSA, 1, NULL, x);
        }
        err = 0;
out:
        spin_unlock_bh(&x->lock);
        xfrm_state_put(x);
        return err;
}

static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
                            struct rtattr **attrs)
{
        struct xfrm_policy *xp;
        struct xfrm_user_tmpl *ut;
        int i;
        struct rtattr *rt = attrs[XFRMA_TMPL];

        struct xfrm_user_acquire *ua = nlmsg_data(nlh);
        struct xfrm_state *x = xfrm_state_alloc();
        int err = -ENOMEM;

        if (!x)
                return err;

        err = verify_newpolicy_info(&ua->policy);
        if (err) {
                printk("BAD policy passed\n");
                kfree(x);
                return err;
        }

        /* build an XP */
        xp = xfrm_policy_construct(&ua->policy, (struct rtattr **)attrs, &err);
        if (!xp) {
                kfree(x);
                return err;
        }

        memcpy(&x->id, &ua->id, sizeof(ua->id));
        memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
        memcpy(&x->sel, &ua->sel, sizeof(ua->sel));

        ut = RTA_DATA(rt);
        /* extract the templates and for each call km_key */
        for (i = 0; i < xp->xfrm_nr; i++, ut++) {
                struct xfrm_tmpl *t = &xp->xfrm_vec[i];
                memcpy(&x->id, &t->id, sizeof(x->id));
                x->props.mode = t->mode;
                x->props.reqid = t->reqid;
                x->props.family = ut->family;
                t->aalgos = ua->aalgos;
                t->ealgos = ua->ealgos;
                t->calgos = ua->calgos;
                err = km_query(x, t, xp);
        }

        kfree(x);
        kfree(xp);

        return 0;
}
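
/*
 * XFRM_MSG_MIGRATE support (CONFIG_XFRM_MIGRATE): translate the user
 * migrate attributes and pass them to xfrm_migrate(), plus the notifier
 * used to broadcast migrations to listeners.
 */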
#ifdef CONFIG_XFRM_MIGRATE
static int copy_from_user_migrate(struct xfrm_migrate *ma,
                                  struct rtattr **attrs, int *num)
{
        struct rtattr *rt = attrs[XFRMA_MIGRATE];
        struct xfrm_user_migrate *um;
        int i, num_migrate;

        um = RTA_DATA(rt);
        num_migrate = (rt->rta_len - sizeof(*rt)) / sizeof(*um);

        if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
                return -EINVAL;

        for (i = 0; i < num_migrate; i++, um++, ma++) {
                memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
                memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
                memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
                memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));

                ma->proto = um->proto;
                ma->mode = um->mode;
                ma->reqid = um->reqid;

                ma->old_family = um->old_family;
                ma->new_family = um->new_family;
        }

        *num = i;
        return 0;
}

static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
                           struct rtattr **attrs)
{
        struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
        struct xfrm_migrate m[XFRM_MAX_DEPTH];
        u8 type;
        int err;
        int n = 0;

        if (attrs[XFRMA_MIGRATE] == NULL)
                return -EINVAL;

        err = copy_from_user_policy_type(&type, (struct rtattr **)attrs);
        if (err)
                return err;

        err = copy_from_user_migrate((struct xfrm_migrate *)m,
                                     (struct rtattr **)attrs, &n);
        if (err)
                return err;

        if (!n)
                return 0;

        xfrm_migrate(&pi->sel, pi->dir, type, m, n);

        return 0;
}
#else
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
                           struct rtattr **attrs)
{
        return -ENOPROTOOPT;
}
#endif
#ifdef CONFIG_XFRM_MIGRATE
static int copy_to_user_migrate(struct xfrm_migrate *m, struct sk_buff *skb)
{
	struct xfrm_user_migrate um;

	memset(&um, 0, sizeof(um));
	um.proto = m->proto;
	um.mode = m->mode;
	um.reqid = m->reqid;
	um.old_family = m->old_family;
	memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
	memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
	um.new_family = m->new_family;
	memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
	memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));

	return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
}

static inline size_t xfrm_migrate_msgsize(int num_migrate)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
	       + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
	       + userpolicy_type_attrsize();
}

static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m,
			 int num_migrate, struct xfrm_selector *sel,
			 u8 dir, u8 type)
{
	struct xfrm_migrate *mp;
	struct xfrm_userpolicy_id *pol_id;
	struct nlmsghdr *nlh;
	int i;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	pol_id = nlmsg_data(nlh);
	/* copy data from selector, dir, and type to the pol_id */
	memset(pol_id, 0, sizeof(*pol_id));
	memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
	pol_id->dir = dir;

	if (copy_to_user_policy_type(type, skb) < 0)
		goto nlmsg_failure;

	for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
		if (copy_to_user_migrate(mp, skb) < 0)
			goto nlmsg_failure;
	}

	return nlmsg_end(skb, nlh);

nlmsg_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
			     struct xfrm_migrate *m, int num_migrate)
{
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	/* build migrate */
	if (build_migrate(skb, m, num_migrate, sel, dir, type) < 0)
		BUG();

	return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC);
}
#else
static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
			     struct xfrm_migrate *m, int num_migrate)
{
	return -ENOPROTOOPT;
}
#endif

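/*
 * Minimum fixed-header size for each configuration message type, indexed
 * by type - XFRM_MSG_BASE.  xfrm_user_rcv_msg() passes this length to
 * nlmsg_parse() so attribute parsing starts right after the type-specific
 * header.
 */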
#define XMSGSIZE(type) sizeof(struct type)

static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_DELSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_GETSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
	[XFRM_MSG_ACQUIRE     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
	[XFRM_MSG_EXPIRE      - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
	[XFRM_MSG_UPDPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_UPDSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_POLEXPIRE   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
	[XFRM_MSG_FLUSHSA     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
	[XFRM_MSG_NEWAE       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
	[XFRM_MSG_GETAE       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
	[XFRM_MSG_REPORT      - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
	[XFRM_MSG_MIGRATE     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_GETSADINFO  - XFRM_MSG_BASE] = sizeof(u32),
	[XFRM_MSG_GETSPDINFO  - XFRM_MSG_BASE] = sizeof(u32),
};

#undef XMSGSIZE

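/*
 * Validation policy for the XFRMA_* attributes: entries with .len reject
 * attributes whose payload is shorter than the given structure, while
 * NLA_U32 entries are checked for a 32-bit payload by nlmsg_parse().
 */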
static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
	[XFRMA_ALG_AUTH]	= { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ALG_CRYPT]	= { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ALG_COMP]	= { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ENCAP]		= { .len = sizeof(struct xfrm_encap_tmpl) },
	[XFRMA_TMPL]		= { .len = sizeof(struct xfrm_user_tmpl) },
	[XFRMA_SEC_CTX]		= { .len = sizeof(struct xfrm_sec_ctx) },
	[XFRMA_LTIME_VAL]	= { .len = sizeof(struct xfrm_lifetime_cur) },
	[XFRMA_REPLAY_VAL]	= { .len = sizeof(struct xfrm_replay_state) },
	[XFRMA_REPLAY_THRESH]	= { .type = NLA_U32 },
	[XFRMA_ETIMER_THRESH]	= { .type = NLA_U32 },
	[XFRMA_SRCADDR]		= { .len = sizeof(xfrm_address_t) },
	[XFRMA_COADDR]		= { .len = sizeof(xfrm_address_t) },
	[XFRMA_POLICY_TYPE]	= { .len = sizeof(struct xfrm_userpolicy_type)},
	[XFRMA_MIGRATE]		= { .len = sizeof(struct xfrm_user_migrate) },
};

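/*
 * Dispatch table: .doit handles a single request, .dump is set only for
 * the GET types that honour NLM_F_DUMP.  XFRM_MSG_DELPOLICY is deliberately
 * routed to xfrm_get_policy(), which handles both lookup and delete.
 */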
static struct xfrm_link {
	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct rtattr **);
	int (*dump)(struct sk_buff *, struct netlink_callback *);
} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
	[XFRM_MSG_DELSA       - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
	[XFRM_MSG_GETSA       - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
						   .dump = xfrm_dump_sa },
	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
	[XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
	[XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
						   .dump = xfrm_dump_policy },
	[XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
	[XFRM_MSG_ACQUIRE     - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
	[XFRM_MSG_EXPIRE      - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
	[XFRM_MSG_UPDPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
	[XFRM_MSG_UPDSA       - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
	[XFRM_MSG_POLEXPIRE   - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
	[XFRM_MSG_FLUSHSA     - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
	[XFRM_MSG_NEWAE       - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
	[XFRM_MSG_GETAE       - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
	[XFRM_MSG_MIGRATE     - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
	[XFRM_MSG_GETSADINFO  - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
	[XFRM_MSG_GETSPDINFO  - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
};

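/*
 * Central receive handler: check privileges, start a dump for GETSA/GETPOLICY
 * requests carrying NLM_F_DUMP, otherwise parse the attributes against
 * xfrma_policy and call the message type's .doit handler.
 */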
static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct nlattr *attrs[XFRMA_MAX+1];
	struct xfrm_link *link;
	int type, err;

	type = nlh->nlmsg_type;
	if (type > XFRM_MSG_MAX)
		return -EINVAL;

	type -= XFRM_MSG_BASE;
	link = &xfrm_dispatch[type];

	/* All operations require privileges, even GET */
	if (security_netlink_recv(skb, CAP_NET_ADMIN))
		return -EPERM;

	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
		if (link->dump == NULL)
			return -EINVAL;

		return netlink_dump_start(xfrm_nl, skb, nlh, link->dump, NULL);
	}

	err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,
			  xfrma_policy);
	if (err < 0)
		return err;

	if (link->doit == NULL)
		return -EINVAL;

	return link->doit(skb, nlh, (struct rtattr **) attrs);
}

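/*
 * Input callback of the NETLINK_XFRM kernel socket; the queue is drained
 * under xfrm_cfg_mutex so that configuration requests are processed one
 * at a time.
 */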
static void xfrm_netlink_rcv(struct sock *sk, int len)
{
	unsigned int qlen = 0;

	do {
		mutex_lock(&xfrm_cfg_mutex);
		netlink_run_queue(sk, &qlen, &xfrm_user_rcv_msg);
		mutex_unlock(&xfrm_cfg_mutex);
	} while (qlen);
}

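/*
 * The notifiers below build an event message into a freshly allocated skb
 * (GFP_ATOMIC) and multicast it on the matching XFRMNLGRP_* group.
 */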
static inline size_t xfrm_expire_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_expire));
}

static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
{
	struct xfrm_user_expire *ue;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ue = nlmsg_data(nlh);
	copy_to_user_state(x, &ue->state);
	ue->hard = (c->data.hard != 0) ? 1 : 0;

	return nlmsg_end(skb, nlh);
}

static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
{
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_expire(skb, x, c) < 0)
		BUG();

	return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
}

static int xfrm_aevent_state_notify(struct xfrm_state *x, struct km_event *c)
{
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_aevent(skb, x, c) < 0)
		BUG();

	return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
}

static int xfrm_notify_sa_flush(struct km_event *c)
{
	struct xfrm_usersa_flush *p;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
	if (nlh == NULL) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	p = nlmsg_data(nlh);
	p->proto = c->data.proto;

	nlmsg_end(skb, nlh);

	return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
}

static inline size_t xfrm_sa_len(struct xfrm_state *x)
{
	size_t l = 0;

	if (x->aalg)
		l += nla_total_size(alg_len(x->aalg));
	if (x->ealg)
		l += nla_total_size(alg_len(x->ealg));
	if (x->calg)
		l += nla_total_size(sizeof(*x->calg));
	if (x->encap)
		l += nla_total_size(sizeof(*x->encap));

	return l;
}

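/*
 * SA add/update/delete notification.  For XFRM_MSG_DELSA the fixed header
 * is the smaller xfrm_usersa_id and the full xfrm_usersa_info is nested
 * inside an XFRMA_SA attribute instead.
 */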
static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c)
{
	struct xfrm_usersa_info *p;
	struct xfrm_usersa_id *id;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int len = xfrm_sa_len(x);
	int headlen;

	headlen = sizeof(*p);
	if (c->event == XFRM_MSG_DELSA) {
		len += nla_total_size(headlen);
		headlen = sizeof(*id);
	}
	len += NLMSG_ALIGN(headlen);

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
	if (nlh == NULL)
		goto nla_put_failure;

	p = nlmsg_data(nlh);
	if (c->event == XFRM_MSG_DELSA) {
		struct nlattr *attr;

		id = nlmsg_data(nlh);
		memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
		id->spi = x->id.spi;
		id->family = x->props.family;
		id->proto = x->id.proto;

		attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
		if (attr == NULL)
			goto nla_put_failure;

		p = nla_data(attr);
	}

	copy_to_user_state(x, p);

	if (x->aalg)
		NLA_PUT(skb, XFRMA_ALG_AUTH, alg_len(x->aalg), x->aalg);
	if (x->ealg)
		NLA_PUT(skb, XFRMA_ALG_CRYPT, alg_len(x->ealg), x->ealg);
	if (x->calg)
		NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
	if (x->encap)
		NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);

	nlmsg_end(skb, nlh);

	return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);

nla_put_failure:
	kfree_skb(skb);
	return -1;
}

static int xfrm_send_state_notify(struct xfrm_state *x, struct km_event *c)
{
	switch (c->event) {
	case XFRM_MSG_EXPIRE:
		return xfrm_exp_state_notify(x, c);
	case XFRM_MSG_NEWAE:
		return xfrm_aevent_state_notify(x, c);
	case XFRM_MSG_DELSA:
	case XFRM_MSG_UPDSA:
	case XFRM_MSG_NEWSA:
		return xfrm_notify_sa(x, c);
	case XFRM_MSG_FLUSHSA:
		return xfrm_notify_sa_flush(c);
	default:
		printk("xfrm_user: Unknown SA event %d\n", c->event);
		break;
	}

	return 0;
}

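/*
 * ACQUIRE: ask userspace key managers to negotiate an SA for the given
 * template.  The acquire sequence number is stored in x->km.seq and echoed
 * in the message so that a later reply can be matched to this state.
 */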
static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
					  struct xfrm_policy *xp)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
	       + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
	       + nla_total_size(xfrm_user_sec_ctx_size(x->security))
	       + userpolicy_type_attrsize();
}

static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
			 struct xfrm_tmpl *xt, struct xfrm_policy *xp,
			 int dir)
{
	struct xfrm_user_acquire *ua;
	struct nlmsghdr *nlh;
	__u32 seq = xfrm_get_acqseq();

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ua = nlmsg_data(nlh);
	memcpy(&ua->id, &x->id, sizeof(ua->id));
	memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
	memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
	copy_to_user_policy(xp, &ua->policy, dir);
	ua->aalgos = xt->aalgos;
	ua->ealgos = xt->ealgos;
	ua->calgos = xt->calgos;
	ua->seq = x->km.seq = seq;

	if (copy_to_user_tmpl(xp, skb) < 0)
		goto nlmsg_failure;
	if (copy_to_user_state_sec_ctx(x, skb))
		goto nlmsg_failure;
	if (copy_to_user_policy_type(xp->type, skb) < 0)
		goto nlmsg_failure;

	return nlmsg_end(skb, nlh);

nlmsg_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
			     struct xfrm_policy *xp, int dir)
{
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_acquire(skb, x, xt, xp, dir) < 0)
		BUG();

	return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
}

/* User gives us xfrm_user_policy_info followed by an array of 0
 * or more templates. */
static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
					       u8 *data, int len, int *dir)
{
	struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
	struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
	struct xfrm_policy *xp;
	int nr;

	switch (sk->sk_family) {
	case AF_INET:
		if (opt != IP_XFRM_POLICY) {
			*dir = -EOPNOTSUPP;
			return NULL;
		}
		break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case AF_INET6:
		if (opt != IPV6_XFRM_POLICY) {
			*dir = -EOPNOTSUPP;
			return NULL;
		}
		break;
#endif
	default:
		*dir = -EINVAL;
		return NULL;
	}

	*dir = -EINVAL;

	if (len < sizeof(*p) ||
	    verify_newpolicy_info(p))
		return NULL;

	nr = ((len - sizeof(*p)) / sizeof(*ut));
	if (validate_tmpl(nr, ut, p->sel.family))
		return NULL;

	if (p->dir > XFRM_POLICY_OUT)
		return NULL;

	xp = xfrm_policy_alloc(GFP_KERNEL);
	if (xp == NULL) {
		*dir = -ENOBUFS;
		return NULL;
	}

	copy_from_user_policy(xp, p);
	xp->type = XFRM_POLICY_TYPE_MAIN;
	copy_templates(xp, ut, nr);

	*dir = p->dir;

	return xp;
}

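/*
 * Policy expiry notification (XFRM_MSG_POLEXPIRE), multicast on the
 * XFRMNLGRP_EXPIRE group together with the policy's templates and
 * security context.
 */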
static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
	       + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
	       + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
	       + userpolicy_type_attrsize();
}

static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
			   int dir, struct km_event *c)
{
	struct xfrm_user_polexpire *upe;
	struct nlmsghdr *nlh;
	int hard = c->data.hard;

	nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	upe = nlmsg_data(nlh);
	copy_to_user_policy(xp, &upe->pol, dir);
	if (copy_to_user_tmpl(xp, skb) < 0)
		goto nlmsg_failure;
	if (copy_to_user_sec_ctx(xp, skb))
		goto nlmsg_failure;
	if (copy_to_user_policy_type(xp->type, skb) < 0)
		goto nlmsg_failure;
	upe->hard = !!hard;

	return nlmsg_end(skb, nlh);

nlmsg_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
{
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_polexpire(skb, xp, dir, c) < 0)
		BUG();

	return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
}

static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
{
	struct xfrm_userpolicy_info *p;
	struct xfrm_userpolicy_id *id;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
	int headlen;

	headlen = sizeof(*p);
	if (c->event == XFRM_MSG_DELPOLICY) {
		len += nla_total_size(headlen);
		headlen = sizeof(*id);
	}
	len += userpolicy_type_attrsize();
	len += NLMSG_ALIGN(headlen);

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
	if (nlh == NULL)
		goto nlmsg_failure;

	p = nlmsg_data(nlh);
	if (c->event == XFRM_MSG_DELPOLICY) {
		struct nlattr *attr;

		id = nlmsg_data(nlh);
		memset(id, 0, sizeof(*id));
		id->dir = dir;
		if (c->data.byid)
			id->index = xp->index;
		else
			memcpy(&id->sel, &xp->selector, sizeof(id->sel));

		attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
		if (attr == NULL)
			goto nlmsg_failure;

		p = nla_data(attr);
	}

	copy_to_user_policy(xp, p, dir);
	if (copy_to_user_tmpl(xp, skb) < 0)
		goto nlmsg_failure;
	if (copy_to_user_policy_type(xp->type, skb) < 0)
		goto nlmsg_failure;

	nlmsg_end(skb, nlh);

	return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);

nlmsg_failure:
	kfree_skb(skb);
	return -1;
}

static int xfrm_notify_policy_flush(struct km_event *c)
{
	struct nlmsghdr *nlh;
	struct sk_buff *skb;

	skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
	if (nlh == NULL)
		goto nlmsg_failure;

	if (copy_to_user_policy_type(c->data.type, skb) < 0)
		goto nlmsg_failure;

	nlmsg_end(skb, nlh);

	return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);

nlmsg_failure:
	kfree_skb(skb);
	return -1;
}

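/* Dispatch a policy event from the xfrm core to one of the notifiers above. */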
static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
{
	switch (c->event) {
	case XFRM_MSG_NEWPOLICY:
	case XFRM_MSG_UPDPOLICY:
	case XFRM_MSG_DELPOLICY:
		return xfrm_notify_policy(xp, dir, c);
	case XFRM_MSG_FLUSHPOLICY:
		return xfrm_notify_policy_flush(c);
	case XFRM_MSG_POLEXPIRE:
		return xfrm_exp_policy_notify(xp, dir, c);
	default:
		printk("xfrm_user: Unknown Policy event %d\n", c->event);
	}

	return 0;
}

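/*
 * XFRM_MSG_REPORT: carries a protocol and selector plus an optional
 * care-of address (XFRMA_COADDR), multicast on XFRMNLGRP_REPORT.
 */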
static inline size_t xfrm_report_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
}

static int build_report(struct sk_buff *skb, u8 proto,
			struct xfrm_selector *sel, xfrm_address_t *addr)
{
	struct xfrm_user_report *ur;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ur = nlmsg_data(nlh);
	ur->proto = proto;
	memcpy(&ur->sel, sel, sizeof(ur->sel));

	if (addr)
		NLA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_send_report(u8 proto, struct xfrm_selector *sel,
			    xfrm_address_t *addr)
{
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_report(skb, proto, sel, addr) < 0)
		BUG();

	return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
}

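/*
 * Register this interface with the xfrm core as the "netlink" key manager;
 * the callbacks above are then invoked for state and policy events.
 */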
static struct xfrm_mgr netlink_mgr = {
	.id		= "netlink",
	.notify		= xfrm_send_state_notify,
	.acquire	= xfrm_send_acquire,
	.compile_policy	= xfrm_compile_policy,
	.notify_policy	= xfrm_send_policy_notify,
	.report		= xfrm_send_report,
	.migrate	= xfrm_send_migrate,
};

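/* Module init/exit: create the NETLINK_XFRM kernel socket and register the km. */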
static int __init xfrm_user_init(void)
{
	struct sock *nlsk;

	printk(KERN_INFO "Initializing XFRM netlink socket\n");

	nlsk = netlink_kernel_create(NETLINK_XFRM, XFRMNLGRP_MAX,
				     xfrm_netlink_rcv, NULL, THIS_MODULE);
	if (nlsk == NULL)
		return -ENOMEM;
	rcu_assign_pointer(xfrm_nl, nlsk);

	xfrm_register_km(&netlink_mgr);

	return 0;
}

static void __exit xfrm_user_exit(void)
{
	struct sock *nlsk = xfrm_nl;

	xfrm_unregister_km(&netlink_mgr);
	rcu_assign_pointer(xfrm_nl, NULL);
	synchronize_rcu();
	sock_release(nlsk->sk_socket);
}

module_init(xfrm_user_init);
module_exit(xfrm_user_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);