xfrm_algo.c

/*
 * xfrm algorithm interface
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/crypto.h>
#include <net/xfrm.h>
#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
#include <net/ah.h>
#endif
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
#include <net/esp.h>
#endif
#include <asm/scatterlist.h>

/*
 * Algorithms supported by IPsec.  These entries contain properties which
 * are used in key negotiation and xfrm processing, and are used to verify
 * that instantiated crypto transforms have correct parameters for IPsec
 * purposes.
 */
static struct xfrm_algo_desc aalg_list[] = {
{
	.name = "hmac(digest_null)",
	.compat = "digest_null",
	.uinfo = {
		.auth = {
			.icv_truncbits = 0,
			.icv_fullbits = 0,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_AALG_NULL,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	.name = "hmac(md5)",
	.compat = "md5",
	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_AALG_MD5HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 128
	}
},
{
	.name = "hmac(sha1)",
	.compat = "sha1",
	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 160,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_AALG_SHA1HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
{
	.name = "hmac(sha256)",
	.compat = "sha256",
	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 256,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 256,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "hmac(ripemd160)",
	.compat = "ripemd160",
	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 160,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
};

static struct xfrm_algo_desc ealg_list[] = {
{
	.name = "ecb(cipher_null)",
	.compat = "cipher_null",
	.uinfo = {
		.encr = {
			.blockbits = 8,
			.defkeybits = 0,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_EALG_NULL,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	.name = "cbc(des)",
	.compat = "des",
	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 64,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_EALG_DESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 64,
		.sadb_alg_maxbits = 64
	}
},
{
	.name = "cbc(des3_ede)",
	.compat = "des3_ede",
	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 192,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_EALG_3DESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 192,
		.sadb_alg_maxbits = 192
	}
},
{
	.name = "cbc(cast128)",
	.compat = "cast128",
	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_EALG_CASTCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 128
	}
},
{
	.name = "cbc(blowfish)",
	.compat = "blowfish",
	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 448
	}
},
{
	.name = "cbc(aes)",
	.compat = "aes",
	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_EALG_AESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "cbc(serpent)",
	.compat = "serpent",
	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_EALG_SERPENTCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256,
	}
},
{
	.name = "cbc(twofish)",
	.compat = "twofish",
	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
};

static struct xfrm_algo_desc calg_list[] = {
{
	.name = "deflate",
	.uinfo = {
		.comp = {
			.threshold = 90,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
},
{
	.name = "lzs",
	.uinfo = {
		.comp = {
			.threshold = 90,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_LZS }
},
{
	.name = "lzjh",
	.uinfo = {
		.comp = {
			.threshold = 50,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
},
};
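
/*
 * Example (illustrative, not part of the original file): the .desc member of
 * each table entry is a ready-made struct sadb_alg, so PF_KEY code that
 * advertises supported algorithms can copy it verbatim into an
 * SADB_SUPPORTED extension.  The cursor variable below is hypothetical:
 *
 *	struct sadb_alg *ap;	// cursor into the extension payload
 *	int i;
 *
 *	for (i = 0; i < aalg_entries(); i++)
 *		if (aalg_list[i].available)
 *			*ap++ = aalg_list[i].desc;
 */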

static inline int aalg_entries(void)
{
	return ARRAY_SIZE(aalg_list);
}

static inline int ealg_entries(void)
{
	return ARRAY_SIZE(ealg_list);
}

static inline int calg_entries(void)
{
	return ARRAY_SIZE(calg_list);
}

/* Todo: generic iterators */
struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < aalg_entries(); i++) {
		if (aalg_list[i].desc.sadb_alg_id == alg_id) {
			if (aalg_list[i].available)
				return &aalg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid);

struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < ealg_entries(); i++) {
		if (ealg_list[i].desc.sadb_alg_id == alg_id) {
			if (ealg_list[i].available)
				return &ealg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid);

struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < calg_entries(); i++) {
		if (calg_list[i].desc.sadb_alg_id == alg_id) {
			if (calg_list[i].available)
				return &calg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);
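
/*
 * Example (illustrative only): a PF_KEY style caller can resolve an SADB
 * algorithm id to its descriptor; the lookup succeeds only after the
 * algorithm has been marked available by probing:
 *
 *	struct xfrm_algo_desc *desc = xfrm_aalg_get_byid(SADB_AALG_SHA1HMAC);
 *
 *	if (desc)
 *		printk(KERN_DEBUG "auth alg %s, ICV %u/%u bits\n", desc->name,
 *		       desc->uinfo.auth.icv_truncbits,
 *		       desc->uinfo.auth.icv_fullbits);
 */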

/*
 * Look up an algorithm by its canonical name or its legacy "compat" name.
 * When @probe is set and the entry has not yet been marked available, ask
 * the crypto layer whether the algorithm can be instantiated and cache the
 * result in the table.
 */
static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
					      int entries, char *name,
					      int probe)
{
	int i, status;

	if (!name)
		return NULL;

	for (i = 0; i < entries; i++) {
		if (strcmp(name, list[i].name) &&
		    (!list[i].compat || strcmp(name, list[i].compat)))
			continue;

		if (list[i].available)
			return &list[i];

		if (!probe)
			break;

		status = crypto_alg_available(name, 0);
		if (!status)
			break;

		list[i].available = status;
		return &list[i];
	}
	return NULL;
}

struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(aalg_list, aalg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);

struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(ealg_list, ealg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);

struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(calg_list, calg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
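
/*
 * Example (illustrative only): when an SA is configured from userspace, the
 * requested algorithm is typically resolved by name with probing enabled so
 * the crypto layer is consulted on first use.  The legacy .compat name
 * ("aes") would resolve to the same entry as the template name:
 *
 *	struct xfrm_algo_desc *ealg = xfrm_ealg_get_byname("cbc(aes)", 1);
 *
 *	if (!ealg)
 *		return -ENOSYS;
 */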

struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
{
	if (idx >= aalg_entries())
		return NULL;

	return &aalg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byidx);

struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx)
{
	if (idx >= ealg_entries())
		return NULL;

	return &ealg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byidx);

/*
 * Probe for the availability of crypto algorithms, and set the available
 * flag for any algorithms found on the system.  This is typically called by
 * pfkey during userspace SA add, update or register.
 */
void xfrm_probe_algs(void)
{
#ifdef CONFIG_CRYPTO
	int i, status;

	BUG_ON(in_softirq());

	for (i = 0; i < aalg_entries(); i++) {
		status = crypto_alg_available(aalg_list[i].name, 0);
		if (aalg_list[i].available != status)
			aalg_list[i].available = status;
	}

	for (i = 0; i < ealg_entries(); i++) {
		status = crypto_alg_available(ealg_list[i].name, 0);
		if (ealg_list[i].available != status)
			ealg_list[i].available = status;
	}

	for (i = 0; i < calg_entries(); i++) {
		status = crypto_alg_available(calg_list[i].name, 0);
		if (calg_list[i].available != status)
			calg_list[i].available = status;
	}
#endif
}
EXPORT_SYMBOL_GPL(xfrm_probe_algs);

int xfrm_count_auth_supported(void)
{
	int i, n;

	for (i = 0, n = 0; i < aalg_entries(); i++)
		if (aalg_list[i].available)
			n++;
	return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_auth_supported);

int xfrm_count_enc_supported(void)
{
	int i, n;

	for (i = 0, n = 0; i < ealg_entries(); i++)
		if (ealg_list[i].available)
			n++;
	return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
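
/*
 * Example (illustrative only): a PF_KEY SADB_REGISTER handler can refresh
 * availability and then size its supported-algorithm payload from the
 * counts above:
 *
 *	xfrm_probe_algs();
 *	auth_len = xfrm_count_auth_supported() * sizeof(struct sadb_alg);
 *	enc_len  = xfrm_count_enc_supported()  * sizeof(struct sadb_alg);
 */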

/* Move to common area: it is shared with AH. */
int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
		 int offset, int len, icv_update_fn_t icv_update)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int err;
	struct scatterlist sg;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;

		sg.page = virt_to_page(skb->data + offset);
		sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg.length = copy;

		err = icv_update(desc, &sg, copy);
		if (unlikely(err))
			return err;

		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;

			sg.page = frag->page;
			sg.offset = frag->page_offset + offset-start;
			sg.length = copy;

			err = icv_update(desc, &sg, copy);
			if (unlikely(err))
				return err;

			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				err = skb_icv_walk(list, desc, offset-start,
						   copy, icv_update);
				if (unlikely(err))
					return err;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return 0;
}
EXPORT_SYMBOL_GPL(skb_icv_walk);
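
/*
 * Example (illustrative only; tfm and icv are hypothetical locals): an AH
 * implementation can feed every byte of the packet into an HMAC transform
 * without linearizing the skb:
 *
 *	struct hash_desc desc = { .tfm = tfm, .flags = 0 };
 *	int err;
 *
 *	err = crypto_hash_init(&desc);
 *	if (!err)
 *		err = skb_icv_walk(skb, &desc, 0, skb->len, crypto_hash_update);
 *	if (!err)
 *		err = crypto_hash_final(&desc, icv);
 */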

#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)

/* Although it looks generic, it is not used anywhere else. */
int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg[elt].page = virt_to_page(skb->data + offset);
		sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg[elt].length = copy;
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg[elt].page = frag->page;
			sg[elt].offset = frag->page_offset+offset-start;
			sg[elt].length = copy;
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
				if ((len -= copy) == 0)
					return elt;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);
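
/*
 * Example (illustrative only; sg, offset, len and desc are hypothetical
 * locals): ESP-style output code maps the region it is about to encrypt
 * into a scatterlist and hands it to the block cipher in place:
 *
 *	int nelts = skb_to_sgvec(skb, sg, offset, len);
 *
 *	// the first nelts entries of sg now cover the payload
 *	err = crypto_blkcipher_encrypt(&desc, sg, sg, len);
 */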

/* Check that skb data bits are writable.  If they are not, copy the data
 * to a newly created private area.  If "tailbits" is given, make sure that
 * tailbits bytes beyond the current end of the skb are writable.
 *
 * Returns the number of scatterlist elements to load for subsequent
 * transformations, and a pointer to the writable trailer skb.
 */
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If the skb is cloned or its head is paged, reallocate the head,
	 * pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most packets will go this way. */
	if (!skb_shinfo(skb)->frag_list) {
		/* A little trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to generate
		 * good frames.  On a miss we reallocate and reserve even more
		 * space; 128 bytes is fair. */

		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Misery. We are in trouble, going to mincer fragments... */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone; this can
		 * happen on input. Copy it and everything after it. */

		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last one, worry about the trailer. */

		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_shinfo(skb1)->frag_list ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_shinfo(skb1)->frag_list) {
			struct sk_buff *skb2;

			/* We are miserable poor guys... */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around. Are we still alive?
			 * OK, link the new skb, drop the old one */

			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);
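
/*
 * Example (illustrative only; trailer_len and tail are hypothetical locals):
 * ESP-style output code uses skb_cow_data() to make the packet writable and
 * to learn how many scatterlist entries it will need, then appends its
 * trailer to the skb returned in *trailer:
 *
 *	struct sk_buff *trailer;
 *	int nfrags = skb_cow_data(skb, trailer_len, &trailer);
 *
 *	if (nfrags < 0)
 *		goto error;
 *	tail = pskb_put(skb, trailer, trailer_len);
 */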

/*
 * Like skb_put(), but writes into @tail (typically the trailer returned by
 * skb_cow_data()) and keeps the parent skb's length accounting in sync.
 */
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);
#endif