xfrm_algo.c

/*
 * xfrm algorithm interface
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/crypto.h>
#include <net/xfrm.h>
#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
#include <net/ah.h>
#endif
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
#include <net/esp.h>
#endif
#include <asm/scatterlist.h>
/*
 * Algorithms supported by IPsec.  These entries contain properties which
 * are used in key negotiation and xfrm processing, and are used to verify
 * that instantiated crypto transforms have correct parameters for IPsec
 * purposes.
 */
static struct xfrm_algo_desc aalg_list[] = {
{
	.name = "digest_null",

	.uinfo = {
		.auth = {
			.icv_truncbits = 0,
			.icv_fullbits = 0,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_AALG_NULL,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	.name = "md5",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_AALG_MD5HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 128
	}
},
{
	.name = "sha1",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 160,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_AALG_SHA1HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
{
	.name = "sha256",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 256,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 256,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "ripemd160",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 160,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
};
static struct xfrm_algo_desc ealg_list[] = {
{
	.name = "cipher_null",

	.uinfo = {
		.encr = {
			.blockbits = 8,
			.defkeybits = 0,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_EALG_NULL,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	.name = "des",

	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 64,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_EALG_DESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 64,
		.sadb_alg_maxbits = 64
	}
},
{
	.name = "des3_ede",

	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 192,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_EALG_3DESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 192,
		.sadb_alg_maxbits = 192
	}
},
{
	.name = "cast128",

	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_EALG_CASTCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 128
	}
},
{
	.name = "blowfish",

	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 448
	}
},
{
	.name = "aes",

	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_EALG_AESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "serpent",

	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_EALG_SERPENTCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256,
	}
},
{
	.name = "twofish",

	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
};
static struct xfrm_algo_desc calg_list[] = {
{
	.name = "deflate",
	.uinfo = {
		.comp = {
			.threshold = 90,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
},
{
	.name = "lzs",
	.uinfo = {
		.comp = {
			.threshold = 90,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_LZS }
},
{
	.name = "lzjh",
	.uinfo = {
		.comp = {
			.threshold = 50,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
},
};
static inline int aalg_entries(void)
{
	return ARRAY_SIZE(aalg_list);
}

static inline int ealg_entries(void)
{
	return ARRAY_SIZE(ealg_list);
}

static inline int calg_entries(void)
{
	return ARRAY_SIZE(calg_list);
}
/* Todo: generic iterators */
struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < aalg_entries(); i++) {
		if (aalg_list[i].desc.sadb_alg_id == alg_id) {
			if (aalg_list[i].available)
				return &aalg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid);

struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < ealg_entries(); i++) {
		if (ealg_list[i].desc.sadb_alg_id == alg_id) {
			if (ealg_list[i].available)
				return &ealg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid);

struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < calg_entries(); i++) {
		if (calg_list[i].desc.sadb_alg_id == alg_id) {
			if (calg_list[i].available)
				return &calg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);
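
/*
 * Example: a minimal sketch of how a key-manager-style caller might use the
 * by-id lookup above to validate a proposed authentication key against the
 * descriptor bounds.  The function name and its 'key_bits' parameter are
 * hypothetical and shown for illustration only.
 */
static int example_check_auth_key(int sadb_alg_id, unsigned int key_bits)
{
	struct xfrm_algo_desc *desc = xfrm_aalg_get_byid(sadb_alg_id);

	if (!desc)
		return -ENOENT;		/* unknown or unavailable algorithm */

	if (key_bits < desc->desc.sadb_alg_minbits ||
	    key_bits > desc->desc.sadb_alg_maxbits)
		return -EINVAL;		/* key length outside the negotiable range */

	return 0;
}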
static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
					      int entries, char *name,
					      int probe)
{
	int i, status;

	if (!name)
		return NULL;

	for (i = 0; i < entries; i++) {
		if (strcmp(name, list[i].name))
			continue;

		if (list[i].available)
			return &list[i];

		if (!probe)
			break;

		status = crypto_alg_available(name, 0);
		if (!status)
			break;

		list[i].available = status;
		return &list[i];
	}
	return NULL;
}

struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(aalg_list, aalg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);

struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(ealg_list, ealg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);

struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(calg_list, calg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
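
/*
 * Example: a minimal sketch of the by-name lookup as an SA-creation path
 * might perform it.  With probe != 0 the lookup also asks the crypto layer
 * whether the algorithm can actually be instantiated and caches the result
 * in the 'available' flag.  The function and its parameters are hypothetical.
 */
static int example_resolve_algs(char *auth_name, char *enc_name)
{
	struct xfrm_algo_desc *aalg, *ealg;

	/* Probe so that algorithms provided by not-yet-loaded-at-boot
	 * crypto modules are detected and marked available on first use. */
	aalg = xfrm_aalg_get_byname(auth_name, 1);
	ealg = xfrm_ealg_get_byname(enc_name, 1);
	if (!aalg || !ealg)
		return -ENOENT;

	printk(KERN_DEBUG "using %s/%s, default enc key %u bits\n",
	       aalg->name, ealg->name, ealg->uinfo.encr.defkeybits);
	return 0;
}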
struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
{
	if (idx >= aalg_entries())
		return NULL;

	return &aalg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byidx);

struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx)
{
	if (idx >= ealg_entries())
		return NULL;

	return &ealg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byidx);
/*
 * Probe for the availability of crypto algorithms, and set the available
 * flag for any algorithms found on the system.  This is typically called by
 * pfkey during userspace SA add, update or register.
 */
void xfrm_probe_algs(void)
{
#ifdef CONFIG_CRYPTO
	int i, status;

	BUG_ON(in_softirq());

	for (i = 0; i < aalg_entries(); i++) {
		status = crypto_alg_available(aalg_list[i].name, 0);
		if (aalg_list[i].available != status)
			aalg_list[i].available = status;
	}

	for (i = 0; i < ealg_entries(); i++) {
		status = crypto_alg_available(ealg_list[i].name, 0);
		if (ealg_list[i].available != status)
			ealg_list[i].available = status;
	}

	for (i = 0; i < calg_entries(); i++) {
		status = crypto_alg_available(calg_list[i].name, 0);
		if (calg_list[i].available != status)
			calg_list[i].available = status;
	}
#endif
}
EXPORT_SYMBOL_GPL(xfrm_probe_algs);
int xfrm_count_auth_supported(void)
{
	int i, n;

	for (i = 0, n = 0; i < aalg_entries(); i++)
		if (aalg_list[i].available)
			n++;
	return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_auth_supported);

int xfrm_count_enc_supported(void)
{
	int i, n;

	for (i = 0, n = 0; i < ealg_entries(); i++)
		if (ealg_list[i].available)
			n++;
	return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
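
/*
 * Example: a minimal sketch of the probe/enumerate pattern a pfkey-style
 * registration path might follow: refresh the 'available' flags, size a
 * reply from the supported counts, then walk the tables by index.  The
 * function itself is hypothetical and for illustration only.
 */
static void example_dump_supported_aalgs(void)
{
	struct xfrm_algo_desc *desc;
	unsigned int i;

	xfrm_probe_algs();	/* must not be called from softirq context */

	printk(KERN_DEBUG "%d auth, %d enc algorithms supported\n",
	       xfrm_count_auth_supported(), xfrm_count_enc_supported());

	for (i = 0; (desc = xfrm_aalg_get_byidx(i)) != NULL; i++)
		if (desc->available)
			printk(KERN_DEBUG "  %s: ICV %u/%u bits\n",
			       desc->name,
			       desc->uinfo.auth.icv_truncbits,
			       desc->uinfo.auth.icv_fullbits);
}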
/* Move to common area: it is shared with AH. */
void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
		  int offset, int len, icv_update_fn_t icv_update)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct scatterlist sg;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;

		sg.page = virt_to_page(skb->data + offset);
		sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg.length = copy;

		icv_update(tfm, &sg, 1);

		if ((len -= copy) == 0)
			return;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;

			sg.page = frag->page;
			sg.offset = frag->page_offset + offset - start;
			sg.length = copy;

			icv_update(tfm, &sg, 1);

			if (!(len -= copy))
				return;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				skb_icv_walk(list, tfm, offset - start, copy, icv_update);
				if ((len -= copy) == 0)
					return;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
}
EXPORT_SYMBOL_GPL(skb_icv_walk);
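
/*
 * Example: a minimal sketch of how an AH-style caller might drive
 * skb_icv_walk(), assuming the crypto_hmac_*() interface of the old crypto
 * layer.  The function and its key/out parameters are illustrative only;
 * the real AH code keeps this state in its own per-SA structure.
 */
static void example_hmac_over_skb(struct crypto_tfm *tfm, struct sk_buff *skb,
				  u8 *key, unsigned int key_len, u8 *out)
{
	unsigned int klen = key_len;

	crypto_hmac_init(tfm, key, &klen);
	/* crypto_hmac_update() matches icv_update_fn_t, so it can be handed
	 * straight to the walker to digest every byte of the skb. */
	skb_icv_walk(skb, tfm, 0, skb->len, crypto_hmac_update);
	crypto_hmac_final(tfm, key, &klen, out);
}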
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)

/* Looks generic, but it is not used anywhere else. */
int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg[elt].page = virt_to_page(skb->data + offset);
		sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg[elt].length = copy;
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg[elt].page = frag->page;
			sg[elt].offset = frag->page_offset + offset - start;
			sg[elt].length = copy;
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				elt += skb_to_sgvec(list, sg + elt, offset - start, copy);
				if ((len -= copy) == 0)
					return elt;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);
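
/*
 * Example: a minimal sketch of the input-side mapping ESP performs with
 * skb_to_sgvec(): map the encrypted payload (everything after the ESP header
 * and IV) into a scatterlist and decrypt it in place, assuming the
 * crypto_cipher_decrypt() interface of the old crypto layer.  The function
 * and its 'esph_len'/'ivlen' parameters are illustrative only.
 */
static void example_decrypt_esp_payload(struct crypto_tfm *tfm,
					struct sk_buff *skb,
					struct scatterlist *sg,
					int esph_len, int ivlen)
{
	int elen = skb->len - esph_len - ivlen;

	/* 'sg' must have room for the element count reported by
	 * skb_cow_data() for this skb; skb_to_sgvec() fills it in order. */
	skb_to_sgvec(skb, sg, esph_len + ivlen, elen);
	crypto_cipher_decrypt(tfm, sg, sg, elen);
}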
/* Check that skb data bits are writable.  If they are not, copy data
 * to a newly created private area.  If "tailbits" is given, make sure that
 * tailbits bytes beyond the current end of the skb are writable.
 *
 * Returns the number of scatterlist elements to load for subsequent
 * transformations and a pointer to the writable trailer skb.
 */
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most packets will go this way. */
	if (!skb_shinfo(skb)->frag_list) {
		/* A little trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to generate
		 * good frames.  On a miss we reallocate and reserve even more
		 * space; 128 bytes is fair. */

		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Misery. We are in trouble, going to mince the fragments... */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment has been partially pulled by someone;
		 * this can happen on input.  Copy it and everything
		 * after it. */

		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last one, worry about the trailer. */

		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_shinfo(skb1)->frag_list ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_shinfo(skb1)->frag_list) {
			struct sk_buff *skb2;

			/* Fuck, we are miserable poor guys... */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around. Are we still alive?
			 * OK, link the new skb, drop the old one. */

			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);
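
/*
 * Example: a minimal sketch of how the ESP output path combines the three
 * helpers above, assuming the crypto_cipher_encrypt() interface of the old
 * crypto layer.  The function, its 'trailer_len'/'payload_offset'/'clen'
 * parameters and the fixed-size scatterlist are illustrative only; the real
 * caller keeps per-SA buffers and also writes the ESP header, padding and
 * IV before encrypting.
 */
static int example_esp_like_output(struct crypto_tfm *tfm, struct sk_buff *skb,
				   int trailer_len, int payload_offset, int clen)
{
	struct scatterlist sgbuf[8], *sg = sgbuf;	/* illustrative size */
	struct sk_buff *trailer;
	int nfrags;

	/* Make the packet data writable and ensure 'trailer_len' bytes
	 * (padding, pad length, next header, ICV) can be appended. */
	nfrags = skb_cow_data(skb, trailer_len, &trailer);
	if (nfrags < 0)
		return nfrags;
	if (nfrags > ARRAY_SIZE(sgbuf)) {
		sg = kmalloc(sizeof(*sg) * nfrags, GFP_ATOMIC);
		if (!sg)
			return -ENOMEM;
	}

	/* Grow the (possibly nonlinear) skb through the writable trailer,
	 * so skb->len covers the padded payload of 'clen' bytes. */
	pskb_put(skb, trailer, clen - skb->len);

	/* Map the bytes to be encrypted and transform them in place. */
	skb_to_sgvec(skb, sg, payload_offset, clen);
	crypto_cipher_encrypt(tfm, sg, sg, clen);

	if (sg != sgbuf)
		kfree(sg);
	return 0;
}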
#endif