xfrm_algo.c

/*
 * xfrm algorithm interface
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/crypto.h>
#include <net/xfrm.h>
#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
#include <net/ah.h>
#endif
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
#include <net/esp.h>
#endif
#include <asm/scatterlist.h>

/*
 * Algorithms supported by IPsec. These entries contain properties which
 * are used in key negotiation and xfrm processing, and are used to verify
 * that instantiated crypto transforms have correct parameters for IPsec
 * purposes.
 */
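
/* Authentication (ICV) algorithms, identified by their PF_KEY SADB ids. */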
static struct xfrm_algo_desc aalg_list[] = {
{
	.name = "digest_null",
	.uinfo = {
		.auth = {
			.icv_truncbits = 0,
			.icv_fullbits = 0,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_AALG_NULL,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	.name = "md5",
	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_AALG_MD5HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 128
	}
},
{
	.name = "sha1",
	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 160,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_AALG_SHA1HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
{
	.name = "sha256",
	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 256,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 256,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "ripemd160",
	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 160,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
};
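
/* Encryption algorithms, used by ESP. */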
static struct xfrm_algo_desc ealg_list[] = {
{
	.name = "cipher_null",
	.uinfo = {
		.encr = {
			.blockbits = 8,
			.defkeybits = 0,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_EALG_NULL,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	.name = "des",
	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 64,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_EALG_DESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 64,
		.sadb_alg_maxbits = 64
	}
},
{
	.name = "des3_ede",
	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 192,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_EALG_3DESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 192,
		.sadb_alg_maxbits = 192
	}
},
{
	.name = "cast128",
	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_EALG_CASTCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 128
	}
},
{
	.name = "blowfish",
	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 448
	}
},
{
	.name = "aes",
	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_EALG_AESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "serpent",
	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_EALG_SERPENTCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256,
	}
},
{
	.name = "twofish",
	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},
	.desc = {
		.sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
};
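
/* Compression algorithms, used by IPComp. */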
static struct xfrm_algo_desc calg_list[] = {
{
	.name = "deflate",
	.uinfo = {
		.comp = {
			.threshold = 90,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
},
{
	.name = "lzs",
	.uinfo = {
		.comp = {
			.threshold = 90,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_LZS }
},
{
	.name = "lzjh",
	.uinfo = {
		.comp = {
			.threshold = 50,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
},
};

static inline int aalg_entries(void)
{
	return ARRAY_SIZE(aalg_list);
}

static inline int ealg_entries(void)
{
	return ARRAY_SIZE(ealg_list);
}

static inline int calg_entries(void)
{
	return ARRAY_SIZE(calg_list);
}

/* Todo: generic iterators */
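/*
 * Look up an algorithm descriptor by its PF_KEY SADB algorithm id.
 * A matching entry is only returned if it has been marked available
 * (see xfrm_probe_algs()); otherwise NULL is returned.
 */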
struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < aalg_entries(); i++) {
		if (aalg_list[i].desc.sadb_alg_id == alg_id) {
			if (aalg_list[i].available)
				return &aalg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid);

struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < ealg_entries(); i++) {
		if (ealg_list[i].desc.sadb_alg_id == alg_id) {
			if (ealg_list[i].available)
				return &ealg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid);

struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < calg_entries(); i++) {
		if (calg_list[i].desc.sadb_alg_id == alg_id) {
			if (calg_list[i].available)
				return &calg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);
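
/*
 * Name-based lookup. With @probe set, a miss on the available flag
 * falls back to crypto_alg_available(), so an algorithm registered
 * since the last probe is still found and its flag updated in place.
 * For example, a hypothetical caller might do:
 *
 *	struct xfrm_algo_desc *aalg = xfrm_aalg_get_byname("sha1", 1);
 */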
static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
					      int entries, char *name,
					      int probe)
{
	int i, status;

	if (!name)
		return NULL;

	for (i = 0; i < entries; i++) {
		if (strcmp(name, list[i].name))
			continue;

		if (list[i].available)
			return &list[i];

		if (!probe)
			break;

		status = crypto_alg_available(name, 0);
		if (!status)
			break;

		list[i].available = status;
		return &list[i];
	}
	return NULL;
}

struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(aalg_list, aalg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);

struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(ealg_list, ealg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);

struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(calg_list, calg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
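
/*
 * Index-based accessors for walking the tables in order. No
 * availability check is made here; callers are expected to inspect
 * the available flag themselves.
 */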
struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
{
	if (idx >= aalg_entries())
		return NULL;

	return &aalg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byidx);

struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx)
{
	if (idx >= ealg_entries())
		return NULL;

	return &ealg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byidx);

/*
 * Probe for the availability of crypto algorithms, and set the available
 * flag for any algorithms found on the system. This is typically called
 * by pfkey during userspace SA add, update or register.
 */
void xfrm_probe_algs(void)
{
#ifdef CONFIG_CRYPTO
	int i, status;

	BUG_ON(in_softirq());

	for (i = 0; i < aalg_entries(); i++) {
		status = crypto_alg_available(aalg_list[i].name, 0);
		if (aalg_list[i].available != status)
			aalg_list[i].available = status;
	}

	for (i = 0; i < ealg_entries(); i++) {
		status = crypto_alg_available(ealg_list[i].name, 0);
		if (ealg_list[i].available != status)
			ealg_list[i].available = status;
	}

	for (i = 0; i < calg_entries(); i++) {
		status = crypto_alg_available(calg_list[i].name, 0);
		if (calg_list[i].available != status)
			calg_list[i].available = status;
	}
#endif
}
EXPORT_SYMBOL_GPL(xfrm_probe_algs);
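
/*
 * Count the entries currently marked available in the authentication
 * and encryption tables.
 */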
int xfrm_count_auth_supported(void)
{
	int i, n;

	for (i = 0, n = 0; i < aalg_entries(); i++)
		if (aalg_list[i].available)
			n++;
	return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_auth_supported);

int xfrm_count_enc_supported(void)
{
	int i, n;

	for (i = 0, n = 0; i < ealg_entries(); i++)
		if (ealg_list[i].available)
			n++;
	return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);

/* Move to common area: it is shared with AH. */
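/*
 * Walk the skb data (linear head, page fragments, then the frag_list
 * chain), feeding each region to @icv_update through a one-element
 * scatterlist so the ICV can be computed over the whole range.
 */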
void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
		  int offset, int len, icv_update_fn_t icv_update)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct scatterlist sg;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;

		sg.page = virt_to_page(skb->data + offset);
		sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg.length = copy;

		icv_update(tfm, &sg, 1);

		if ((len -= copy) == 0)
			return;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;

			sg.page = frag->page;
			sg.offset = frag->page_offset + offset-start;
			sg.length = copy;

			icv_update(tfm, &sg, 1);

			if (!(len -= copy))
				return;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				skb_icv_walk(list, tfm, offset-start, copy, icv_update);
				if ((len -= copy) == 0)
					return;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
}
EXPORT_SYMBOL_GPL(skb_icv_walk);

#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)

/* Although it looks generic, it is not used in any other place. */
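/*
 * Map @len bytes of skb data starting at @offset into the scatterlist
 * array @sg, descending into page fragments and the frag_list chain as
 * needed. Returns the number of scatterlist entries filled in.
 */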
int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg[elt].page = virt_to_page(skb->data + offset);
		sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg[elt].length = copy;
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg[elt].page = frag->page;
			sg[elt].offset = frag->page_offset+offset-start;
			sg[elt].length = copy;
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
				if ((len -= copy) == 0)
					return elt;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);

/* Check that skb data bits are writable. If they are not, copy data
 * to a newly created private area. If "tailbits" is given, make sure that
 * tailbits bytes beyond the current end of the skb are writable.
 *
 * Returns the number of scatterlist elements to load for subsequent
 * transformations and a pointer to the writable trailer skb.
 */
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most packets will go this way. */
	if (!skb_shinfo(skb)->frag_list) {
		/* A little trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to generate
		 * good frames. On a miss we reallocate and reserve even more
		 * space; 128 bytes is fair. */
		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Misery. We are in trouble, going to mince the fragments... */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone,
		 * this can happen on input. Copy it and everything
		 * after it. */
		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last one, worry about the trailer. */
		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_shinfo(skb1)->frag_list ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_shinfo(skb1)->frag_list) {
			struct sk_buff *skb2;

			/* Fuck, we are miserable poor guys... */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around. Are we still alive?
			 * OK, link new skb, drop old one */
			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);
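
/*
 * Append @len bytes to @tail, the trailer skb found by skb_cow_data().
 * When the trailer is not the head skb itself, the head skb's length
 * accounting is updated to cover the new bytes. A typical caller
 * (sketched, names hypothetical) does:
 *
 *	nfrags = skb_cow_data(skb, trailer_len, &trailer);
 *	tail = pskb_put(skb, trailer, trailer_len);
 */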
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);

#endif