padlock-sha.c

/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006 Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/i387.h>
#include "padlock.h"

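/*
 * The ABI guarantees 16-byte stack alignment on x86-64 but only 4 bytes
 * on 32-bit x86, so the digest buffers below are over-allocated and
 * aligned to PADLOCK_ALIGNMENT by hand.
 */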
#ifdef CONFIG_64BIT
#define STACK_ALIGN 16
#else
#define STACK_ALIGN 4
#endif

/* Per-request state: the descriptor of the software fallback hash. */
struct padlock_sha_desc {
        struct shash_desc fallback;
};

/* Per-tfm state: the software fallback used for init/update/export. */
struct padlock_sha_ctx {
        struct crypto_shash *fallback;
};

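/*
 * Streaming init/update calls are delegated to a software fallback; the
 * hardware is used only at finalization, where the fallback's exported
 * intermediate state plus the remaining input are handed to the xsha
 * instruction, which processes the tail and the padding.
 */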
static int padlock_sha_init(struct shash_desc *desc)
{
        struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
        struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

        dctx->fallback.tfm = ctx->fallback;
        dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
        return crypto_shash_init(&dctx->fallback);
}

static int padlock_sha_update(struct shash_desc *desc,
                              const u8 *data, unsigned int length)
{
        struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

        dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
        return crypto_shash_update(&dctx->fallback, data, length);
}

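/*
 * Copy the digest out of the aligned scratch buffer, byte-swapping each
 * 32-bit word so the result is big-endian as the hash API expects.
 */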
static inline void padlock_output_block(uint32_t *src,
                                        uint32_t *dst, size_t count)
{
        while (count--)
                *dst++ = swab32(*src++);
}

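/*
 * Finalize a SHA-1 hash: export the fallback's intermediate state, top
 * the input up to a block boundary where possible, then let "rep xsha1"
 * consume the tail and the padding.  ESI points at the input, EDI at
 * the buffer holding the intermediate digest, EAX holds the byte count
 * already hashed and ECX the total count including the new bytes.
 */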
static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
                              unsigned int count, u8 *out)
{
        /* We can't store directly to *out as it may be unaligned. */
        /* BTW Don't reduce the buffer size below 128 Bytes!
         * PadLock microcode needs it that big. */
        char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
                ((aligned(STACK_ALIGN)));
        char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
        struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
        struct sha1_state state;
        unsigned int space;
        unsigned int leftover;
        int ts_state;
        int err;

        dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
        err = crypto_shash_export(&dctx->fallback, &state);
        if (err)
                goto out;

        if (state.count + count > ULONG_MAX)
                return crypto_shash_finup(&dctx->fallback, in, count, out);

        leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
        space = SHA1_BLOCK_SIZE - leftover;
        if (space) {
                if (count > space) {
                        err = crypto_shash_update(&dctx->fallback, in, space) ?:
                              crypto_shash_export(&dctx->fallback, &state);
                        if (err)
                                goto out;
                        count -= space;
                        in += space;
                } else {
                        memcpy(state.buffer + leftover, in, count);
                        in = state.buffer;
                        count += leftover;
                        state.count &= ~(SHA1_BLOCK_SIZE - 1);
                }
        }

        memcpy(result, &state.state, SHA1_DIGEST_SIZE);

        /* prevent taking the spurious DNA fault with padlock. */
        ts_state = irq_ts_save();
        asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
                      :
                      : "c"((unsigned long)state.count + count),
                        "a"((unsigned long)state.count),
                        "S"(in), "D"(result));
        irq_ts_restore(ts_state);

        padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);

out:
        return err;
}

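/* final is finup with an empty tail; the dummy buffer is never read. */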
static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
{
        u8 buf[4];

        return padlock_sha1_finup(desc, buf, 0, out);
}

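/*
 * SHA-256 finalization mirrors padlock_sha1_finup above, using
 * "rep xsha256" and an 8-word (32-byte) digest.
 */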
static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
                                unsigned int count, u8 *out)
{
        /* We can't store directly to *out as it may be unaligned. */
        /* BTW Don't reduce the buffer size below 128 Bytes!
         * PadLock microcode needs it that big. */
        char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
                ((aligned(STACK_ALIGN)));
        char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
        struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
        struct sha256_state state;
        unsigned int space;
        unsigned int leftover;
        int ts_state;
        int err;

        dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
        err = crypto_shash_export(&dctx->fallback, &state);
        if (err)
                goto out;

        if (state.count + count > ULONG_MAX)
                return crypto_shash_finup(&dctx->fallback, in, count, out);

        leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
        space = SHA256_BLOCK_SIZE - leftover;
        if (space) {
                if (count > space) {
                        err = crypto_shash_update(&dctx->fallback, in, space) ?:
                              crypto_shash_export(&dctx->fallback, &state);
                        if (err)
                                goto out;
                        count -= space;
                        in += space;
                } else {
                        memcpy(state.buf + leftover, in, count);
                        in = state.buf;
                        count += leftover;
                        state.count &= ~(SHA256_BLOCK_SIZE - 1);
                }
        }

        memcpy(result, &state.state, SHA256_DIGEST_SIZE);

        /* prevent taking the spurious DNA fault with padlock. */
        ts_state = irq_ts_save();
        asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
                      :
                      : "c"((unsigned long)state.count + count),
                        "a"((unsigned long)state.count),
                        "S"(in), "D"(result));
        irq_ts_restore(ts_state);

        padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);

out:
        return err;
}

static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
{
        u8 buf[4];

        return padlock_sha256_finup(desc, buf, 0, out);
}

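/*
 * Allocate the software fallback when the tfm is created and grow
 * descsize so the fallback's descriptor state fits inside ours.
 */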
static int padlock_cra_init(struct crypto_tfm *tfm)
{
        struct crypto_shash *hash = __crypto_shash_cast(tfm);
        const char *fallback_driver_name = tfm->__crt_alg->cra_name;
        struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_shash *fallback_tfm;
        int err = -ENOMEM;

        /* Allocate a fallback and abort if it failed. */
        fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
                                          CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(fallback_tfm)) {
                printk(KERN_WARNING PFX
                       "Fallback driver '%s' could not be loaded!\n",
                       fallback_driver_name);
                err = PTR_ERR(fallback_tfm);
                goto out;
        }

        ctx->fallback = fallback_tfm;
        hash->descsize += crypto_shash_descsize(fallback_tfm);
        return 0;

out:
        return err;
}

static void padlock_cra_exit(struct crypto_tfm *tfm)
{
        struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(ctx->fallback);
}

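/*
 * Registered with a priority above the generic C implementations so
 * the PadLock versions win whenever the engine is available.
 */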
static struct shash_alg sha1_alg = {
        .digestsize     = SHA1_DIGEST_SIZE,
        .init           = padlock_sha_init,
        .update         = padlock_sha_update,
        .finup          = padlock_sha1_finup,
        .final          = padlock_sha1_final,
        .descsize       = sizeof(struct padlock_sha_desc),
        .base           = {
                .cra_name               = "sha1",
                .cra_driver_name        = "sha1-padlock",
                .cra_priority           = PADLOCK_CRA_PRIORITY,
                .cra_flags              = CRYPTO_ALG_TYPE_SHASH |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA1_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct padlock_sha_ctx),
                .cra_module             = THIS_MODULE,
                .cra_init               = padlock_cra_init,
                .cra_exit               = padlock_cra_exit,
        }
};

static struct shash_alg sha256_alg = {
        .digestsize     = SHA256_DIGEST_SIZE,
        .init           = padlock_sha_init,
        .update         = padlock_sha_update,
        .finup          = padlock_sha256_finup,
        .final          = padlock_sha256_final,
        .descsize       = sizeof(struct padlock_sha_desc),
        .base           = {
                .cra_name               = "sha256",
                .cra_driver_name        = "sha256-padlock",
                .cra_priority           = PADLOCK_CRA_PRIORITY,
                .cra_flags              = CRYPTO_ALG_TYPE_SHASH |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA256_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct padlock_sha_ctx),
                .cra_module             = THIS_MODULE,
                .cra_init               = padlock_cra_init,
                .cra_exit               = padlock_cra_exit,
        }
};

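/*
 * The PadLock Hash Engine (PHE) must both exist and be enabled (e.g.
 * by the BIOS) before the algorithms are registered.
 */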
static int __init padlock_init(void)
{
        int rc = -ENODEV;

        if (!cpu_has_phe) {
                printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
                return -ENODEV;
        }

        if (!cpu_has_phe_enabled) {
                printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
                return -ENODEV;
        }

        rc = crypto_register_shash(&sha1_alg);
        if (rc)
                goto out;

        rc = crypto_register_shash(&sha256_alg);
        if (rc)
                goto out_unreg1;

        printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");
        return 0;

out_unreg1:
        crypto_unregister_shash(&sha1_alg);
out:
        printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
        return rc;
}

static void __exit padlock_fini(void)
{
        crypto_unregister_shash(&sha1_alg);
        crypto_unregister_shash(&sha256_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("sha1-all");
MODULE_ALIAS("sha256-all");
MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");
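
/*
 * Usage note (illustrative, not part of the driver): a minimal sketch of
 * how kernel code of this era could feed data through the registered
 * "sha1" algorithm via the shash API.  The helper name sha1_demo is
 * hypothetical, <crypto/hash.h> and <linux/slab.h> are assumed, and
 * error handling is abbreviated.
 */
static int sha1_demo(const u8 *data, unsigned int len, u8 *digest)
{
        struct crypto_shash *tfm;
        struct shash_desc *desc;
        int err;

        /* Picks the highest-priority "sha1" provider; with this driver
         * loaded and PHE present, that is sha1-padlock. */
        tfm = crypto_alloc_shash("sha1", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* The descriptor must leave room for the provider's state. */
        desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
                       GFP_KERNEL);
        if (!desc) {
                crypto_free_shash(tfm);
                return -ENOMEM;
        }
        desc->tfm = tfm;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        /* digest = init + update + final in one call. */
        err = crypto_shash_digest(desc, data, len, digest);

        kfree(desc);
        crypto_free_shash(tfm);
        return err;
}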