padlock-sha.c

/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006 Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/padlock.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/i387.h>
struct padlock_sha_desc {
        struct shash_desc fallback;
};

struct padlock_sha_ctx {
        struct crypto_shash *fallback;
};
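
/*
 * Editor's note (not in the upstream file): PadLock's XSHA1/XSHA256
 * instructions can only run a hash to completion, so the incremental
 * init()/update()/export()/import() operations below are delegated to
 * a software shash kept in the tfm context; finup() then exports the
 * fallback's state and hands the final blocks to the hardware.
 */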
static int padlock_sha_init(struct shash_desc *desc)
{
        struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
        struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

        dctx->fallback.tfm = ctx->fallback;
        dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
        return crypto_shash_init(&dctx->fallback);
}

static int padlock_sha_update(struct shash_desc *desc,
                              const u8 *data, unsigned int length)
{
        struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

        dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
        return crypto_shash_update(&dctx->fallback, data, length);
}

static int padlock_sha_export(struct shash_desc *desc, void *out)
{
        struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

        return crypto_shash_export(&dctx->fallback, out);
}

static int padlock_sha_import(struct shash_desc *desc, const void *in)
{
        struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
        struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

        dctx->fallback.tfm = ctx->fallback;
        dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
        return crypto_shash_import(&dctx->fallback, in);
}
static inline void padlock_output_block(uint32_t *src,
                                        uint32_t *dst, size_t count)
{
        while (count--)
                *dst++ = swab32(*src++);
}
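
/*
 * Editor's note (not in the upstream file): the XSHA instructions leave
 * the digest words in little-endian CPU order, while SHA-1/SHA-256
 * digests are defined big-endian, hence the swab32() on each word,
 * e.g. swab32(0x67452301) == 0x01234567.
 */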
static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
                              unsigned int count, u8 *out)
{
        /* We can't store directly to *out as it may be unaligned. */
        /* BTW Don't reduce the buffer size below 128 Bytes!
         * PadLock microcode needs it that big. */
        char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
                ((aligned(STACK_ALIGN)));
        char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
        struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
        struct sha1_state state;
        unsigned int space;
        unsigned int leftover;
        int ts_state;
        int err;

        dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
        err = crypto_shash_export(&dctx->fallback, &state);
        if (err)
                goto out;

        if (state.count + count > ULONG_MAX)
                return crypto_shash_finup(&dctx->fallback, in, count, out);

        leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
        space = SHA1_BLOCK_SIZE - leftover;
        if (space) {
                if (count > space) {
                        err = crypto_shash_update(&dctx->fallback, in, space) ?:
                              crypto_shash_export(&dctx->fallback, &state);
                        if (err)
                                goto out;
                        count -= space;
                        in += space;
                } else {
                        memcpy(state.buffer + leftover, in, count);
                        in = state.buffer;
                        count += leftover;
                        state.count &= ~(SHA1_BLOCK_SIZE - 1);
                }
        }

        memcpy(result, &state.state, SHA1_DIGEST_SIZE);

        /* prevent taking the spurious DNA fault with padlock. */
        ts_state = irq_ts_save();
        asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
                      :
                      : "c"((unsigned long)state.count + count),
                        "a"((unsigned long)state.count),
                        "S"(in), "D"(result));
        irq_ts_restore(ts_state);

        padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);

out:
        return err;
}
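
/*
 * Editor's note (not in the upstream file): the leftover arithmetic
 * above maps state.count modulo the block size into 1..64 rather than
 * 0..63, so an exactly block-aligned count keeps one full pending
 * block for the hardware: count 1 -> leftover 1 (space 63), count 64
 * -> leftover 64 (space 0), count 65 -> leftover 1 (space 63).
 */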
static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
{
        u8 buf[4];

        return padlock_sha1_finup(desc, buf, 0, out);
}
static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
                                unsigned int count, u8 *out)
{
        /* We can't store directly to *out as it may be unaligned. */
        /* BTW Don't reduce the buffer size below 128 Bytes!
         * PadLock microcode needs it that big. */
        char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
                ((aligned(STACK_ALIGN)));
        char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
        struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
        struct sha256_state state;
        unsigned int space;
        unsigned int leftover;
        int ts_state;
        int err;

        dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
        err = crypto_shash_export(&dctx->fallback, &state);
        if (err)
                goto out;

        if (state.count + count > ULONG_MAX)
                return crypto_shash_finup(&dctx->fallback, in, count, out);

        leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
        space = SHA256_BLOCK_SIZE - leftover;
        if (space) {
                if (count > space) {
                        err = crypto_shash_update(&dctx->fallback, in, space) ?:
                              crypto_shash_export(&dctx->fallback, &state);
                        if (err)
                                goto out;
                        count -= space;
                        in += space;
                } else {
                        memcpy(state.buf + leftover, in, count);
                        in = state.buf;
                        count += leftover;
                        state.count &= ~(SHA256_BLOCK_SIZE - 1);
                }
        }

        memcpy(result, &state.state, SHA256_DIGEST_SIZE);

        /* prevent taking the spurious DNA fault with padlock. */
        ts_state = irq_ts_save();
        asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
                      :
                      : "c"((unsigned long)state.count + count),
                        "a"((unsigned long)state.count),
                        "S"(in), "D"(result));
        irq_ts_restore(ts_state);

        padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);

out:
        return err;
}
static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
{
        u8 buf[4];

        return padlock_sha256_finup(desc, buf, 0, out);
}
static int padlock_cra_init(struct crypto_tfm *tfm)
{
        struct crypto_shash *hash = __crypto_shash_cast(tfm);
        const char *fallback_driver_name = tfm->__crt_alg->cra_name;
        struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_shash *fallback_tfm;
        int err = -ENOMEM;

        /* Allocate a fallback and abort if it failed. */
        fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
                                          CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(fallback_tfm)) {
                printk(KERN_WARNING PFX
                       "Fallback driver '%s' could not be loaded!\n",
                       fallback_driver_name);
                err = PTR_ERR(fallback_tfm);
                goto out;
        }

        ctx->fallback = fallback_tfm;
        hash->descsize += crypto_shash_descsize(fallback_tfm);
        return 0;

out:
        return err;
}
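
/*
 * Editor's note (not in the upstream file): growing descsize here
 * reserves room for the fallback's descriptor state immediately behind
 * struct padlock_sha_desc, which is why dctx->fallback can be used as
 * a complete shash_desc without a separate allocation.
 */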
static void padlock_cra_exit(struct crypto_tfm *tfm)
{
        struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(ctx->fallback);
}

static struct shash_alg sha1_alg = {
        .digestsize     =       SHA1_DIGEST_SIZE,
        .init           =       padlock_sha_init,
        .update         =       padlock_sha_update,
        .finup          =       padlock_sha1_finup,
        .final          =       padlock_sha1_final,
        .export         =       padlock_sha_export,
        .import         =       padlock_sha_import,
        .descsize       =       sizeof(struct padlock_sha_desc),
        .statesize      =       sizeof(struct sha1_state),
        .base           =       {
                .cra_name               =       "sha1",
                .cra_driver_name        =       "sha1-padlock",
                .cra_priority           =       PADLOCK_CRA_PRIORITY,
                .cra_flags              =       CRYPTO_ALG_TYPE_SHASH |
                                                CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          =       SHA1_BLOCK_SIZE,
                .cra_ctxsize            =       sizeof(struct padlock_sha_ctx),
                .cra_module             =       THIS_MODULE,
                .cra_init               =       padlock_cra_init,
                .cra_exit               =       padlock_cra_exit,
        }
};

static struct shash_alg sha256_alg = {
        .digestsize     =       SHA256_DIGEST_SIZE,
        .init           =       padlock_sha_init,
        .update         =       padlock_sha_update,
        .finup          =       padlock_sha256_finup,
        .final          =       padlock_sha256_final,
        .export         =       padlock_sha_export,
        .import         =       padlock_sha_import,
        .descsize       =       sizeof(struct padlock_sha_desc),
        .statesize      =       sizeof(struct sha256_state),
        .base           =       {
                .cra_name               =       "sha256",
                .cra_driver_name        =       "sha256-padlock",
                .cra_priority           =       PADLOCK_CRA_PRIORITY,
                .cra_flags              =       CRYPTO_ALG_TYPE_SHASH |
                                                CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          =       SHA256_BLOCK_SIZE,
                .cra_ctxsize            =       sizeof(struct padlock_sha_ctx),
                .cra_module             =       THIS_MODULE,
                .cra_init               =       padlock_cra_init,
                .cra_exit               =       padlock_cra_exit,
        }
};
static int __init padlock_init(void)
{
        int rc = -ENODEV;

        if (!cpu_has_phe) {
                printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
                return -ENODEV;
        }

        if (!cpu_has_phe_enabled) {
                printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
                return -ENODEV;
        }

        rc = crypto_register_shash(&sha1_alg);
        if (rc)
                goto out;

        rc = crypto_register_shash(&sha256_alg);
        if (rc)
                goto out_unreg1;

        printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");

        return 0;

out_unreg1:
        crypto_unregister_shash(&sha1_alg);
out:
        printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
        return rc;
}

static void __exit padlock_fini(void)
{
        crypto_unregister_shash(&sha1_alg);
        crypto_unregister_shash(&sha256_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("sha1-all");
MODULE_ALIAS("sha256-all");
MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");
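
/*
 * Editor's note (not in the upstream file): a minimal usage sketch.
 * Any kernel caller that asks the crypto API for "sha1" is routed to
 * "sha1-padlock" while this module is loaded, thanks to its elevated
 * cra_priority. The function below is hypothetical illustration only
 * and is disabled with #if 0 so it does not affect the build.
 */
#if 0
#include <crypto/hash.h>
#include <linux/slab.h>

static int example_sha1_digest(const u8 *data, unsigned int len,
                               u8 out[SHA1_DIGEST_SIZE])
{
        struct crypto_shash *tfm;
        struct shash_desc *desc;
        int err;

        /* Resolves to sha1-padlock when the PadLock engine is present. */
        tfm = crypto_alloc_shash("sha1", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* descsize already covers the fallback's state (see cra_init). */
        desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
                       GFP_KERNEL);
        if (!desc) {
                crypto_free_shash(tfm);
                return -ENOMEM;
        }
        desc->tfm = tfm;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypto_shash_digest(desc, data, len, out);

        kfree(desc);
        crypto_free_shash(tfm);
        return err;
}
#endif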