padlock-sha.c

/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006 Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cryptohash.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/i387.h>
#include "padlock.h"

#define SHA1_DEFAULT_FALLBACK	"sha1-generic"
#define SHA256_DEFAULT_FALLBACK	"sha256-generic"

struct padlock_sha_ctx {
	char		*data;
	size_t		used;
	int		bypass;
	void		(*f_sha_padlock)(const char *in, char *out, int count);
	struct hash_desc fallback;
};

static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm)
{
	return crypto_tfm_ctx(tfm);
}

/* We'll need aligned address on the stack */
#define NEAREST_ALIGNED(ptr) \
	((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT))

static struct crypto_alg sha1_alg, sha256_alg;
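
/*
 * The PadLock xsha instructions hash a complete message in one go, so
 * updates are buffered and the hardware only runs at ->final() time.
 * When the buffered data outgrows the one-page buffer we "bypass" to
 * the software fallback: replay whatever was buffered into the
 * fallback tfm, then feed all further updates straight to it.
 */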
static void padlock_sha_bypass(struct crypto_tfm *tfm)
{
	if (ctx(tfm)->bypass)
		return;

	crypto_hash_init(&ctx(tfm)->fallback);

	if (ctx(tfm)->data && ctx(tfm)->used) {
		struct scatterlist sg;

		sg_init_one(&sg, ctx(tfm)->data, ctx(tfm)->used);
		crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
	}

	ctx(tfm)->used = 0;
	ctx(tfm)->bypass = 1;
}

static void padlock_sha_init(struct crypto_tfm *tfm)
{
	ctx(tfm)->used = 0;
	ctx(tfm)->bypass = 0;
}

static void padlock_sha_update(struct crypto_tfm *tfm,
			       const uint8_t *data, unsigned int length)
{
	/* Our buffer is always one page. */
	if (unlikely(!ctx(tfm)->bypass &&
		     (ctx(tfm)->used + length > PAGE_SIZE)))
		padlock_sha_bypass(tfm);

	if (unlikely(ctx(tfm)->bypass)) {
		struct scatterlist sg;

		sg_init_one(&sg, (uint8_t *)data, length);
		crypto_hash_update(&ctx(tfm)->fallback, &sg, length);
		return;
	}

	memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length);
	ctx(tfm)->used += length;
}
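
/*
 * The hardware leaves the digest state as host-order (little-endian on
 * x86) 32-bit words, while the crypto API expects big-endian digest
 * bytes -- hence the swab32() on every word.
 */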
static inline void padlock_output_block(uint32_t *src,
					uint32_t *dst, size_t count)
{
	while (count--)
		*dst++ = swab32(*src++);
}
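
/*
 * Calling convention of the xsha instructions as used below (an
 * assumption based on VIA's PadLock programming guide, not spelled out
 * in this file): ESI points at the input message, EDI at the digest
 * state buffer, ECX holds the total byte count, and EAX = 0 means no
 * bytes have been hashed yet, so the hardware pads and finalizes the
 * whole message itself.  The raw .byte encoding is used because older
 * assemblers do not know the xsha1/xsha256 mnemonics.
 */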
static void padlock_do_sha1(const char *in, char *out, int count)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 *     PadLock microcode needs it that big. */
	char buf[128+16];
	char *result = NEAREST_ALIGNED(buf);
	int ts_state;

	((uint32_t *)result)[0] = SHA1_H0;
	((uint32_t *)result)[1] = SHA1_H1;
	((uint32_t *)result)[2] = SHA1_H2;
	((uint32_t *)result)[3] = SHA1_H3;
	((uint32_t *)result)[4] = SHA1_H4;

	/* prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
		      : "+S"(in), "+D"(result)
		      : "c"(count), "a"(0));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
}

static void padlock_do_sha256(const char *in, char *out, int count)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 *     PadLock microcode needs it that big. */
	char buf[128+16];
	char *result = NEAREST_ALIGNED(buf);
	int ts_state;

	((uint32_t *)result)[0] = SHA256_H0;
	((uint32_t *)result)[1] = SHA256_H1;
	((uint32_t *)result)[2] = SHA256_H2;
	((uint32_t *)result)[3] = SHA256_H3;
	((uint32_t *)result)[4] = SHA256_H4;
	((uint32_t *)result)[5] = SHA256_H5;
	((uint32_t *)result)[6] = SHA256_H6;
	((uint32_t *)result)[7] = SHA256_H7;

	/* prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
		      : "+S"(in), "+D"(result)
		      : "c"(count), "a"(0));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
}

static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out)
{
	if (unlikely(ctx(tfm)->bypass)) {
		crypto_hash_final(&ctx(tfm)->fallback, out);
		ctx(tfm)->bypass = 0;
		return;
	}

	/* Pass the input buffer to PadLock microcode... */
	ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used);

	ctx(tfm)->used = 0;
}
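
/*
 * Note on the fallback lookup: cra_name ("sha1"/"sha256") doubles as
 * the fallback driver name, and CRYPTO_ALG_NEED_FALLBACK in the mask
 * restricts the lookup to algorithms that do not themselves need a
 * fallback -- i.e. it excludes this very driver, so the allocation
 * cannot recurse into ourselves.
 */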
static int padlock_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct crypto_hash *fallback_tfm;

	/* For now we'll allocate one page. This
	 * could eventually be configurable one day. */
	ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
	if (!ctx(tfm)->data)
		return -ENOMEM;

	/* Allocate a fallback and abort if it failed. */
	fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0,
					 CRYPTO_ALG_ASYNC |
					 CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING PFX
		       "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		free_page((unsigned long)(ctx(tfm)->data));
		return PTR_ERR(fallback_tfm);
	}

	ctx(tfm)->fallback.tfm = fallback_tfm;
	return 0;
}

static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
{
	ctx(tfm)->f_sha_padlock = padlock_do_sha1;

	return padlock_cra_init(tfm);
}

static int padlock_sha256_cra_init(struct crypto_tfm *tfm)
{
	ctx(tfm)->f_sha_padlock = padlock_do_sha256;

	return padlock_cra_init(tfm);
}

static void padlock_cra_exit(struct crypto_tfm *tfm)
{
	if (ctx(tfm)->data) {
		free_page((unsigned long)(ctx(tfm)->data));
		ctx(tfm)->data = NULL;
	}

	crypto_free_hash(ctx(tfm)->fallback.tfm);
	ctx(tfm)->fallback.tfm = NULL;
}

static struct crypto_alg sha1_alg = {
	.cra_name		= "sha1",
	.cra_driver_name	= "sha1-padlock",
	.cra_priority		= PADLOCK_CRA_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_DIGEST |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= SHA1_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct padlock_sha_ctx),
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(sha1_alg.cra_list),
	.cra_init		= padlock_sha1_cra_init,
	.cra_exit		= padlock_cra_exit,
	.cra_u			= {
		.digest = {
			.dia_digestsize	= SHA1_DIGEST_SIZE,
			.dia_init	= padlock_sha_init,
			.dia_update	= padlock_sha_update,
			.dia_final	= padlock_sha_final,
		}
	}
};

static struct crypto_alg sha256_alg = {
	.cra_name		= "sha256",
	.cra_driver_name	= "sha256-padlock",
	.cra_priority		= PADLOCK_CRA_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_DIGEST |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= SHA256_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct padlock_sha_ctx),
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(sha256_alg.cra_list),
	.cra_init		= padlock_sha256_cra_init,
	.cra_exit		= padlock_cra_exit,
	.cra_u			= {
		.digest = {
			.dia_digestsize	= SHA256_DIGEST_SIZE,
			.dia_init	= padlock_sha_init,
			.dia_update	= padlock_sha_update,
			.dia_final	= padlock_sha_final,
		}
	}
};
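
/*
 * Registration note: PADLOCK_CRA_PRIORITY (defined in padlock.h) is
 * chosen to outrank the generic C implementations, so once this module
 * is loaded, lookups for "sha1"/"sha256" resolve to the hardware
 * driver by default.
 */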
static int __init padlock_init(void)
{
	int rc = -ENODEV;

	if (!cpu_has_phe) {
		printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_phe_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	rc = crypto_register_alg(&sha1_alg);
	if (rc)
		goto out;

	rc = crypto_register_alg(&sha256_alg);
	if (rc)
		goto out_unreg1;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");

	return 0;

out_unreg1:
	crypto_unregister_alg(&sha1_alg);
out:
	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
	return rc;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_alg(&sha1_alg);
	crypto_unregister_alg(&sha256_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("sha1-all");
MODULE_ALIAS("sha256-all");
MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");
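
/*
 * Minimal usage sketch for a consumer of this driver (a hypothetical
 * caller, not part of this file), using the crypto_hash API these
 * algorithms are registered under:
 *
 *	struct crypto_hash *tfm;
 *	struct hash_desc desc;
 *	struct scatterlist sg;
 *	u8 digest[SHA1_DIGEST_SIZE];
 *
 *	tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	desc.tfm = tfm;
 *	desc.flags = 0;
 *	sg_init_one(&sg, data, len);
 *	crypto_hash_digest(&desc, &sg, len, digest);
 *	crypto_free_hash(tfm);
 */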