@@ -14,8 +14,14 @@
 #include <linux/kthread.h>
 #include <linux/platform_device.h>
 #include <linux/scatterlist.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>

 #include "mv_cesa.h"
+
+#define MV_CESA	"MV-CESA:"
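+/* Upper bound on the number of bytes the engine can hash in one
+ * operation; the 0xFFFF value suggests (an assumption) a 16-bit total
+ * length field in the hardware.  Larger totals are finished in
+ * software via the fallback shash.
+ */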
+#define MAX_HW_HASH_SIZE	0xFFFF
+
 /*
  * STM:
  * /---------------------------------------\
@@ -38,7 +44,7 @@ enum engine_status {
  * @dst_sg_it:		sg iterator for dst
  * @sg_src_left:	bytes left in src to process (scatter list)
  * @src_start:		offset to add to src start position (scatter list)
- * @crypt_len:		length of current crypt process
+ * @crypt_len:		length of current hw crypt/hash process
  * @hw_nbytes:		total bytes to process in hw for this request
  * @copy_back:		whether to copy data back (crypt) or not (hash)
  * @sg_dst_left:	bytes left dst to process in this scatter list
@@ -81,6 +87,8 @@ struct crypto_priv {
 	struct req_progress p;
 	int max_req_size;
 	int sram_size;
+	int has_sha1;
+	int has_hmac_sha1;
 };

 static struct crypto_priv *cpg;
@@ -102,6 +110,31 @@ struct mv_req_ctx {
 	int decrypt;
 };

+enum hash_op {
+	COP_SHA1,
+	COP_HMAC_SHA1
+};
+
+struct mv_tfm_hash_ctx {
+	struct crypto_shash *fallback;
+	struct crypto_shash *base_hash;
+	u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
+	int count_add;
+	enum hash_op op;
+};
+
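+/* Per-request hash state, carried across update() calls so that inputs
+ * larger than the SRAM data buffer can be streamed chunk by chunk.
+ */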
+struct mv_req_hash_ctx {
+	u64 count;
+	u32 state[SHA1_DIGEST_SIZE / 4];
+	u8 buffer[SHA1_BLOCK_SIZE];
+	int first_hash;		/* marks that we don't have previous state */
+	int last_chunk;		/* marks that this is the 'final' request */
+	int extra_bytes;	/* unprocessed bytes in buffer */
+	enum hash_op op;
+	int count_add;
+	struct scatterlist dummysg;
+};
+
 static void compute_aes_dec_key(struct mv_ctx *ctx)
 {
 	struct crypto_aes_ctx gen_aes_key;
@@ -265,6 +298,132 @@ static void mv_crypto_algo_completion(void)
 	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
 }

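+/* Program one hardware hash pass over the chunk currently staged in
+ * SRAM.  The fragment mode (NOT/FIRST/MID/LAST_FRAG) tells the engine
+ * whether to load fresh initial values and whether to apply the final
+ * padding.
+ */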
+static void mv_process_hash_current(int first_block)
+{
+	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
+	struct req_progress *p = &cpg->p;
+	struct sec_accel_config op = { 0 };
+	int is_last;
+
+	switch (req_ctx->op) {
+	case COP_SHA1:
+	default:
+		op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
+		break;
+	case COP_HMAC_SHA1:
+		op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
+		break;
+	}
+
+	op.mac_src_p = MAC_SRC_DATA_P(SRAM_DATA_IN_START) |
+		MAC_SRC_TOTAL_LEN((u32)req_ctx->count);
+
+	setup_data_in();
+
+	op.mac_digest = MAC_DIGEST_P(SRAM_DIGEST_BUF) |
+		MAC_FRAG_LEN(p->crypt_len);
+	op.mac_iv = MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
+		MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);
+
+	is_last = req_ctx->last_chunk
+		&& (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
+		&& (req_ctx->count <= MAX_HW_HASH_SIZE);
+	if (req_ctx->first_hash) {
+		if (is_last)
+			op.config |= CFG_NOT_FRAG;
+		else
+			op.config |= CFG_FIRST_FRAG;
+
+		req_ctx->first_hash = 0;
+	} else {
+		if (is_last)
+			op.config |= CFG_LAST_FRAG;
+		else
+			op.config |= CFG_MID_FRAG;
+	}
+
+	memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
+
+	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
+	/* GO */
+	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
+
+	/*
+	 * XXX: add timer if the interrupt does not occur for some mystery
+	 * reason
+	 */
+}
+
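+/* Feed the hardware's partial SHA1 state into a software shash so the
+ * fallback can finish the digest.  count_add compensates for the ipad
+ * block the engine has already consumed in the HMAC case.
+ */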
+static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
+					  struct shash_desc *desc)
+{
+	int i;
+	struct sha1_state shash_state;
+
+	shash_state.count = ctx->count + ctx->count_add;
+	for (i = 0; i < 5; i++)
+		shash_state.state[i] = ctx->state[i];
+	memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
+	return crypto_shash_import(desc, &shash_state);
+}
+
+static int mv_hash_final_fallback(struct ahash_request *req)
+{
+	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
+	struct {
+		struct shash_desc shash;
+		char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
+	} desc;
+	int rc;
+
+	desc.shash.tfm = tfm_ctx->fallback;
+	desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+	if (unlikely(req_ctx->first_hash)) {
+		crypto_shash_init(&desc.shash);
+		crypto_shash_update(&desc.shash, req_ctx->buffer,
+				    req_ctx->extra_bytes);
+	} else {
+		/* only SHA1 for now... */
+		rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
+		if (rc)
+			goto out;
+	}
+	rc = crypto_shash_final(&desc.shash, req->result);
+out:
+	return rc;
+}
+
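+/* Completion handler for hash requests: saves the engine's digest state
+ * registers for a possible next chunk and, on the final chunk, emits the
+ * result (from SRAM when the hardware could finish it, else via the
+ * software fallback).
+ */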
+static void mv_hash_algo_completion(void)
+{
+	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
+	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+
+	if (ctx->extra_bytes)
+		copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
+	sg_miter_stop(&cpg->p.src_sg_it);
+
+	ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
+	ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
+	ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
+	ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
+	ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
+
+	if (likely(ctx->last_chunk)) {
+		if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
+			memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
+			       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+		} else
+			mv_hash_final_fallback(req);
+	}
+}
+
 static void dequeue_complete_req(void)
 {
 	struct crypto_async_request *req = cpg->cur_req;
@@ -332,7 +491,7 @@ static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
 	return i;
 }

-static void mv_enqueue_new_req(struct ablkcipher_request *req)
+static void mv_start_new_crypt_req(struct ablkcipher_request *req)
 {
 	struct req_progress *p = &cpg->p;
 	int num_sgs;
@@ -353,11 +512,68 @@ static void mv_enqueue_new_req(struct ablkcipher_request *req)
 	mv_process_current_q(1);
 }

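+/* Kick off hardware processing for a dequeued hash request.  Leftover
+ * bytes from the previous update are prepended in SRAM, and only whole
+ * SHA1 blocks go to the engine unless this is a last chunk the hardware
+ * can finish by itself.
+ */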
+static void mv_start_new_hash_req(struct ahash_request *req)
+{
+	struct req_progress *p = &cpg->p;
+	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+	int num_sgs, hw_bytes, old_extra_bytes, rc;
+
+	cpg->cur_req = &req->base;
+	memset(p, 0, sizeof(struct req_progress));
+	hw_bytes = req->nbytes + ctx->extra_bytes;
+	old_extra_bytes = ctx->extra_bytes;
+
+	if (unlikely(ctx->extra_bytes)) {
+		memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
+		       ctx->extra_bytes);
+		p->crypt_len = ctx->extra_bytes;
+	}
+
+	memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
+
+	if (unlikely(!ctx->first_hash)) {
+		writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
+		writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
+		writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
+		writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
+		writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+	}
+
+	ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
+	if (ctx->extra_bytes != 0
+	    && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
+		hw_bytes -= ctx->extra_bytes;
+	else
+		ctx->extra_bytes = 0;
+
+	num_sgs = count_sgs(req->src, req->nbytes);
+	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+
+	if (hw_bytes) {
+		p->hw_nbytes = hw_bytes;
+		p->complete = mv_hash_algo_completion;
+		p->process = mv_process_hash_current;
+
+		mv_process_hash_current(1);
+	} else {
+		copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
+				ctx->extra_bytes - old_extra_bytes);
+		sg_miter_stop(&p->src_sg_it);
+		if (ctx->last_chunk)
+			rc = mv_hash_final_fallback(req);
+		else
+			rc = 0;
+		cpg->eng_st = ENGINE_IDLE;
+		local_bh_disable();
+		req->base.complete(&req->base, rc);
+		local_bh_enable();
+	}
+}
+
 static int queue_manag(void *data)
 {
 	cpg->eng_st = ENGINE_IDLE;
 	do {
-		struct ablkcipher_request *req;
 		struct crypto_async_request *async_req = NULL;
 		struct crypto_async_request *backlog;

@@ -383,9 +599,18 @@ static int queue_manag(void *data)
 		}

 		if (async_req) {
-			req = container_of(async_req,
-					struct ablkcipher_request, base);
-			mv_enqueue_new_req(req);
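+			/* Cipher and hash requests share one queue and
+			 * engine; dispatch on the request's cra_type.
+			 */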
+			if (async_req->tfm->__crt_alg->cra_type !=
+			    &crypto_ahash_type) {
+				struct ablkcipher_request *req =
+				    container_of(async_req,
+						 struct ablkcipher_request,
+						 base);
+				mv_start_new_crypt_req(req);
+			} else {
+				struct ahash_request *req =
+				    ahash_request_cast(async_req);
+				mv_start_new_hash_req(req);
+			}
 			async_req = NULL;
 		}

@@ -457,6 +682,215 @@ static int mv_cra_init(struct crypto_tfm *tfm)
 	return 0;
 }

+static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
+				 int is_last, unsigned int req_len,
+				 int count_add)
+{
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->op = op;
+	ctx->count = req_len;
+	ctx->first_hash = 1;
+	ctx->last_chunk = is_last;
+	ctx->count_add = count_add;
+}
+
+static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
+				   unsigned int req_len)
+{
+	ctx->last_chunk = is_last;
+	ctx->count += req_len;
+}
+
+static int mv_hash_init(struct ahash_request *req)
+{
+	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+
+	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
+			     tfm_ctx->count_add);
+	return 0;
+}
+
+static int mv_hash_update(struct ahash_request *req)
+{
+	if (!req->nbytes)
+		return 0;
+
+	mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
+	return mv_handle_req(&req->base);
+}
+
+static int mv_hash_final(struct ahash_request *req)
+{
+	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+
+	/* dummy buffer of 4 bytes */
+	sg_init_one(&ctx->dummysg, ctx->buffer, 4);
+	/* Overriding req->src is safe here: final consumes no input data. */
+	ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0);
+	mv_update_hash_req_ctx(ctx, 1, 0);
+	return mv_handle_req(&req->base);
+}
+
+static int mv_hash_finup(struct ahash_request *req)
+{
+	if (!req->nbytes)
+		return mv_hash_final(req);
+
+	mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
+	return mv_handle_req(&req->base);
+}
+
+static int mv_hash_digest(struct ahash_request *req)
+{
+	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
+
+	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
+			     req->nbytes, tfm_ctx->count_add);
+	return mv_handle_req(&req->base);
+}
+
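+/* Store the precomputed HMAC inner/outer pad hash states back to back,
+ * in the byte order the engine expects; they are later copied to SRAM
+ * as one block starting at SRAM_HMAC_IV_IN.
+ */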
+static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
+			     const void *ostate)
+{
+	const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
+	int i;
+
+	for (i = 0; i < 5; i++) {
+		ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
+		ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
+	}
+}
+
+static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 *key,
+			  unsigned int keylen)
+{
+	int rc;
+	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+	int bs, ds, ss;
+
+	if (!ctx->base_hash)
+		return 0;
+
+	rc = crypto_shash_setkey(ctx->fallback, key, keylen);
+	if (rc)
+		return rc;
+
+	/* Can't see a way to extract the ipad/opad from the fallback tfm,
+	 * so I'm basically copying code from the hmac module */
+	bs = crypto_shash_blocksize(ctx->base_hash);
+	ds = crypto_shash_digestsize(ctx->base_hash);
+	ss = crypto_shash_statesize(ctx->base_hash);
+
+	{
+		struct {
+			struct shash_desc shash;
+			char ctx[crypto_shash_descsize(ctx->base_hash)];
+		} desc;
+		unsigned int i;
+		char ipad[ss];
+		char opad[ss];
+
+		desc.shash.tfm = ctx->base_hash;
+		desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
+		    CRYPTO_TFM_REQ_MAY_SLEEP;
+
+		if (keylen > bs) {
+			int err;
+
+			err = crypto_shash_digest(&desc.shash, key, keylen,
+						  ipad);
+			if (err)
+				return err;
+
+			keylen = ds;
+		} else
+			memcpy(ipad, key, keylen);
+
+		memset(ipad + keylen, 0, bs - keylen);
+		memcpy(opad, ipad, bs);
+
+		for (i = 0; i < bs; i++) {
+			ipad[i] ^= 0x36;
+			opad[i] ^= 0x5c;
+		}
+
+		rc = crypto_shash_init(&desc.shash) ? :
+		    crypto_shash_update(&desc.shash, ipad, bs) ? :
+		    crypto_shash_export(&desc.shash, ipad) ? :
+		    crypto_shash_init(&desc.shash) ? :
+		    crypto_shash_update(&desc.shash, opad, bs) ? :
+		    crypto_shash_export(&desc.shash, opad);
+
+		if (rc == 0)
+			mv_hash_init_ivs(ctx, ipad, opad);

+		return rc;
+	}
+}
+
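+/* Common tfm init: allocate a software fallback under the same
+ * algorithm name (cra_name) and, for HMAC, a base hash used only to
+ * precompute the ipad/opad states in mv_hash_setkey().
+ */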
+static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
+			    enum hash_op op, int count_add)
+{
+	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_shash *fallback_tfm = NULL;
+	struct crypto_shash *base_hash = NULL;
+	int err = -ENOMEM;
+
+	ctx->op = op;
+	ctx->count_add = count_add;
+
+	/* Allocate a fallback and abort if it failed. */
+	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
+					  CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(fallback_tfm)) {
+		printk(KERN_WARNING MV_CESA
+		       "Fallback driver '%s' could not be loaded!\n",
+		       fallback_driver_name);
+		err = PTR_ERR(fallback_tfm);
+		goto out;
+	}
+	ctx->fallback = fallback_tfm;
+
+	if (base_hash_name) {
+		/* Allocate a hash to compute the ipad/opad of hmac. */
+		base_hash = crypto_alloc_shash(base_hash_name, 0,
+					       CRYPTO_ALG_NEED_FALLBACK);
+		if (IS_ERR(base_hash)) {
+			printk(KERN_WARNING MV_CESA
+			       "Base driver '%s' could not be loaded!\n",
+			       base_hash_name);
+			err = PTR_ERR(base_hash);
+			goto err_bad_base;
+		}
+	}
+	ctx->base_hash = base_hash;
+
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct mv_req_hash_ctx) +
+				 crypto_shash_descsize(ctx->fallback));
+	return 0;
+
+err_bad_base:
+	crypto_free_shash(fallback_tfm);
+out:
+	return err;
+}
+
+static void mv_cra_hash_exit(struct crypto_tfm *tfm)
+{
+	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_shash(ctx->fallback);
+	if (ctx->base_hash)
+		crypto_free_shash(ctx->base_hash);
+}
+
+static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
+{
+	return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
+}
+
+static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
+{
+	return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
+}
+
 irqreturn_t crypto_int(int irq, void *priv)
 {
 	u32 val;
@@ -519,6 +953,53 @@ struct crypto_alg mv_aes_alg_cbc = {
 	},
 };

+struct ahash_alg mv_sha1_alg = {
+	.init = mv_hash_init,
+	.update = mv_hash_update,
+	.final = mv_hash_final,
+	.finup = mv_hash_finup,
+	.digest = mv_hash_digest,
+	.halg = {
+		.digestsize = SHA1_DIGEST_SIZE,
+		.base = {
+			.cra_name = "sha1",
+			.cra_driver_name = "mv-sha1",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_NEED_FALLBACK,
+			.cra_blocksize = SHA1_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
+			.cra_init = mv_cra_hash_sha1_init,
+			.cra_exit = mv_cra_hash_exit,
+			.cra_module = THIS_MODULE,
+		}
+	}
+};
+
+struct ahash_alg mv_hmac_sha1_alg = {
+	.init = mv_hash_init,
+	.update = mv_hash_update,
+	.final = mv_hash_final,
+	.finup = mv_hash_finup,
+	.digest = mv_hash_digest,
+	.setkey = mv_hash_setkey,
+	.halg = {
+		.digestsize = SHA1_DIGEST_SIZE,
+		.base = {
+			.cra_name = "hmac(sha1)",
+			.cra_driver_name = "mv-hmac-sha1",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC |
+				     CRYPTO_ALG_NEED_FALLBACK,
+			.cra_blocksize = SHA1_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
+			.cra_init = mv_cra_hash_hmac_sha1_init,
+			.cra_exit = mv_cra_hash_exit,
+			.cra_module = THIS_MODULE,
+		}
+	}
+};
+
 static int mv_probe(struct platform_device *pdev)
 {
 	struct crypto_priv *cp;
@@ -527,7 +1008,7 @@ static int mv_probe(struct platform_device *pdev)
 	int ret;

 	if (cpg) {
-		printk(KERN_ERR "Second crypto dev?\n");
+		printk(KERN_ERR MV_CESA "Second crypto dev?\n");
 		return -EEXIST;
 	}

@@ -591,6 +1072,21 @@ static int mv_probe(struct platform_device *pdev)
 	ret = crypto_register_alg(&mv_aes_alg_cbc);
 	if (ret)
 		goto err_unreg_ecb;
+
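+	/* Hash registration failures are non-fatal: the corresponding
+	 * algorithm is simply left unavailable while the ciphers keep
+	 * working.
+	 */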
+	ret = crypto_register_ahash(&mv_sha1_alg);
+	if (ret == 0)
+		cpg->has_sha1 = 1;
+	else
+		printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");
+
+	ret = crypto_register_ahash(&mv_hmac_sha1_alg);
+	if (ret == 0) {
+		cpg->has_hmac_sha1 = 1;
+	} else {
+		printk(KERN_WARNING MV_CESA
+		       "Could not register hmac-sha1 driver\n");
+	}
+
 	return 0;
 err_unreg_ecb:
 	crypto_unregister_alg(&mv_aes_alg_ecb);
@@ -615,6 +1111,10 @@ static int mv_remove(struct platform_device *pdev)

 	crypto_unregister_alg(&mv_aes_alg_ecb);
 	crypto_unregister_alg(&mv_aes_alg_cbc);
+	if (cp->has_sha1)
+		crypto_unregister_ahash(&mv_sha1_alg);
+	if (cp->has_hmac_sha1)
+		crypto_unregister_ahash(&mv_hmac_sha1_alg);
 	kthread_stop(cp->queue_th);
 	free_irq(cp->irq, cp);
 	memset(cp->sram, 0, cp->sram_size);