|
@@ -72,17 +72,20 @@
|
|
|
|
|
|
#define DEFAULT_TIMEOUT_INTERVAL HZ
|
|
|
|
|
|
-#define FLAGS_FINUP 0x0002
|
|
|
-#define FLAGS_FINAL 0x0004
|
|
|
-#define FLAGS_SG 0x0008
|
|
|
-#define FLAGS_SHA1 0x0010
|
|
|
-#define FLAGS_DMA_ACTIVE 0x0020
|
|
|
-#define FLAGS_OUTPUT_READY 0x0040
|
|
|
-#define FLAGS_INIT 0x0100
|
|
|
-#define FLAGS_CPU 0x0200
|
|
|
-#define FLAGS_HMAC 0x0400
|
|
|
-#define FLAGS_ERROR 0x0800
|
|
|
-#define FLAGS_BUSY 0x1000
|
|
|
+/* mostly device flags */
|
|
|
+#define FLAGS_BUSY 0
|
|
|
+#define FLAGS_FINAL 1
|
|
|
+#define FLAGS_DMA_ACTIVE 2
|
|
|
+#define FLAGS_OUTPUT_READY 3
|
|
|
+#define FLAGS_INIT 4
|
|
|
+#define FLAGS_CPU 5
|
|
|
+#define FLAGS_DMA_READY 6
|
|
|
+/* context flags */
|
|
|
+#define FLAGS_FINUP 16
|
|
|
+#define FLAGS_SG 17
|
|
|
+#define FLAGS_SHA1 18
|
|
|
+#define FLAGS_HMAC 19
|
|
|
+#define FLAGS_ERROR 20
|
|
|
|
|
|
#define OP_UPDATE 1
|
|
|
#define OP_FINAL 2
|
|
@@ -144,7 +147,6 @@ struct omap_sham_dev {
|
|
|
int dma;
|
|
|
int dma_lch;
|
|
|
struct tasklet_struct done_task;
|
|
|
- struct tasklet_struct queue_task;
|
|
|
|
|
|
unsigned long flags;
|
|
|
struct crypto_queue queue;
|
|
@@ -223,7 +225,7 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req)
|
|
|
if (!hash)
|
|
|
return;
|
|
|
|
|
|
- if (likely(ctx->flags & FLAGS_SHA1)) {
|
|
|
+ if (likely(ctx->flags & BIT(FLAGS_SHA1))) {
|
|
|
/* SHA1 results are in big endian */
|
|
|
for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
|
|
|
hash[i] = be32_to_cpu(in[i]);
|
|
@@ -238,7 +240,7 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd)
|
|
|
{
|
|
|
clk_enable(dd->iclk);
|
|
|
|
|
|
- if (!(dd->flags & FLAGS_INIT)) {
|
|
|
+ if (!test_bit(FLAGS_INIT, &dd->flags)) {
|
|
|
omap_sham_write_mask(dd, SHA_REG_MASK,
|
|
|
SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
|
|
|
|
|
@@ -246,7 +248,7 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd)
|
|
|
SHA_REG_SYSSTATUS_RESETDONE))
|
|
|
return -ETIMEDOUT;
|
|
|
|
|
|
- dd->flags |= FLAGS_INIT;
|
|
|
+ set_bit(FLAGS_INIT, &dd->flags);
|
|
|
dd->err = 0;
|
|
|
}
|
|
|
|
|
@@ -269,7 +271,7 @@ static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
|
|
|
* Setting ALGO_CONST only for the first iteration
|
|
|
* and CLOSE_HASH only for the last one.
|
|
|
*/
|
|
|
- if (ctx->flags & FLAGS_SHA1)
|
|
|
+ if (ctx->flags & BIT(FLAGS_SHA1))
|
|
|
val |= SHA_REG_CTRL_ALGO;
|
|
|
if (!ctx->digcnt)
|
|
|
val |= SHA_REG_CTRL_ALGO_CONST;
|
|
@@ -301,7 +303,9 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
|
|
|
return -ETIMEDOUT;
|
|
|
|
|
|
if (final)
|
|
|
- ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
|
|
|
+ set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
|
|
|
+
|
|
|
+ set_bit(FLAGS_CPU, &dd->flags);
|
|
|
|
|
|
len32 = DIV_ROUND_UP(length, sizeof(u32));
|
|
|
|
|
@@ -334,9 +338,9 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
|
|
|
ctx->digcnt += length;
|
|
|
|
|
|
if (final)
|
|
|
- ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
|
|
|
+ set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
|
|
|
|
|
|
- dd->flags |= FLAGS_DMA_ACTIVE;
|
|
|
+ set_bit(FLAGS_DMA_ACTIVE, &dd->flags);
|
|
|
|
|
|
omap_start_dma(dd->dma_lch);
|
|
|
|
|
@@ -392,7 +396,7 @@ static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
|
|
|
return -EINVAL;
|
|
|
}
|
|
|
|
|
|
- ctx->flags &= ~FLAGS_SG;
|
|
|
+ ctx->flags &= ~BIT(FLAGS_SG);
|
|
|
|
|
|
/* next call does not fail... so no unmap in the case of error */
|
|
|
return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
|
|
@@ -406,7 +410,7 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
|
|
|
|
|
|
omap_sham_append_sg(ctx);
|
|
|
|
|
|
- final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
|
|
|
+ final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
|
|
|
|
|
|
dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
|
|
|
ctx->bufcnt, ctx->digcnt, final);
|
|
@@ -452,7 +456,7 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
|
|
|
length = min(ctx->total, sg->length);
|
|
|
|
|
|
if (sg_is_last(sg)) {
|
|
|
- if (!(ctx->flags & FLAGS_FINUP)) {
|
|
|
+ if (!(ctx->flags & BIT(FLAGS_FINUP))) {
|
|
|
/* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */
|
|
|
tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
|
|
|
/* without finup() we need one block to close hash */
|
|
@@ -467,12 +471,12 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
|
|
|
return -EINVAL;
|
|
|
}
|
|
|
|
|
|
- ctx->flags |= FLAGS_SG;
|
|
|
+ ctx->flags |= BIT(FLAGS_SG);
|
|
|
|
|
|
ctx->total -= length;
|
|
|
ctx->offset = length; /* offset where to start slow */
|
|
|
|
|
|
- final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
|
|
|
+ final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
|
|
|
|
|
|
/* next call does not fail... so no unmap in the case of error */
|
|
|
return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
|
|
@@ -495,7 +499,7 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
|
|
|
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
|
|
|
|
|
|
omap_stop_dma(dd->dma_lch);
|
|
|
- if (ctx->flags & FLAGS_SG) {
|
|
|
+ if (ctx->flags & BIT(FLAGS_SG)) {
|
|
|
dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
|
|
|
if (ctx->sg->length == ctx->offset) {
|
|
|
ctx->sg = sg_next(ctx->sg);
|
|
@@ -537,18 +541,18 @@ static int omap_sham_init(struct ahash_request *req)
|
|
|
crypto_ahash_digestsize(tfm));
|
|
|
|
|
|
if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
|
|
|
- ctx->flags |= FLAGS_SHA1;
|
|
|
+ ctx->flags |= BIT(FLAGS_SHA1);
|
|
|
|
|
|
ctx->bufcnt = 0;
|
|
|
ctx->digcnt = 0;
|
|
|
ctx->buflen = BUFLEN;
|
|
|
|
|
|
- if (tctx->flags & FLAGS_HMAC) {
|
|
|
+ if (tctx->flags & BIT(FLAGS_HMAC)) {
|
|
|
struct omap_sham_hmac_ctx *bctx = tctx->base;
|
|
|
|
|
|
memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
|
|
|
ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
|
|
|
- ctx->flags |= FLAGS_HMAC;
|
|
|
+ ctx->flags |= BIT(FLAGS_HMAC);
|
|
|
}
|
|
|
|
|
|
return 0;
|
|
@@ -562,9 +566,9 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
|
|
|
int err;
|
|
|
|
|
|
dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
|
|
|
- ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0);
|
|
|
+ ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);
|
|
|
|
|
|
- if (ctx->flags & FLAGS_CPU)
|
|
|
+ if (ctx->flags & BIT(FLAGS_CPU))
|
|
|
err = omap_sham_update_cpu(dd);
|
|
|
else
|
|
|
err = omap_sham_update_dma_start(dd);
|
|
@@ -624,7 +628,7 @@ static int omap_sham_finish(struct ahash_request *req)
|
|
|
|
|
|
if (ctx->digcnt) {
|
|
|
omap_sham_copy_ready_hash(req);
|
|
|
- if (ctx->flags & FLAGS_HMAC)
|
|
|
+ if (ctx->flags & BIT(FLAGS_HMAC))
|
|
|
err = omap_sham_finish_hmac(req);
|
|
|
}
|
|
|
|
|
@@ -639,18 +643,23 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
|
|
|
struct omap_sham_dev *dd = ctx->dd;
|
|
|
|
|
|
if (!err) {
|
|
|
- omap_sham_copy_hash(ctx->dd->req, 1);
|
|
|
- if (ctx->flags & FLAGS_FINAL)
|
|
|
+ omap_sham_copy_hash(req, 1);
|
|
|
+ if (test_bit(FLAGS_FINAL, &dd->flags))
|
|
|
err = omap_sham_finish(req);
|
|
|
} else {
|
|
|
- ctx->flags |= FLAGS_ERROR;
|
|
|
+ ctx->flags |= BIT(FLAGS_ERROR);
|
|
|
}
|
|
|
|
|
|
+ /* atomic operation is not needed here */
|
|
|
+ dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
|
|
|
+ BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
|
|
|
clk_disable(dd->iclk);
|
|
|
- dd->flags &= ~FLAGS_BUSY;
|
|
|
|
|
|
if (req->base.complete)
|
|
|
req->base.complete(&req->base, err);
|
|
|
+
|
|
|
+ /* handle new request */
|
|
|
+ tasklet_schedule(&dd->done_task);
|
|
|
}
|
|
|
|
|
|
static int omap_sham_handle_queue(struct omap_sham_dev *dd,
|
|
@@ -658,21 +667,20 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
|
|
|
{
|
|
|
struct crypto_async_request *async_req, *backlog;
|
|
|
struct omap_sham_reqctx *ctx;
|
|
|
- struct ahash_request *prev_req;
|
|
|
unsigned long flags;
|
|
|
int err = 0, ret = 0;
|
|
|
|
|
|
spin_lock_irqsave(&dd->lock, flags);
|
|
|
if (req)
|
|
|
ret = ahash_enqueue_request(&dd->queue, req);
|
|
|
- if (dd->flags & FLAGS_BUSY) {
|
|
|
+ if (test_bit(FLAGS_BUSY, &dd->flags)) {
|
|
|
spin_unlock_irqrestore(&dd->lock, flags);
|
|
|
return ret;
|
|
|
}
|
|
|
backlog = crypto_get_backlog(&dd->queue);
|
|
|
async_req = crypto_dequeue_request(&dd->queue);
|
|
|
if (async_req)
|
|
|
- dd->flags |= FLAGS_BUSY;
|
|
|
+ set_bit(FLAGS_BUSY, &dd->flags);
|
|
|
spin_unlock_irqrestore(&dd->lock, flags);
|
|
|
|
|
|
if (!async_req)
|
|
@@ -682,16 +690,12 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
|
|
|
backlog->complete(backlog, -EINPROGRESS);
|
|
|
|
|
|
req = ahash_request_cast(async_req);
|
|
|
-
|
|
|
- prev_req = dd->req;
|
|
|
dd->req = req;
|
|
|
-
|
|
|
ctx = ahash_request_ctx(req);
|
|
|
|
|
|
dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
|
|
|
ctx->op, req->nbytes);
|
|
|
|
|
|
-
|
|
|
err = omap_sham_hw_init(dd);
|
|
|
if (err)
|
|
|
goto err1;
|
|
@@ -712,18 +716,16 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
|
|
|
|
|
|
if (ctx->op == OP_UPDATE) {
|
|
|
err = omap_sham_update_req(dd);
|
|
|
- if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP))
|
|
|
+ if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
|
|
|
/* no final() after finup() */
|
|
|
err = omap_sham_final_req(dd);
|
|
|
} else if (ctx->op == OP_FINAL) {
|
|
|
err = omap_sham_final_req(dd);
|
|
|
}
|
|
|
err1:
|
|
|
- if (err != -EINPROGRESS) {
|
|
|
+ if (err != -EINPROGRESS)
|
|
|
/* done_task will not finish it, so do it here */
|
|
|
omap_sham_finish_req(req, err);
|
|
|
- tasklet_schedule(&dd->queue_task);
|
|
|
- }
|
|
|
|
|
|
dev_dbg(dd->dev, "exit, err: %d\n", err);
|
|
|
|
|
@@ -752,7 +754,7 @@ static int omap_sham_update(struct ahash_request *req)
|
|
|
ctx->sg = req->src;
|
|
|
ctx->offset = 0;
|
|
|
|
|
|
- if (ctx->flags & FLAGS_FINUP) {
|
|
|
+ if (ctx->flags & BIT(FLAGS_FINUP)) {
|
|
|
if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
|
|
|
/*
|
|
|
* OMAP HW accel works only with buffers >= 9
|
|
@@ -765,7 +767,7 @@ static int omap_sham_update(struct ahash_request *req)
|
|
|
/*
|
|
|
* faster to use CPU for short transfers
|
|
|
*/
|
|
|
- ctx->flags |= FLAGS_CPU;
|
|
|
+ ctx->flags |= BIT(FLAGS_CPU);
|
|
|
}
|
|
|
} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
|
|
|
omap_sham_append_sg(ctx);
|
|
@@ -802,9 +804,9 @@ static int omap_sham_final(struct ahash_request *req)
|
|
|
{
|
|
|
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
|
|
|
|
|
|
- ctx->flags |= FLAGS_FINUP;
|
|
|
+ ctx->flags |= BIT(FLAGS_FINUP);
|
|
|
|
|
|
- if (ctx->flags & FLAGS_ERROR)
|
|
|
+ if (ctx->flags & BIT(FLAGS_ERROR))
|
|
|
return 0; /* uncompleted hash is not needed */
|
|
|
|
|
|
/* OMAP HW accel works only with buffers >= 9 */
|
|
@@ -823,7 +825,7 @@ static int omap_sham_finup(struct ahash_request *req)
|
|
|
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
|
|
|
int err1, err2;
|
|
|
|
|
|
- ctx->flags |= FLAGS_FINUP;
|
|
|
+ ctx->flags |= BIT(FLAGS_FINUP);
|
|
|
|
|
|
err1 = omap_sham_update(req);
|
|
|
if (err1 == -EINPROGRESS || err1 == -EBUSY)
|
|
@@ -895,7 +897,7 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
|
|
|
|
|
|
if (alg_base) {
|
|
|
struct omap_sham_hmac_ctx *bctx = tctx->base;
|
|
|
- tctx->flags |= FLAGS_HMAC;
|
|
|
+ tctx->flags |= BIT(FLAGS_HMAC);
|
|
|
bctx->shash = crypto_alloc_shash(alg_base, 0,
|
|
|
CRYPTO_ALG_NEED_FALLBACK);
|
|
|
if (IS_ERR(bctx->shash)) {
|
|
@@ -932,7 +934,7 @@ static void omap_sham_cra_exit(struct crypto_tfm *tfm)
|
|
|
crypto_free_shash(tctx->fallback);
|
|
|
tctx->fallback = NULL;
|
|
|
|
|
|
- if (tctx->flags & FLAGS_HMAC) {
|
|
|
+ if (tctx->flags & BIT(FLAGS_HMAC)) {
|
|
|
struct omap_sham_hmac_ctx *bctx = tctx->base;
|
|
|
crypto_free_shash(bctx->shash);
|
|
|
}
|
|
@@ -1036,51 +1038,46 @@ static struct ahash_alg algs[] = {
|
|
|
static void omap_sham_done_task(unsigned long data)
|
|
|
{
|
|
|
struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
|
|
|
- struct ahash_request *req = dd->req;
|
|
|
- struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
|
|
|
- int ready = 0, err = 0;
|
|
|
+ int err = 0;
|
|
|
|
|
|
- if (ctx->flags & FLAGS_OUTPUT_READY) {
|
|
|
- ctx->flags &= ~FLAGS_OUTPUT_READY;
|
|
|
- ready = 1;
|
|
|
+ if (!test_bit(FLAGS_BUSY, &dd->flags)) {
|
|
|
+ omap_sham_handle_queue(dd, NULL);
|
|
|
+ return;
|
|
|
}
|
|
|
|
|
|
- if (dd->flags & FLAGS_DMA_ACTIVE) {
|
|
|
- dd->flags &= ~FLAGS_DMA_ACTIVE;
|
|
|
- omap_sham_update_dma_stop(dd);
|
|
|
- if (!dd->err)
|
|
|
+ if (test_bit(FLAGS_CPU, &dd->flags)) {
|
|
|
+ if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
|
|
|
+ goto finish;
|
|
|
+ } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
|
|
|
+ if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
|
|
|
+ omap_sham_update_dma_stop(dd);
|
|
|
+ if (dd->err) {
|
|
|
+ err = dd->err;
|
|
|
+ goto finish;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
|
|
|
+ /* hash or semi-hash ready */
|
|
|
+ clear_bit(FLAGS_DMA_READY, &dd->flags);
|
|
|
err = omap_sham_update_dma_start(dd);
|
|
|
+ if (err != -EINPROGRESS)
|
|
|
+ goto finish;
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
- err = dd->err ? : err;
|
|
|
-
|
|
|
- if (err != -EINPROGRESS && (ready || err)) {
|
|
|
- dev_dbg(dd->dev, "update done: err: %d\n", err);
|
|
|
- /* finish curent request */
|
|
|
- omap_sham_finish_req(req, err);
|
|
|
- /* start new request */
|
|
|
- omap_sham_handle_queue(dd, NULL);
|
|
|
- }
|
|
|
-}
|
|
|
-
|
|
|
-static void omap_sham_queue_task(unsigned long data)
|
|
|
-{
|
|
|
- struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
|
|
|
+ return;
|
|
|
|
|
|
- omap_sham_handle_queue(dd, NULL);
|
|
|
+finish:
|
|
|
+ dev_dbg(dd->dev, "update done: err: %d\n", err);
|
|
|
+	/* finish current request */
|
|
|
+ omap_sham_finish_req(dd->req, err);
|
|
|
}
|
|
|
|
|
|
static irqreturn_t omap_sham_irq(int irq, void *dev_id)
|
|
|
{
|
|
|
struct omap_sham_dev *dd = dev_id;
|
|
|
- struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
|
|
|
-
|
|
|
- if (!ctx) {
|
|
|
- dev_err(dd->dev, "unknown interrupt.\n");
|
|
|
- return IRQ_HANDLED;
|
|
|
- }
|
|
|
|
|
|
- if (unlikely(ctx->flags & FLAGS_FINAL))
|
|
|
+ if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
|
|
|
/* final -> allow device to go to power-saving mode */
|
|
|
omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
|
|
|
|
|
@@ -1088,8 +1085,12 @@ static irqreturn_t omap_sham_irq(int irq, void *dev_id)
|
|
|
SHA_REG_CTRL_OUTPUT_READY);
|
|
|
omap_sham_read(dd, SHA_REG_CTRL);
|
|
|
|
|
|
- ctx->flags |= FLAGS_OUTPUT_READY;
|
|
|
- dd->err = 0;
|
|
|
+ if (!test_bit(FLAGS_BUSY, &dd->flags)) {
|
|
|
+ dev_warn(dd->dev, "Interrupt when no active requests.\n");
|
|
|
+ return IRQ_HANDLED;
|
|
|
+ }
|
|
|
+
|
|
|
+ set_bit(FLAGS_OUTPUT_READY, &dd->flags);
|
|
|
tasklet_schedule(&dd->done_task);
|
|
|
|
|
|
return IRQ_HANDLED;
|
|
@@ -1102,9 +1103,10 @@ static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
|
|
|
if (ch_status != OMAP_DMA_BLOCK_IRQ) {
|
|
|
pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
|
|
|
dd->err = -EIO;
|
|
|
- dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
|
|
|
+ clear_bit(FLAGS_INIT, &dd->flags);/* request to re-initialize */
|
|
|
}
|
|
|
|
|
|
+ set_bit(FLAGS_DMA_READY, &dd->flags);
|
|
|
tasklet_schedule(&dd->done_task);
|
|
|
}
|
|
|
|
|
@@ -1151,7 +1153,6 @@ static int __devinit omap_sham_probe(struct platform_device *pdev)
|
|
|
INIT_LIST_HEAD(&dd->list);
|
|
|
spin_lock_init(&dd->lock);
|
|
|
tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
|
|
|
- tasklet_init(&dd->queue_task, omap_sham_queue_task, (unsigned long)dd);
|
|
|
crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
|
|
|
|
|
|
dd->irq = -1;
|
|
@@ -1260,7 +1261,6 @@ static int __devexit omap_sham_remove(struct platform_device *pdev)
|
|
|
for (i = 0; i < ARRAY_SIZE(algs); i++)
|
|
|
crypto_unregister_ahash(&algs[i]);
|
|
|
tasklet_kill(&dd->done_task);
|
|
|
- tasklet_kill(&dd->queue_task);
|
|
|
iounmap(dd->io_base);
|
|
|
clk_put(dd->iclk);
|
|
|
omap_sham_dma_cleanup(dd);
|