@@ -135,6 +135,11 @@ static const struct block_device_operations scm_blk_devops = {
 	.release = scm_release,
 };
 
+static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
+{
+	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
+}
+
 static void scm_request_prepare(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
@@ -195,14 +200,18 @@ void scm_request_requeue(struct scm_request *scmrq)
 
 	scm_release_cluster(scmrq);
 	blk_requeue_request(bdev->rq, scmrq->request);
+	atomic_dec(&bdev->queued_reqs);
 	scm_request_done(scmrq);
 	scm_ensure_queue_restart(bdev);
 }
 
 void scm_request_finish(struct scm_request *scmrq)
 {
+	struct scm_blk_dev *bdev = scmrq->bdev;
+
 	scm_release_cluster(scmrq);
 	blk_end_request_all(scmrq->request, scmrq->error);
+	atomic_dec(&bdev->queued_reqs);
 	scm_request_done(scmrq);
 }
 
@@ -218,6 +227,10 @@ static void scm_blk_request(struct request_queue *rq)
 		if (req->cmd_type != REQ_TYPE_FS)
 			continue;
 
+		if (!scm_permit_request(bdev, req)) {
+			scm_ensure_queue_restart(bdev);
+			return;
+		}
 		scmrq = scm_request_fetch();
 		if (!scmrq) {
 			SCM_LOG(5, "no request");
@@ -231,11 +244,13 @@ static void scm_blk_request(struct request_queue *rq)
 			return;
 		}
 		if (scm_need_cluster_request(scmrq)) {
+			atomic_inc(&bdev->queued_reqs);
 			blk_start_request(req);
 			scm_initiate_cluster_request(scmrq);
 			return;
 		}
 		scm_request_prepare(scmrq);
+		atomic_inc(&bdev->queued_reqs);
 		blk_start_request(req);
 
 		ret = scm_start_aob(scmrq->aob);
@@ -244,7 +259,6 @@ static void scm_blk_request(struct request_queue *rq)
 			scm_request_requeue(scmrq);
 			return;
 		}
-		atomic_inc(&bdev->queued_reqs);
 	}
 }
 
@@ -280,6 +294,38 @@ void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
 	tasklet_hi_schedule(&bdev->tasklet);
 }
 
+static void scm_blk_handle_error(struct scm_request *scmrq)
+{
+	struct scm_blk_dev *bdev = scmrq->bdev;
+	unsigned long flags;
+
+	if (scmrq->error != -EIO)
+		goto restart;
+
+	/* For -EIO the response block is valid. */
+	switch (scmrq->aob->response.eqc) {
+	case EQC_WR_PROHIBIT:
+		spin_lock_irqsave(&bdev->lock, flags);
+		if (bdev->state != SCM_WR_PROHIBIT)
+			pr_info("%lu: Write access to the SCM increment is suspended\n",
+				(unsigned long) bdev->scmdev->address);
+		bdev->state = SCM_WR_PROHIBIT;
+		spin_unlock_irqrestore(&bdev->lock, flags);
+		goto requeue;
+	default:
+		break;
+	}
+
+restart:
+	if (!scm_start_aob(scmrq->aob))
+		return;
+
+requeue:
+	spin_lock_irqsave(&bdev->rq_lock, flags);
+	scm_request_requeue(scmrq);
+	spin_unlock_irqrestore(&bdev->rq_lock, flags);
+}
+
 static void scm_blk_tasklet(struct scm_blk_dev *bdev)
 {
 	struct scm_request *scmrq;
@@ -293,11 +339,8 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev)
 		spin_unlock_irqrestore(&bdev->lock, flags);
 
 		if (scmrq->error && scmrq->retries-- > 0) {
-			if (scm_start_aob(scmrq->aob)) {
-				spin_lock_irqsave(&bdev->rq_lock, flags);
-				scm_request_requeue(scmrq);
-				spin_unlock_irqrestore(&bdev->rq_lock, flags);
-			}
+			scm_blk_handle_error(scmrq);
+
 			/* Request restarted or requeued, handle next. */
 			spin_lock_irqsave(&bdev->lock, flags);
 			continue;
@@ -310,7 +353,6 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev)
 		}
 
 		scm_request_finish(scmrq);
-		atomic_dec(&bdev->queued_reqs);
 		spin_lock_irqsave(&bdev->lock, flags);
 	}
 	spin_unlock_irqrestore(&bdev->lock, flags);
@@ -332,6 +374,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
 	}
 
 	bdev->scmdev = scmdev;
+	bdev->state = SCM_OPER;
 	spin_lock_init(&bdev->rq_lock);
 	spin_lock_init(&bdev->lock);
 	INIT_LIST_HEAD(&bdev->finished_requests);
@@ -396,6 +439,18 @@ void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
 	put_disk(bdev->gendisk);
 }
 
+void scm_blk_set_available(struct scm_blk_dev *bdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&bdev->lock, flags);
+	if (bdev->state == SCM_WR_PROHIBIT)
+		pr_info("%lu: Write access to the SCM increment is restored\n",
+			(unsigned long) bdev->scmdev->address);
+	bdev->state = SCM_OPER;
+	spin_unlock_irqrestore(&bdev->lock, flags);
+}
+
 static int __init scm_blk_init(void)
 {
 	int ret = -EINVAL;