@@ -29,6 +29,7 @@
 #include <linux/kernel.h>
 #include <net/sock.h>
 #include <linux/net.h>
+#include <linux/kthread.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -441,6 +442,95 @@ static void nbd_clear_que(struct nbd_device *lo)
 }
 
 
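+/*
+ * Validate a request and send it to the server.  On success the
+ * request is queued on queue_head to await the reply; on failure it
+ * is completed immediately with req->errors incremented.
+ */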
+static void nbd_handle_req(struct nbd_device *lo, struct request *req)
+{
+	if (!blk_fs_request(req))
+		goto error_out;
+
+	nbd_cmd(req) = NBD_CMD_READ;
+	if (rq_data_dir(req) == WRITE) {
+		nbd_cmd(req) = NBD_CMD_WRITE;
+		if (lo->flags & NBD_READ_ONLY) {
+			printk(KERN_ERR "%s: Write on read-only\n",
+					lo->disk->disk_name);
+			goto error_out;
+		}
+	}
+
+	req->errors = 0;
+
+	mutex_lock(&lo->tx_lock);
+	if (unlikely(!lo->sock)) {
+		mutex_unlock(&lo->tx_lock);
+		printk(KERN_ERR "%s: Attempted send on closed socket\n",
+		       lo->disk->disk_name);
+		req->errors++;
+		nbd_end_request(req);
+		return;
+	}
+
+	lo->active_req = req;
+
+	if (nbd_send_req(lo, req) != 0) {
+		printk(KERN_ERR "%s: Request send failed\n",
+				lo->disk->disk_name);
+		req->errors++;
+		nbd_end_request(req);
+	} else {
+		spin_lock(&lo->queue_lock);
+		list_add(&req->queuelist, &lo->queue_head);
+		spin_unlock(&lo->queue_lock);
+	}
+
+	lo->active_req = NULL;
+	mutex_unlock(&lo->tx_lock);
+	wake_up_all(&lo->active_wq);
+
+	return;
+
+error_out:
+	req->errors++;
+	nbd_end_request(req);
+}
+
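+/*
+ * The per-device worker thread: sleeps until do_nbd_request() puts
+ * work on waiting_queue, then transmits each request in turn, so the
+ * block layer's request function never blocks on the socket.
+ */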
+static int nbd_thread(void *data)
+{
+	struct nbd_device *lo = data;
+	struct request *req;
+
+	set_user_nice(current, -20);
+	while (!kthread_should_stop() || !list_empty(&lo->waiting_queue)) {
+		/* wait for something to do */
+		wait_event_interruptible(lo->waiting_wq,
+					 kthread_should_stop() ||
+					 !list_empty(&lo->waiting_queue));
+
+		/* extract request */
+		if (list_empty(&lo->waiting_queue))
+			continue;
+
+		spin_lock_irq(&lo->queue_lock);
+		req = list_entry(lo->waiting_queue.next, struct request,
+				 queuelist);
+		list_del_init(&req->queuelist);
+		spin_unlock_irq(&lo->queue_lock);
+
+		/* handle request */
+		nbd_handle_req(lo, req);
+	}
+	return 0;
+}
+
 /*
  * We always wait for result of write, for now. It would be nice to make it optional
  * in future
@@ -456,65 +536,24 @@ static void do_nbd_request(struct request_queue * q)
 		struct nbd_device *lo;
 
 		blkdev_dequeue_request(req);
+
+		spin_unlock_irq(q->queue_lock);
+
 		dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
 				req->rq_disk->disk_name, req, req->cmd_type);
 
-		if (!blk_fs_request(req))
-			goto error_out;
-
 		lo = req->rq_disk->private_data;
 
 		BUG_ON(lo->magic != LO_MAGIC);
 
-		nbd_cmd(req) = NBD_CMD_READ;
-		if (rq_data_dir(req) == WRITE) {
-			nbd_cmd(req) = NBD_CMD_WRITE;
-			if (lo->flags & NBD_READ_ONLY) {
-				printk(KERN_ERR "%s: Write on read-only\n",
-						lo->disk->disk_name);
-				goto error_out;
-			}
-		}
-
-		req->errors = 0;
-		spin_unlock_irq(q->queue_lock);
-
-		mutex_lock(&lo->tx_lock);
-		if (unlikely(!lo->sock)) {
-			mutex_unlock(&lo->tx_lock);
-			printk(KERN_ERR "%s: Attempted send on closed socket\n",
-			       lo->disk->disk_name);
-			req->errors++;
-			nbd_end_request(req);
-			spin_lock_irq(q->queue_lock);
-			continue;
-		}
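+		/* queue the request for nbd_thread and wake it up */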
+		spin_lock_irq(&lo->queue_lock);
+		list_add_tail(&req->queuelist, &lo->waiting_queue);
+		spin_unlock_irq(&lo->queue_lock);
 
-		lo->active_req = req;
-
-		if (nbd_send_req(lo, req) != 0) {
-			printk(KERN_ERR "%s: Request send failed\n",
-					lo->disk->disk_name);
-			req->errors++;
-			nbd_end_request(req);
-		} else {
-			spin_lock(&lo->queue_lock);
-			list_add(&req->queuelist, &lo->queue_head);
-			spin_unlock(&lo->queue_lock);
-		}
-
-		lo->active_req = NULL;
-		mutex_unlock(&lo->tx_lock);
-		wake_up_all(&lo->active_wq);
+		wake_up(&lo->waiting_wq);
 
 		spin_lock_irq(q->queue_lock);
-		continue;
-
-error_out:
-		req->errors++;
-		spin_unlock(q->queue_lock);
-		nbd_end_request(req);
-		spin_lock(q->queue_lock);
 	}
 }
 
@@ -524,6 +562,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
 	struct nbd_device *lo = inode->i_bdev->bd_disk->private_data;
 	int error;
 	struct request sreq ;
+	struct task_struct *thread;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -606,7 +645,13 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
 	case NBD_DO_IT:
 		if (!lo->file)
 			return -EINVAL;
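+		/* start the request-sending worker for the lifetime of the connection */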
+		thread = kthread_create(nbd_thread, lo, lo->disk->disk_name);
+		if (IS_ERR(thread))
+			return PTR_ERR(thread);
+		wake_up_process(thread);
 		error = nbd_do_it(lo);
+		kthread_stop(thread);
 		if (error)
 			return error;
 		sock_shutdown(lo, 1);
@@ -695,10 +739,12 @@ static int __init nbd_init(void)
 		nbd_dev[i].file = NULL;
 		nbd_dev[i].magic = LO_MAGIC;
 		nbd_dev[i].flags = 0;
+		INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
 		spin_lock_init(&nbd_dev[i].queue_lock);
 		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
 		mutex_init(&nbd_dev[i].tx_lock);
 		init_waitqueue_head(&nbd_dev[i].active_wq);
+		init_waitqueue_head(&nbd_dev[i].waiting_wq);
 		nbd_dev[i].blksize = 1024;
 		nbd_dev[i].bytesize = 0;
 		disk->major = NBD_MAJOR;