@@ -182,7 +182,7 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
 			return -ENOMEM;
 	}
 
-	if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
+	if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
 			   hdr->request_len))
 		return -EFAULT;
 
@@ -249,7 +249,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
 	struct request *rq, *next_rq = NULL;
 	int ret, rw;
 	unsigned int dxfer_len;
-	void *dxferp = NULL;
+	void __user *dxferp = NULL;
 	struct bsg_class_device *bcd = &q->bsg_dev;
 
 	/* if the LLD has been removed then the bsg_unregister_queue will
@@ -291,7 +291,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
 		rq->next_rq = next_rq;
 		next_rq->cmd_type = rq->cmd_type;
 
-		dxferp = (void*)(unsigned long)hdr->din_xferp;
+		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
 		ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
 				      hdr->din_xfer_len, GFP_KERNEL);
 		if (ret)
@@ -300,10 +300,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
 
 	if (hdr->dout_xfer_len) {
 		dxfer_len = hdr->dout_xfer_len;
-		dxferp = (void*)(unsigned long)hdr->dout_xferp;
+		dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
 	} else if (hdr->din_xfer_len) {
 		dxfer_len = hdr->din_xfer_len;
-		dxferp = (void*)(unsigned long)hdr->din_xferp;
+		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
 	} else
 		dxfer_len = 0;
 
@@ -445,7 +445,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 		int len = min_t(unsigned int, hdr->max_response_len,
 					rq->sense_len);
 
-		ret = copy_to_user((void*)(unsigned long)hdr->response,
+		ret = copy_to_user((void __user *)(unsigned long)hdr->response,
 				   rq->sense, len);
 		if (!ret)
 			hdr->response_len = len;
@@ -606,7 +606,7 @@ bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
 	*ppos = bytes_read;
 
-	if (!bytes_read || (bytes_read && err_block_err(ret)))
+	if (!bytes_read || err_block_err(ret))
 		bytes_read = ret;
 
 	return bytes_read;
@@ -686,7 +686,7 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
 	/*
 	 * return bytes written on non-fatal errors
 	 */
-	if (!bytes_written || (bytes_written && err_block_err(ret)))
+	if (!bytes_written || err_block_err(ret))
 		bytes_written = ret;
 
 	dprintk("%s: returning %Zd\n", bd->name, bytes_written);
@@ -878,7 +878,7 @@ static unsigned int bsg_poll(struct file *file, poll_table *wait)
 	spin_lock_irq(&bd->lock);
 	if (!list_empty(&bd->done_list))
 		mask |= POLLIN | POLLRDNORM;
-	if (bd->queued_cmds >= bd->max_queue)
+	if (bd->queued_cmds < bd->max_queue)
 		mask |= POLLOUT;
 	spin_unlock_irq(&bd->lock);
 
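The recurring change in these hunks is the cast used for the 64-bit pointer
fields of struct sg_io_v4: going through (unsigned long) and then tagging the
result as (void __user *) keeps sparse's address-space checking intact, since
copy_from_user()/copy_to_user() expect __user pointers. Below is a minimal
sketch of that pattern outside the patch context; demo_hdr and demo_copy_cmd
are hypothetical names, not code from bsg.c.

	#include <linux/types.h>
	#include <linux/uaccess.h>
	#include <linux/errno.h>

	/* hypothetical header: a user address carried as a plain u64 */
	struct demo_hdr {
		__u64 request;		/* user-space buffer address */
		__u32 request_len;	/* length of that buffer */
	};

	static int demo_copy_cmd(const struct demo_hdr *hdr, void *cmd,
				 size_t max_len)
	{
		/*
		 * Convert the u64 to a pointer via unsigned long, then add
		 * the __user annotation so sparse can verify that only
		 * user pointers ever reach copy_from_user().
		 */
		const void __user *uptr =
			(const void __user *)(unsigned long)hdr->request;

		if (hdr->request_len > max_len)
			return -EINVAL;
		/* copy_from_user() returns the number of bytes NOT copied */
		if (copy_from_user(cmd, uptr, hdr->request_len))
			return -EFAULT;
		return 0;
	}

Later kernels added a u64_to_user_ptr() helper that wraps the same two-step
cast, but the open-coded form above matches what this patch uses.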