@@ -1398,6 +1398,106 @@ static int cciss_getluninfo(ctlr_info_t *h,
 	return 0;
 }
 
+static int cciss_passthru(ctlr_info_t *h, void __user *argp)
+{
+	IOCTL_Command_struct iocommand;
+	CommandList_struct *c;
+	char *buff = NULL;
+	u64bit temp64;
+	DECLARE_COMPLETION_ONSTACK(wait);
+
+	if (!argp)
+		return -EINVAL;
+
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+
+	if (copy_from_user
+	    (&iocommand, argp, sizeof(IOCTL_Command_struct)))
+		return -EFAULT;
+	if ((iocommand.buf_size < 1) &&
+	    (iocommand.Request.Type.Direction != XFER_NONE)) {
+		return -EINVAL;
+	}
+	if (iocommand.buf_size > 0) {
+		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
+		if (buff == NULL)
+			return -EFAULT;
+	}
+	if (iocommand.Request.Type.Direction == XFER_WRITE) {
+		/* Copy the data into the buffer we created */
+		if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
+			kfree(buff);
+			return -EFAULT;
+		}
+	} else {
+		memset(buff, 0, iocommand.buf_size);
+	}
+	c = cmd_special_alloc(h);
+	if (!c) {
+		kfree(buff);
+		return -ENOMEM;
+	}
+	/* Fill in the command type */
+	c->cmd_type = CMD_IOCTL_PEND;
+	/* Fill in Command Header */
+	c->Header.ReplyQueue = 0;	/* unused in simple mode */
+	if (iocommand.buf_size > 0) {	/* buffer to fill */
+		c->Header.SGList = 1;
+		c->Header.SGTotal = 1;
+	} else {	/* no buffers to fill */
+		c->Header.SGList = 0;
+		c->Header.SGTotal = 0;
+	}
+	c->Header.LUN = iocommand.LUN_info;
+	/* use the kernel address of the cmd block for tag */
+	c->Header.Tag.lower = c->busaddr;
+
+	/* Fill in Request block */
+	c->Request = iocommand.Request;
+
+	/* Fill in the scatter gather information */
+	if (iocommand.buf_size > 0) {
+		temp64.val = pci_map_single(h->pdev, buff,
+			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+		c->SG[0].Addr.lower = temp64.val32.lower;
+		c->SG[0].Addr.upper = temp64.val32.upper;
+		c->SG[0].Len = iocommand.buf_size;
+		c->SG[0].Ext = 0; /* we are not chaining */
+	}
+	c->waiting = &wait;
+
+	enqueue_cmd_and_start_io(h, c);
+	wait_for_completion(&wait);
+
+	/* unlock the buffers from DMA */
+	temp64.val32.lower = c->SG[0].Addr.lower;
+	temp64.val32.upper = c->SG[0].Addr.upper;
+	pci_unmap_single(h->pdev, (dma_addr_t) temp64.val, iocommand.buf_size,
+		PCI_DMA_BIDIRECTIONAL);
+	check_ioctl_unit_attention(h, c);
+
+	/* Copy the error information out */
+	iocommand.error_info = *(c->err_info);
+	if (copy_to_user(argp, &iocommand, sizeof(IOCTL_Command_struct))) {
+		kfree(buff);
+		cmd_special_free(h, c);
+		return -EFAULT;
+	}
+
+	if (iocommand.Request.Type.Direction == XFER_READ) {
+		/* Copy the data out of the buffer we created */
+		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
+			kfree(buff);
+			cmd_special_free(h, c);
+			return -EFAULT;
+		}
+	}
+	kfree(buff);
+	cmd_special_free(h, c);
+	return 0;
+}
+
 static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 		       unsigned int cmd, unsigned long arg)
 {
@@ -1433,117 +1533,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 	case CCISS_GETLUNINFO:
 		return cciss_getluninfo(h, disk, argp);
 	case CCISS_PASSTHRU:
-	{
-		IOCTL_Command_struct iocommand;
-		CommandList_struct *c;
-		char *buff = NULL;
-		u64bit temp64;
-		DECLARE_COMPLETION_ONSTACK(wait);
-
-		if (!arg)
-			return -EINVAL;
-
-		if (!capable(CAP_SYS_RAWIO))
-			return -EPERM;
-
-		if (copy_from_user
-		    (&iocommand, argp, sizeof(IOCTL_Command_struct)))
-			return -EFAULT;
-		if ((iocommand.buf_size < 1) &&
-		    (iocommand.Request.Type.Direction != XFER_NONE)) {
-			return -EINVAL;
-		}
-#if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
-		/* Check kmalloc limits */
-		if (iocommand.buf_size > 128000)
-			return -EINVAL;
-#endif
-		if (iocommand.buf_size > 0) {
-			buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
-			if (buff == NULL)
-				return -EFAULT;
-		}
-		if (iocommand.Request.Type.Direction == XFER_WRITE) {
-			/* Copy the data into the buffer we created */
-			if (copy_from_user
-			    (buff, iocommand.buf, iocommand.buf_size)) {
-				kfree(buff);
-				return -EFAULT;
-			}
-		} else {
-			memset(buff, 0, iocommand.buf_size);
-		}
-		c = cmd_special_alloc(h);
-		if (!c) {
-			kfree(buff);
-			return -ENOMEM;
-		}
-		/* Fill in the command type */
-		c->cmd_type = CMD_IOCTL_PEND;
-		/* Fill in Command Header */
-		c->Header.ReplyQueue = 0;	/* unused in simple mode */
-		if (iocommand.buf_size > 0)	/* buffer to fill */
-		{
-			c->Header.SGList = 1;
-			c->Header.SGTotal = 1;
-		} else	/* no buffers to fill */
-		{
-			c->Header.SGList = 0;
-			c->Header.SGTotal = 0;
-		}
-		c->Header.LUN = iocommand.LUN_info;
-		/* use the kernel address the cmd block for tag */
-		c->Header.Tag.lower = c->busaddr;
-
-		/* Fill in Request block */
-		c->Request = iocommand.Request;
-
-		/* Fill in the scatter gather information */
-		if (iocommand.buf_size > 0) {
-			temp64.val = pci_map_single(h->pdev, buff,
-				iocommand.buf_size,
-				PCI_DMA_BIDIRECTIONAL);
-			c->SG[0].Addr.lower = temp64.val32.lower;
-			c->SG[0].Addr.upper = temp64.val32.upper;
-			c->SG[0].Len = iocommand.buf_size;
-			c->SG[0].Ext = 0;	/* we are not chaining */
-		}
-		c->waiting = &wait;
-
-		enqueue_cmd_and_start_io(h, c);
-		wait_for_completion(&wait);
-
-		/* unlock the buffers from DMA */
-		temp64.val32.lower = c->SG[0].Addr.lower;
-		temp64.val32.upper = c->SG[0].Addr.upper;
-		pci_unmap_single(h->pdev, (dma_addr_t) temp64.val,
-			iocommand.buf_size,
-			PCI_DMA_BIDIRECTIONAL);
-
-		check_ioctl_unit_attention(h, c);
-
-		/* Copy the error information out */
-		iocommand.error_info = *(c->err_info);
-		if (copy_to_user
-		    (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
-			kfree(buff);
-			cmd_special_free(h, c);
-			return -EFAULT;
-		}
-
-		if (iocommand.Request.Type.Direction == XFER_READ) {
-			/* Copy the data out of the buffer we created */
-			if (copy_to_user
-			    (iocommand.buf, buff, iocommand.buf_size)) {
-				kfree(buff);
-				cmd_special_free(h, c);
-				return -EFAULT;
-			}
-		}
-		kfree(buff);
-		cmd_special_free(h, c);
-		return 0;
-	}
+		return cciss_passthru(h, argp);
 	case CCISS_BIG_PASSTHRU:{
 		BIG_IOCTL_Command_struct *ioc;
 		CommandList_struct *c;