@@ -93,13 +93,13 @@ static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
 static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
 static int fast_fail = 1;
 static int client_reserve = 1;
+static char partition_name[97] = "UNKNOWN";
+static unsigned int partition_number = -1;
 
 static struct scsi_transport_template *ibmvscsi_transport_template;
 
 #define IBMVSCSI_VERSION "1.5.9"
 
-static struct ibmvscsi_ops *ibmvscsi_ops;
-
 MODULE_DESCRIPTION("IBM Virtual SCSI");
 MODULE_AUTHOR("Dave Boutcher");
 MODULE_LICENSE("GPL");
@@ -118,6 +118,316 @@ MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
 module_param_named(client_reserve, client_reserve, int, S_IRUGO );
 MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");
 
+static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
+				struct ibmvscsi_host_data *hostdata);
+
+/* ------------------------------------------------------------
+ * Routines for managing the command/response queue
+ */
+/**
+ * ibmvscsi_handle_event: - Interrupt handler for crq events
+ * @irq:	number of irq to handle, not used
+ * @dev_instance: ibmvscsi_host_data of host that received interrupt
+ *
+ * Disables interrupts and schedules srp_task
+ * Always returns IRQ_HANDLED
+ */
+static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance)
+{
+	struct ibmvscsi_host_data *hostdata =
+		(struct ibmvscsi_host_data *)dev_instance;
+	vio_disable_interrupts(to_vio_dev(hostdata->dev));
+	tasklet_schedule(&hostdata->srp_task);
+	return IRQ_HANDLED;
+}
+
+/**
+ * ibmvscsi_release_crq_queue: - Deallocates data and unregisters CRQ
+ * @queue:	crq_queue to deallocate and unregister
+ * @hostdata:	ibmvscsi_host_data of host
+ *
+ * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
+ * the crq with the hypervisor.
+ */
+static void ibmvscsi_release_crq_queue(struct crq_queue *queue,
+				       struct ibmvscsi_host_data *hostdata,
+				       int max_requests)
+{
+	long rc = 0;
+	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+	free_irq(vdev->irq, (void *)hostdata);
+	tasklet_kill(&hostdata->srp_task);
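+	/* Retry H_FREE_CRQ while the hypervisor reports the CRQ as busy */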
+	do {
+		if (rc)
+			msleep(100);
+		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
+	dma_unmap_single(hostdata->dev,
+			 queue->msg_token,
+			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
+	free_page((unsigned long)queue->msgs);
+}
+
+/**
+ * crq_queue_next_crq: - Returns the next entry in message queue
+ * @queue:	crq_queue to use
+ *
+ * Returns pointer to next entry in queue, or NULL if there are no new
+ * entries in the CRQ.
+ */
+static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
+{
+	struct viosrp_crq *crq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->lock, flags);
+	crq = &queue->msgs[queue->cur];
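+	/* A set top bit in 'valid' marks an element posted by the partner */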
+	if (crq->valid & 0x80) {
+		if (++queue->cur == queue->size)
+			queue->cur = 0;
+	} else
+		crq = NULL;
+	spin_unlock_irqrestore(&queue->lock, flags);
+
+	return crq;
+}
+
+/**
+ * ibmvscsi_send_crq: - Send a CRQ
+ * @hostdata:	the adapter
+ * @word1:	the first 64 bits of the data
+ * @word2:	the second 64 bits of the data
+ */
+static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
+			     u64 word1, u64 word2)
+{
+	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
+	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
+}
+
+/**
+ * ibmvscsi_task: - Process srps asynchronously
+ * @data:	ibmvscsi_host_data of host
+ */
+static void ibmvscsi_task(void *data)
+{
+	struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
+	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+	struct viosrp_crq *crq;
+	int done = 0;
+
+	while (!done) {
+		/* Pull all the valid messages off the CRQ */
+		while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
+			ibmvscsi_handle_crq(crq, hostdata);
+			crq->valid = 0x00;
+		}
+
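+		/*
+		 * Re-enable interrupts and check once more: a CRQ that
+		 * arrived while interrupts were off would otherwise sit
+		 * unprocessed until the next interrupt.
+		 */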
+		vio_enable_interrupts(vdev);
+		crq = crq_queue_next_crq(&hostdata->queue);
+		if (crq != NULL) {
+			vio_disable_interrupts(vdev);
+			ibmvscsi_handle_crq(crq, hostdata);
+			crq->valid = 0x00;
+		} else {
+			done = 1;
+		}
+	}
+}
+
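+/**
+ * gather_partition_info: - Reads this partition's identity from the device tree
+ *
+ * Fills in the partition_name and partition_number globals from the
+ * "ibm,partition-name" and "ibm,partition-no" properties of the root node.
+ */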
+static void gather_partition_info(void)
+{
+	struct device_node *rootdn;
+
+	const char *ppartition_name;
+	const unsigned int *p_number_ptr;
+
+	/* Retrieve information about this partition */
+	rootdn = of_find_node_by_path("/");
+	if (!rootdn) {
+		return;
+	}
+
+	ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
+	if (ppartition_name)
+		strncpy(partition_name, ppartition_name,
+				sizeof(partition_name));
+	p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
+	if (p_number_ptr)
+		partition_number = *p_number_ptr;
+	of_node_put(rootdn);
+}
+
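+/**
+ * set_adapter_info: - Fills in the madapter_info block for this host
+ * @hostdata:	ibmvscsi_host_data of host
+ *
+ * Records the SRP version, partition name and number, MAD version and
+ * OS type that identify this client to the server adapter.
+ */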
+static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
+{
+	memset(&hostdata->madapter_info, 0x00,
+			sizeof(hostdata->madapter_info));
+
+	dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
+	strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);
+
+	strncpy(hostdata->madapter_info.partition_name, partition_name,
+			sizeof(hostdata->madapter_info.partition_name));
+
+	hostdata->madapter_info.partition_number = partition_number;
+
+	hostdata->madapter_info.mad_version = 1;
+	hostdata->madapter_info.os_type = 2;
+}
+
+/**
+ * ibmvscsi_reset_crq_queue: - resets a crq after a failure
+ * @queue:	crq_queue to reset
+ * @hostdata:	ibmvscsi_host_data of host
+ *
+ */
+static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
+				    struct ibmvscsi_host_data *hostdata)
+{
+	int rc = 0;
+	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
+	/* Close the CRQ */
+	do {
+		if (rc)
+			msleep(100);
+		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
+
+	/* Clean out the queue */
+	memset(queue->msgs, 0x00, PAGE_SIZE);
+	queue->cur = 0;
+
+	set_adapter_info(hostdata);
+
+	/* And re-open it again */
+	rc = plpar_hcall_norets(H_REG_CRQ,
+				vdev->unit_address,
+				queue->msg_token, PAGE_SIZE);
+	if (rc == 2) {
+		/* Adapter is good, but other end is not ready */
+		dev_warn(hostdata->dev, "Partner adapter not ready\n");
+	} else if (rc != 0) {
+		dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
+	}
+	return rc;
+}
+
+/**
+ * ibmvscsi_init_crq_queue: - Initializes and registers CRQ with hypervisor
+ * @queue:	crq_queue to initialize and register
+ * @hostdata:	ibmvscsi_host_data of host
+ *
+ * Allocates a page for messages, maps it for dma, and registers
+ * the crq with the hypervisor.
+ * Returns zero on success.
+ */
+static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
+				   struct ibmvscsi_host_data *hostdata,
+				   int max_requests)
+{
+	int rc;
+	int retrc;
+	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
+	queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
+
+	if (!queue->msgs)
+		goto malloc_failed;
+	queue->size = PAGE_SIZE / sizeof(*queue->msgs);
+
+	queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
+					  queue->size * sizeof(*queue->msgs),
+					  DMA_BIDIRECTIONAL);
+
+	if (dma_mapping_error(hostdata->dev, queue->msg_token))
+		goto map_failed;
+
+	gather_partition_info();
+	set_adapter_info(hostdata);
+
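+	/* retrc preserves the original H_REG_CRQ result for the caller */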
+	retrc = rc = plpar_hcall_norets(H_REG_CRQ,
+				vdev->unit_address,
+				queue->msg_token, PAGE_SIZE);
+	if (rc == H_RESOURCE)
+		/* maybe kexecing and resource is busy. try a reset */
+		rc = ibmvscsi_reset_crq_queue(queue,
+					      hostdata);
+
+	if (rc == 2) {
+		/* Adapter is good, but other end is not ready */
+		dev_warn(hostdata->dev, "Partner adapter not ready\n");
+		retrc = 0;
+	} else if (rc != 0) {
+		dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
+		goto reg_crq_failed;
+	}
+
+	queue->cur = 0;
+	spin_lock_init(&queue->lock);
+
+	tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task,
+		     (unsigned long)hostdata);
+
+	if (request_irq(vdev->irq,
+			ibmvscsi_handle_event,
+			0, "ibmvscsi", (void *)hostdata) != 0) {
+		dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
+			vdev->irq);
+		goto req_irq_failed;
+	}
+
+	rc = vio_enable_interrupts(vdev);
+	if (rc != 0) {
+		dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
+		goto req_irq_failed;
+	}
+
+	return retrc;
+
+      req_irq_failed:
+	tasklet_kill(&hostdata->srp_task);
+	rc = 0;
+	do {
+		if (rc)
+			msleep(100);
+		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
+      reg_crq_failed:
+	dma_unmap_single(hostdata->dev,
+			 queue->msg_token,
+			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
+      map_failed:
+	free_page((unsigned long)queue->msgs);
+      malloc_failed:
+	return -1;
+}
+
+/**
+ * ibmvscsi_reenable_crq_queue: - reenables a crq after it has been disabled
+ * @queue:	crq_queue to reenable
+ * @hostdata:	ibmvscsi_host_data of host
+ *
+ */
+static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
+				       struct ibmvscsi_host_data *hostdata)
+{
+	int rc = 0;
+	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
+	/* Re-enable the CRQ */
+	do {
+		if (rc)
+			msleep(100);
+		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
+	} while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
+
+	if (rc)
+		dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
+	return rc;
+}
+
 /* ------------------------------------------------------------
  * Routines for the event pool and event structs
  */
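The release, reset and reenable paths above all follow the same busy-retry
convention for hypervisor calls. A minimal sketch of that pattern, factored
into a hypothetical ibmvscsi_hcall_retry() helper that is not part of this
patch (it assumes <linux/delay.h> and <asm/hvcall.h> are available):

	/* Sketch only: retry a CRQ hcall while the hypervisor reports busy,
	 * sleeping 100 ms between attempts, as the H_FREE_CRQ loops above do.
	 */
	static long ibmvscsi_hcall_retry(unsigned long opcode,
					 unsigned long unit_address)
	{
		long rc = 0;

		do {
			if (rc)
				msleep(100);
			rc = plpar_hcall_norets(opcode, unit_address);
		} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

		return rc;
	}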
@@ -611,7 +921,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
 	}
 
 	if ((rc =
-	     ibmvscsi_ops->send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
+	     ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
 		list_del(&evt_struct->list);
 		del_timer(&evt_struct->timer);
 
@@ -1420,8 +1730,8 @@ static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
  * @hostdata:	ibmvscsi_host_data of host
  *
  */
-void ibmvscsi_handle_crq(struct viosrp_crq *crq,
-			 struct ibmvscsi_host_data *hostdata)
+static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
+				struct ibmvscsi_host_data *hostdata)
 {
 	long rc;
 	unsigned long flags;
@@ -1433,8 +1743,8 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
 	case 0x01:	/* Initialization message */
 		dev_info(hostdata->dev, "partner initialized\n");
 		/* Send back a response */
-		if ((rc = ibmvscsi_ops->send_crq(hostdata,
-						 0xC002000000000000LL, 0)) == 0) {
+		rc = ibmvscsi_send_crq(hostdata, 0xC002000000000000LL, 0);
+		if (rc == 0) {
 			/* Now login */
 			init_adapter(hostdata);
 		} else {
@@ -1840,17 +2150,17 @@ static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
 		smp_rmb();
 		hostdata->reset_crq = 0;
 
-		rc = ibmvscsi_ops->reset_crq_queue(&hostdata->queue, hostdata);
+		rc = ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
 		if (!rc)
-			rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0);
+			rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
 		vio_enable_interrupts(to_vio_dev(hostdata->dev));
 	} else if (hostdata->reenable_crq) {
 		smp_rmb();
 		action = "enable";
-		rc = ibmvscsi_ops->reenable_crq_queue(&hostdata->queue, hostdata);
+		rc = ibmvscsi_reenable_crq_queue(&hostdata->queue, hostdata);
 		hostdata->reenable_crq = 0;
 		if (!rc)
-			rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0);
+			rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
 	} else
 		return;
 
@@ -1944,7 +2254,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 		goto init_crq_failed;
 	}
 
-	rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
+	rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_events);
 	if (rc != 0 && rc != H_RESOURCE) {
 		dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
 		goto kill_kthread;
@@ -1974,7 +2284,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	 * to fail if the other end is not active. In that case we don't
 	 * want to scan
 	 */
-	if (ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0) == 0
+	if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
 			|| rc == H_RESOURCE) {
 		/*
 		 * Wait around max init_timeout secs for the adapter to finish
@@ -2002,7 +2312,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
       add_host_failed:
 	release_event_pool(&hostdata->pool, hostdata);
       init_pool_failed:
-	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
+	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events);
       kill_kthread:
 	kthread_stop(hostdata->work_thread);
       init_crq_failed:
@@ -2018,7 +2328,7 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
 	struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
 	unmap_persist_bufs(hostdata);
 	release_event_pool(&hostdata->pool, hostdata);
-	ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
+	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
 					max_events);
 
 	kthread_stop(hostdata->work_thread);
@@ -2039,7 +2349,10 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
 static int ibmvscsi_resume(struct device *dev)
 {
 	struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev);
-	return ibmvscsi_ops->resume(hostdata);
+	vio_disable_interrupts(to_vio_dev(hostdata->dev));
+	tasklet_schedule(&hostdata->srp_task);
+
+	return 0;
 }
 
 /**
@@ -2076,9 +2389,7 @@ int __init ibmvscsi_module_init(void)
 	driver_template.can_queue = max_requests;
 	max_events = max_requests + 2;
 
-	if (firmware_has_feature(FW_FEATURE_VIO))
-		ibmvscsi_ops = &rpavscsi_ops;
-	else
+	if (!firmware_has_feature(FW_FEATURE_VIO))
 		return -ENODEV;
 
 	ibmvscsi_transport_template =