@@ -194,7 +194,7 @@ ccw_device_handle_oper(struct ccw_device *cdev)
 	    cdev->id.dev_model != cdev->private->senseid.dev_model ||
 	    cdev->private->dev_id.devno != sch->schib.pmcw.dev) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_do_unreg_rereg, cdev);
+			     ccw_device_do_unreg_rereg);
 		queue_work(ccw_device_work, &cdev->private->kick_work);
 		return 0;
 	}
@@ -329,19 +329,21 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
 }
 
 static void
-ccw_device_oper_notify(void *data)
+ccw_device_oper_notify(struct work_struct *work)
 {
+	struct ccw_device_private *priv;
 	struct ccw_device *cdev;
 	struct subchannel *sch;
 	int ret;
 
-	cdev = data;
+	priv = container_of(work, struct ccw_device_private, kick_work);
+	cdev = priv->cdev;
 	sch = to_subchannel(cdev->dev.parent);
 	ret = (sch->driver && sch->driver->notify) ?
 		sch->driver->notify(&sch->dev, CIO_OPER) : 0;
 	if (!ret)
 		/* Driver doesn't want device back. */
-		ccw_device_do_unreg_rereg(cdev);
+		ccw_device_do_unreg_rereg(work);
 	else {
 		/* Reenable channel measurements, if needed. */
 		cmf_reenable(cdev);
@@ -377,8 +379,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
 
 	if (cdev->private->flags.donotify) {
 		cdev->private->flags.donotify = 0;
-		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
-			     cdev);
+		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify);
 		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
 	}
 	wake_up(&cdev->private->wait_q);
@@ -528,13 +529,15 @@ ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 
 
 static void
-ccw_device_nopath_notify(void *data)
+ccw_device_nopath_notify(struct work_struct *work)
 {
+	struct ccw_device_private *priv;
 	struct ccw_device *cdev;
 	struct subchannel *sch;
 	int ret;
 
-	cdev = data;
+	priv = container_of(work, struct ccw_device_private, kick_work);
+	cdev = priv->cdev;
 	sch = to_subchannel(cdev->dev.parent);
 	/* Extra sanity. */
 	if (sch->lpm)
@@ -547,8 +550,7 @@ ccw_device_nopath_notify(void *data)
 		cio_disable_subchannel(sch);
 		if (get_device(&cdev->dev)) {
 			PREPARE_WORK(&cdev->private->kick_work,
-				     ccw_device_call_sch_unregister,
-				     cdev);
+				     ccw_device_call_sch_unregister);
 			queue_work(ccw_device_work,
 				   &cdev->private->kick_work);
 		} else
@@ -607,7 +609,7 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
 		/* Reset oper notify indication after verify error. */
 		cdev->private->flags.donotify = 0;
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify, cdev);
+			     ccw_device_nopath_notify);
 		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
 		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
 		break;
@@ -738,7 +740,7 @@ ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
 	sch = to_subchannel(cdev->dev.parent);
 	if (get_device(&cdev->dev)) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_call_sch_unregister, cdev);
+			     ccw_device_call_sch_unregister);
 		queue_work(ccw_device_work, &cdev->private->kick_work);
 	}
 	wake_up(&cdev->private->wait_q);
@@ -769,7 +771,7 @@ ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
 	}
 	if (get_device(&cdev->dev)) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_call_sch_unregister, cdev);
+			     ccw_device_call_sch_unregister);
 		queue_work(ccw_device_work, &cdev->private->kick_work);
 	}
 	wake_up(&cdev->private->wait_q);
@@ -874,7 +876,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 		sch = to_subchannel(cdev->dev.parent);
 		if (!sch->lpm) {
 			PREPARE_WORK(&cdev->private->kick_work,
-				     ccw_device_nopath_notify, cdev);
+				     ccw_device_nopath_notify);
 			queue_work(ccw_device_notify_work,
 				   &cdev->private->kick_work);
 		} else
@@ -969,7 +971,7 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
 			      ERR_PTR(-EIO));
 	if (!sch->lpm) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify, cdev);
+			     ccw_device_nopath_notify);
 		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
 	} else if (cdev->private->flags.doverify)
 		/* Start delayed path verification. */
@@ -992,7 +994,7 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 		sch = to_subchannel(cdev->dev.parent);
 		if (!sch->lpm) {
 			PREPARE_WORK(&cdev->private->kick_work,
-				     ccw_device_nopath_notify, cdev);
+				     ccw_device_nopath_notify);
 			queue_work(ccw_device_notify_work,
 				   &cdev->private->kick_work);
 		} else
@@ -1021,7 +1023,7 @@ void device_kill_io(struct subchannel *sch)
 	if (ret == -ENODEV) {
 		if (!sch->lpm) {
 			PREPARE_WORK(&cdev->private->kick_work,
-				     ccw_device_nopath_notify, cdev);
+				     ccw_device_nopath_notify);
 			queue_work(ccw_device_notify_work,
 				   &cdev->private->kick_work);
 		} else
@@ -1033,7 +1035,7 @@ void device_kill_io(struct subchannel *sch)
 			      ERR_PTR(-EIO));
 	if (!sch->lpm) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify, cdev);
+			     ccw_device_nopath_notify);
 		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
 	} else
 		/* Start delayed path verification. */
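
For reference, every hunk above applies the same conversion: the work handler now receives the struct work_struct pointer itself and recovers its enclosing object with container_of(), instead of being handed a void *data cookie through PREPARE_WORK(). A minimal illustrative sketch of that pattern follows; struct my_private, my_work_fn(), my_queue() and the payload field are hypothetical names for illustration and do not appear in the driver.

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_private {
	struct work_struct kick_work;	/* embedded work item, like kick_work in ccw_device_private */
	int payload;
};

/* New-style callback: only the work_struct pointer is passed in. */
static void my_work_fn(struct work_struct *work)
{
	struct my_private *priv =
		container_of(work, struct my_private, kick_work);

	printk(KERN_INFO "payload=%d\n", priv->payload);
}

/* The data argument is gone from INIT_WORK()/PREPARE_WORK(). */
static void my_queue(struct my_private *priv)
{
	INIT_WORK(&priv->kick_work, my_work_fn);
	schedule_work(&priv->kick_work);
}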