@@ -808,13 +808,11 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_helper_resume_force_mode);
 
-static struct slow_work_ops output_poll_ops;
-
 #define DRM_OUTPUT_POLL_PERIOD (10*HZ)
-static void output_poll_execute(struct slow_work *work)
+static void output_poll_execute(struct work_struct *work)
 {
-	struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work);
-	struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_slow_work);
+	struct delayed_work *delayed_work = to_delayed_work(work);
+	struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
 	struct drm_connector *connector;
 	enum drm_connector_status old_status, status;
 	bool repoll = false, changed = false;
@@ -854,7 +852,7 @@ static void output_poll_execute(struct slow_work *work)
 	}
 
 	if (repoll) {
-		ret = delayed_slow_work_enqueue(delayed_work, DRM_OUTPUT_POLL_PERIOD);
+		ret = queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD);
 		if (ret)
 			DRM_ERROR("delayed enqueue failed %d\n", ret);
 	}
@@ -864,7 +862,7 @@ void drm_kms_helper_poll_disable(struct drm_device *dev)
 {
 	if (!dev->mode_config.poll_enabled)
 		return;
-	delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+	cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_disable);
 
@@ -880,7 +878,7 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
 	}
 
 	if (poll) {
-		ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD);
+		ret = queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
 		if (ret)
 			DRM_ERROR("delayed enqueue failed %d\n", ret);
 	}
@@ -889,9 +887,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_enable);
 
 void drm_kms_helper_poll_init(struct drm_device *dev)
 {
-	slow_work_register_user(THIS_MODULE);
-	delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
-			       &output_poll_ops);
+	INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute);
 	dev->mode_config.poll_enabled = true;
 
 	drm_kms_helper_poll_enable(dev);
@@ -901,7 +897,6 @@ EXPORT_SYMBOL(drm_kms_helper_poll_init);
 void drm_kms_helper_poll_fini(struct drm_device *dev)
 {
 	drm_kms_helper_poll_disable(dev);
-	slow_work_unregister_user(THIS_MODULE);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_fini);
 
@@ -909,12 +904,8 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
 {
 	if (!dev->mode_config.poll_enabled)
 		return;
-	delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
-	/* schedule a slow work asap */
-	delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, 0);
+	/* kill timer and schedule immediate execution, this doesn't block */
+	cancel_delayed_work(&dev->mode_config.output_poll_work);
+	queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
-
-static struct slow_work_ops output_poll_ops = {
-	.execute = output_poll_execute,
-};
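
For reference (not part of the patch), a minimal sketch of the delayed_work pattern the hunks above switch to: embed a struct delayed_work in the owning object, recover that object in the handler with to_delayed_work() plus container_of(), and re-queue the work to keep polling. The names my_device, MY_POLL_PERIOD, my_poll_execute and friends are made up for illustration; system_nrt_wq is used to mirror the patch, though later kernels dropped it in favor of system_wq / schedule_delayed_work().

/* Sketch only; illustrative names, not DRM code. */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define MY_POLL_PERIOD (10 * HZ)

struct my_device {
	struct delayed_work poll_work;
	bool keep_polling;
};

static void my_poll_execute(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct my_device *dev = container_of(dwork, struct my_device, poll_work);

	/* ... poll hardware, decide whether to keep polling ... */

	if (dev->keep_polling)
		queue_delayed_work(system_nrt_wq, dwork, MY_POLL_PERIOD);
}

static void my_poll_init(struct my_device *dev)
{
	/* Bind the handler once; no ops table or slow-work user registration. */
	INIT_DELAYED_WORK(&dev->poll_work, my_poll_execute);
	dev->keep_polling = true;
	queue_delayed_work(system_nrt_wq, &dev->poll_work, MY_POLL_PERIOD);
}

static void my_poll_fini(struct my_device *dev)
{
	dev->keep_polling = false;
	/* Synchronous cancel: the handler is guaranteed not to be running after this. */
	cancel_delayed_work_sync(&dev->poll_work);
}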