@@ -35,9 +35,6 @@
 
 #include "drmP.h"
 
-static int drm_lock_transfer(drm_device_t * dev,
-			     __volatile__ unsigned int *lock,
-			     unsigned int context);
 static int drm_notifier(void *priv);
 
 /**
@@ -80,6 +77,9 @@ int drm_lock(struct inode *inode, struct file *filp,
 		return -EINVAL;
 
 	add_wait_queue(&dev->lock.lock_queue, &entry);
+	spin_lock(&dev->lock.spinlock);
+	dev->lock.user_waiters++;
+	spin_unlock(&dev->lock.spinlock);
 	for (;;) {
 		__set_current_state(TASK_INTERRUPTIBLE);
 		if (!dev->lock.hw_lock) {
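
The new spinlock and waiter counts live in drm_lock_data_t. For reference, a sketch of the structure this patch assumes in drmP.h (comments are illustrative; hw_lock, filp, lock_queue and lock_time predate this patch, while spinlock, kernel_waiters, user_waiters and idle_has_lock are the additions used here):

	typedef struct drm_lock_data {
		drm_hw_lock_t *hw_lock;		/* hardware lock, mapped into user space */
		struct file *filp;		/* file pointer of the lock holder */
		wait_queue_head_t lock_queue;	/* queue of blocked processes */
		unsigned long lock_time;	/* time of last lock, in jiffies */
		spinlock_t spinlock;		/* protects the three fields below */
		uint32_t kernel_waiters;	/* idlelock holders, see drm_idlelock_take() */
		uint32_t user_waiters;		/* processes sleeping in drm_lock() */
		int idle_has_lock;		/* hw lock parked on the kernel context */
	} drm_lock_data_t;
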
@@ -87,7 +87,7 @@ int drm_lock(struct inode *inode, struct file *filp,
 			ret = -EINTR;
 			break;
 		}
-		if (drm_lock_take(&dev->lock.hw_lock->lock, lock.context)) {
+		if (drm_lock_take(&dev->lock, lock.context)) {
 			dev->lock.filp = filp;
 			dev->lock.lock_time = jiffies;
 			atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
@@ -101,12 +101,15 @@ int drm_lock(struct inode *inode, struct file *filp,
 			break;
 		}
 	}
+	spin_lock(&dev->lock.spinlock);
+	dev->lock.user_waiters--;
+	spin_unlock(&dev->lock.spinlock);
 	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&dev->lock.lock_queue, &entry);
 
 	DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
 	if (ret)
 		return ret;
 
 	sigemptyset(&dev->sigmask);
 	sigaddset(&dev->sigmask, SIGSTOP);
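
For context, the heavyweight lock word lives in memory shared with user space, so clients normally try to grab it with a compare-and-swap first and only enter this DRM_IOCTL_LOCK path on contention. A minimal sketch of that user-side fast path (illustrative only, not libdrm's actual macros; it assumes the _DRM_LOCK_* definitions from drm.h and a GCC atomic builtin for the CAS):

	/* Returns 1 if the lock was taken without entering the kernel. */
	static int try_lock_fast(volatile unsigned int *lock, unsigned int context)
	{
		unsigned int old = *lock;

		if (!(old & _DRM_LOCK_HELD) &&
		    __sync_bool_compare_and_swap(lock, old, context | _DRM_LOCK_HELD))
			return 1;
		return 0;	/* contended: fall back to the ioctl slow path above */
	}
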
@@ -127,14 +130,12 @@ int drm_lock(struct inode *inode, struct file *filp,
 		}
 	}
 
-	/* dev->driver->kernel_context_switch isn't used by any of the x86
-	 * drivers but is used by the Sparc driver.
-	 */
 	if (dev->driver->kernel_context_switch &&
 	    dev->last_context != lock.context) {
 		dev->driver->kernel_context_switch(dev, dev->last_context,
 						   lock.context);
 	}
+
 	return 0;
 }
 
@@ -184,12 +185,8 @@ int drm_unlock(struct inode *inode, struct file *filp,
 	if (dev->driver->kernel_context_switch_unlock)
 		dev->driver->kernel_context_switch_unlock(dev);
 	else {
-		drm_lock_transfer(dev, &dev->lock.hw_lock->lock,
-				  DRM_KERNEL_CONTEXT);
-
-		if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
-				  DRM_KERNEL_CONTEXT)) {
-			DRM_ERROR("\n");
+		if (drm_lock_free(&dev->lock, lock.context)) {
+			/* FIXME: Should really bail out here. */
 		}
 	}
 
@@ -206,18 +203,26 @@ int drm_unlock(struct inode *inode, struct file *filp,
  *
  * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
  */
-int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
+int drm_lock_take(drm_lock_data_t *lock_data,
+		  unsigned int context)
 {
 	unsigned int old, new, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
 
+	spin_lock(&lock_data->spinlock);
 	do {
 		old = *lock;
 		if (old & _DRM_LOCK_HELD)
 			new = old | _DRM_LOCK_CONT;
-		else
-			new = context | _DRM_LOCK_HELD;
+		else {
+			new = context | _DRM_LOCK_HELD |
+				((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
+				 _DRM_LOCK_CONT : 0);
+		}
 		prev = cmpxchg(lock, old, new);
 	} while (prev != old);
+	spin_unlock(&lock_data->spinlock);
+
 	if (_DRM_LOCKING_CONTEXT(old) == context) {
 		if (old & _DRM_LOCK_HELD) {
 			if (context != DRM_KERNEL_CONTEXT) {
@@ -227,7 +232,8 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
 				return 0;
 			}
 		}
-	if (new == (context | _DRM_LOCK_HELD)) {
+
+	if (_DRM_LOCKING_CONTEXT(new) == context && (new & _DRM_LOCK_HELD)) {
 		/* Have lock */
 		return 1;
 	}
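
For reference, the lock-word encoding manipulated above is defined in drm.h: the top two bits carry the held and contended flags, and the remaining bits hold the context number, which is why a single cmpxchg can publish both the owner and the contention state:

	#define _DRM_LOCK_HELD	0x80000000U	/* hardware lock is held */
	#define _DRM_LOCK_CONT	0x40000000U	/* hardware lock is contended */
	#define _DRM_LOCK_IS_HELD(lock)	((lock) & _DRM_LOCK_HELD)
	#define _DRM_LOCK_IS_CONT(lock)	((lock) & _DRM_LOCK_CONT)
	#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD | _DRM_LOCK_CONT))
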
@@ -246,13 +252,13 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
  * Resets the lock file pointer.
  * Marks the lock as held by the given context, via the \p cmpxchg instruction.
  */
-static int drm_lock_transfer(drm_device_t * dev,
-			     __volatile__ unsigned int *lock,
+static int drm_lock_transfer(drm_lock_data_t *lock_data,
 			     unsigned int context)
 {
 	unsigned int old, new, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
 
-	dev->lock.filp = NULL;
+	lock_data->filp = NULL;
 	do {
 		old = *lock;
 		new = context | _DRM_LOCK_HELD;
@@ -272,23 +278,32 @@ static int drm_lock_transfer(drm_device_t * dev,
  * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
  * waiting on the lock queue.
  */
-int drm_lock_free(drm_device_t * dev,
-		  __volatile__ unsigned int *lock, unsigned int context)
+int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context)
 {
 	unsigned int old, new, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+	spin_lock(&lock_data->spinlock);
+	if (lock_data->kernel_waiters != 0) {
+		drm_lock_transfer(lock_data, 0);
+		lock_data->idle_has_lock = 1;
+		spin_unlock(&lock_data->spinlock);
+		return 1;
+	}
+	spin_unlock(&lock_data->spinlock);
 
-	dev->lock.filp = NULL;
 	do {
 		old = *lock;
-		new = 0;
+		new = _DRM_LOCKING_CONTEXT(old);
 		prev = cmpxchg(lock, old, new);
 	} while (prev != old);
+
 	if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
 		DRM_ERROR("%d freed heavyweight lock held by %d\n",
 			  context, _DRM_LOCKING_CONTEXT(old));
 		return 1;
 	}
-	wake_up_interruptible(&dev->lock.lock_queue);
+	wake_up_interruptible(&lock_data->lock_queue);
 	return 0;
 }
 
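
A short worked example of the new free path, using the drm.h bit values quoted earlier; only the numeric lock-word values are assumptions, the control flow is exactly the code above:

	/*
	 * *lock == 0x80000003: held by user context 3.
	 * drm_idlelock_take() (below) could not take the lock, so
	 * kernel_waiters == 1.
	 *
	 * The holder unlocks; drm_unlock() calls drm_lock_free():
	 *   kernel_waiters != 0, so instead of being freed the lock is
	 *   transferred: drm_lock_transfer(lock_data, 0) leaves
	 *   *lock == 0x80000000, i.e. held by DRM_KERNEL_CONTEXT (0),
	 *   and idle_has_lock is set. The lock word never becomes free,
	 *   so no user-space CAS can steal it before the kernel is done;
	 *   drm_idlelock_release() later clears it and wakes the waiters.
	 */
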
@@ -322,3 +337,64 @@ static int drm_notifier(void *priv)
 	} while (prev != old);
 	return 0;
 }
+
+/**
+ * This function returns immediately and takes the hw lock with the kernel
+ * context if it is free, otherwise it gets the highest priority when and if
+ * it is eventually released.
+ *
+ * This guarantees that the kernel will _eventually_ have the lock _unless_
+ * it is held by a blocked process. (In the latter case an explicit wait for
+ * the hardware lock would cause a deadlock, which is why the "idlelock" was
+ * invented.)
+ *
+ * This should be sufficient to wait for GPU idle without worrying about
+ * starvation.
+ */
+void drm_idlelock_take(drm_lock_data_t *lock_data)
+{
+	int ret = 0;
+
+	spin_lock(&lock_data->spinlock);
+	lock_data->kernel_waiters++;
+	if (!lock_data->idle_has_lock) {
+		spin_unlock(&lock_data->spinlock);
+		ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
+		spin_lock(&lock_data->spinlock);
+
+		if (ret == 1)
+			lock_data->idle_has_lock = 1;
+	}
+	spin_unlock(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_take);
+
+void drm_idlelock_release(drm_lock_data_t *lock_data)
+{
+	unsigned int old, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+	spin_lock(&lock_data->spinlock);
+	if (--lock_data->kernel_waiters == 0) {
+		if (lock_data->idle_has_lock) {
+			do {
+				old = *lock;
+				prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
+			} while (prev != old);
+			wake_up_interruptible(&lock_data->lock_queue);
+			lock_data->idle_has_lock = 0;
+		}
+	}
+	spin_unlock(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_release);
+
+int drm_i_have_hw_lock(struct file *filp)
+{
+	DRM_DEVICE;
+
+	return (priv->lock_count && dev->lock.hw_lock &&
+		_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
+		dev->lock.filp == filp);
+}
+EXPORT_SYMBOL(drm_i_have_hw_lock);
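
Finally, a minimal sketch of how a driver is expected to combine the new entry points, for example when a client exits. Everything here other than drm_i_have_hw_lock(), drm_lock_free(), drm_idlelock_take() and drm_idlelock_release() is illustrative, including the function name and the reclaim hook:

	/* Hypothetical file-release path (sketch, not actual drm_fops.c code). */
	static void example_release_locked_state(drm_device_t *dev, struct file *filp)
	{
		if (drm_i_have_hw_lock(filp)) {
			/* The dying client still holds the hw lock: drop it. */
			drm_lock_free(&dev->lock,
				      _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
		}

		drm_idlelock_take(&dev->lock);
		/* The kernel context now holds the hw lock, or is guaranteed to
		 * receive it on the next unlock, so the lock cannot bounce
		 * between clients behind the kernel's back while per-client
		 * state is reclaimed. */
		dev->driver->reclaim_buffers_locked(dev, filp);
		drm_idlelock_release(&dev->lock);
	}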
|