|
@@ -151,7 +151,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
|
|
|
atomic_dec(&bo->glob->bo_count);
|
|
|
if (bo->resv == &bo->ttm_resv)
|
|
|
reservation_object_fini(&bo->ttm_resv);
|
|
|
-
|
|
|
+ mutex_destroy(&bo->wu_mutex);
|
|
|
if (bo->destroy)
|
|
|
bo->destroy(bo);
|
|
|
else {
|
|
@@ -1123,6 +1123,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
|
|
|
INIT_LIST_HEAD(&bo->ddestroy);
|
|
|
INIT_LIST_HEAD(&bo->swap);
|
|
|
INIT_LIST_HEAD(&bo->io_reserve_lru);
|
|
|
+ mutex_init(&bo->wu_mutex);
|
|
|
bo->bdev = bdev;
|
|
|
bo->glob = bdev->glob;
|
|
|
bo->type = type;
|
|
@@ -1704,3 +1705,35 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
|
|
|
;
|
|
|
}
|
|
|
EXPORT_SYMBOL(ttm_bo_swapout_all);
|
|
|
+
|
|
|
+/**
|
|
|
+ * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
|
|
|
+ * unreserved
|
|
|
+ *
|
|
|
+ * @bo: Pointer to the buffer object whose reservation to wait out.
|
|
|
+ *
|
|
|
+ * Return: 0 on success (including when @bo was not reserved to begin with),
|
|
|
+ * -ERESTARTSYS if interrupted by a signal while waiting for bo::wu_mutex,
|
|
|
+ * or the error code from the failed reservation attempt.
|
|
|
+ */
|
|
|
+int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
|
|
|
+{
|
|
|
+ int ret;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * In the absence of a wait_unlocked API,
|
|
|
+ * use the bo::wu_mutex to avoid triggering livelocks due to
|
|
|
+ * concurrent use of this function. Note that this use of
|
|
|
+ * bo::wu_mutex can go away if we change locking order to
|
|
|
+ * mmap_sem -> bo::reserve.
|
|
|
+ */
|
|
|
+ ret = mutex_lock_interruptible(&bo->wu_mutex);
|
|
|
+ if (unlikely(ret != 0))
|
|
|
+ return -ERESTARTSYS;
|
|
|
+ /* Not currently reserved: nothing to wait for; ret is still 0. */
|
|
|
+ if (!ww_mutex_is_locked(&bo->resv->lock))
|
|
|
+ goto out_unlock;
|
|
|
+ /*
|
|
|
+ * Wait for the current holder by briefly taking the reservation
|
|
|
+ * ourselves, then releasing it immediately.
|
|
|
+ */
|
|
|
+ ret = ttm_bo_reserve_nolru(bo, true, false, false, NULL);
|
|
|
+ if (unlikely(ret != 0))
|
|
|
+ goto out_unlock;
|
|
|
+ ww_mutex_unlock(&bo->resv->lock);
|
|
|
+
|
|
|
+out_unlock:
|
|
|
+ mutex_unlock(&bo->wu_mutex);
|
|
|
+ return ret;
|
|
|
+}
|