@@ -158,7 +158,8 @@ static void ttm_bo_release_list(struct kref *list_kref)
         ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
 }
 
-int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
+static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
+                                  bool interruptible)
 {
         if (interruptible) {
                 return wait_event_interruptible(bo->event_queue,
@@ -168,7 +169,6 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
                 return 0;
         }
 }
-EXPORT_SYMBOL(ttm_bo_wait_unreserved);
 
 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
@@ -213,14 +213,13 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
         return put_count;
 }
 
-int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
                           bool interruptible,
                           bool no_wait, bool use_sequence, uint32_t sequence)
 {
-        struct ttm_bo_global *glob = bo->glob;
         int ret;
 
-        while (unlikely(atomic_read(&bo->reserved) != 0)) {
+        while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
                 /**
                  * Deadlock avoidance for multi-bo reserving.
                  */
@@ -241,26 +240,36 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
                 if (no_wait)
                         return -EBUSY;
 
-                spin_unlock(&glob->lru_lock);
                 ret = ttm_bo_wait_unreserved(bo, interruptible);
-                spin_lock(&glob->lru_lock);
 
                 if (unlikely(ret))
                         return ret;
         }
 
-        atomic_set(&bo->reserved, 1);
         if (use_sequence) {
+                bool wake_up = false;
                 /**
                  * Wake up waiters that may need to recheck for deadlock,
                  * if we decreased the sequence number.
                  */
                 if (unlikely((bo->val_seq - sequence < (1 << 31))
                              || !bo->seq_valid))
-                        wake_up_all(&bo->event_queue);
+                        wake_up = true;
 
+                /*
+                 * In the worst case with memory ordering these values can be
+                 * seen in the wrong order. However since we call wake_up_all
+                 * in that case, this will hopefully not pose a problem,
+                 * and the worst case would only cause someone to accidentally
+                 * hit -EAGAIN in ttm_bo_reserve when they see old value of
+                 * val_seq. However this would only happen if seq_valid was
+                 * written before val_seq was, and just means some slightly
+                 * increased cpu usage
+                 */
                 bo->val_seq = sequence;
                 bo->seq_valid = true;
+                if (wake_up)
+                        wake_up_all(&bo->event_queue);
         } else {
                 bo->seq_valid = false;
         }
@@ -289,17 +298,64 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
         int put_count = 0;
         int ret;
 
-        spin_lock(&glob->lru_lock);
-        ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
-                                    sequence);
-        if (likely(ret == 0))
+        ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
+                                   sequence);
+        if (likely(ret == 0)) {
+                spin_lock(&glob->lru_lock);
                 put_count = ttm_bo_del_from_lru(bo);
-        spin_unlock(&glob->lru_lock);
+                spin_unlock(&glob->lru_lock);
+                ttm_bo_list_ref_sub(bo, put_count, true);
+        }
 
-        ttm_bo_list_ref_sub(bo, put_count, true);
-
+        return ret;
+}
+
+int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
+                                  bool interruptible, uint32_t sequence)
+{
+        bool wake_up = false;
+        int ret;
+
+        while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
+                WARN_ON(bo->seq_valid && sequence == bo->val_seq);
+
+                ret = ttm_bo_wait_unreserved(bo, interruptible);
+
+                if (unlikely(ret))
+                        return ret;
+        }
+
+        if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
+                wake_up = true;
+
+        /**
+         * Wake up waiters that may need to recheck for deadlock,
+         * if we decreased the sequence number.
+         */
+        bo->val_seq = sequence;
+        bo->seq_valid = true;
+        if (wake_up)
+                wake_up_all(&bo->event_queue);
+
+        return 0;
+}
+
+int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+                            bool interruptible, uint32_t sequence)
+{
+        struct ttm_bo_global *glob = bo->glob;
+        int put_count, ret;
+
+        ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
+        if (likely(!ret)) {
+                spin_lock(&glob->lru_lock);
+                put_count = ttm_bo_del_from_lru(bo);
+                spin_unlock(&glob->lru_lock);
+                ttm_bo_list_ref_sub(bo, put_count, true);
+        }
         return ret;
 }
+EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
 
 void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
 {
@@ -511,7 +567,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
         int ret;
 
         spin_lock(&glob->lru_lock);
-        ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+        ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 
         spin_lock(&bdev->fence_lock);
         (void) ttm_bo_wait(bo, false, false, true);
@@ -604,7 +660,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
                 return ret;
 
         spin_lock(&glob->lru_lock);
-        ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+        ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 
         /*
          * We raced, and lost, someone else holds the reservation now,
@@ -668,7 +724,14 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
                         kref_get(&nentry->list_kref);
                 }
 
-                ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
+                ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
+                if (remove_all && ret) {
+                        spin_unlock(&glob->lru_lock);
+                        ret = ttm_bo_reserve_nolru(entry, false, false,
+                                                   false, 0);
+                        spin_lock(&glob->lru_lock);
+                }
+
                 if (!ret)
                         ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
                                                              !remove_all);
@@ -816,7 +879,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 
         spin_lock(&glob->lru_lock);
         list_for_each_entry(bo, &man->lru, lru) {
-                ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+                ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
                 if (!ret)
                         break;
         }
@@ -1797,7 +1860,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 
         spin_lock(&glob->lru_lock);
         list_for_each_entry(bo, &glob->swap_lru, swap) {
-                ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+                ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
                 if (!ret)
                         break;
         }
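
The new entry points are easiest to see from the caller's side. The sketch below is illustrative only and is not part of the patch: it shows how a driver-style validation loop might combine the sequenced fast path with the new slowpath, reserving every buffer with ttm_bo_reserve() under a shared sequence number and, when a younger transaction loses the deadlock-avoidance check with -EAGAIN, backing everything off and sleeping on the contended buffer via ttm_bo_reserve_slowpath() before retrying. struct my_entry and my_unreserve_upto() are hypothetical; only the ttm_bo_* calls match the code above.

/*
 * Illustrative sketch only -- not from the patch. struct my_entry and
 * my_unreserve_upto() are hypothetical; the ttm_bo_* calls are the entry
 * points added or changed above (declared in drm/ttm/ttm_bo_driver.h).
 */
#include <linux/list.h>
#include <drm/ttm/ttm_bo_driver.h>

struct my_entry {
        struct list_head head;
        struct ttm_buffer_object *bo;
};

/* Hypothetical helper: unreserve every list entry before @stop. */
void my_unreserve_upto(struct list_head *list, struct my_entry *stop);

static int my_reserve_all(struct list_head *list, uint32_t val_seq)
{
        struct ttm_buffer_object *held = NULL; /* taken via the slowpath */
        struct my_entry *entry;
        int ret;

retry:
        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                if (bo == held) {
                        /* Already reserved by the slowpath below. */
                        held = NULL;
                        continue;
                }

                /* use_sequence == true enables deadlock detection. */
                ret = ttm_bo_reserve(bo, true, false, true, val_seq);
                if (ret == 0)
                        continue;

                /* Back off: drop everything reserved so far. */
                my_unreserve_upto(list, entry);
                if (held) {
                        ttm_bo_unreserve(held);
                        held = NULL;
                }

                if (ret != -EAGAIN)
                        return ret;

                /*
                 * An older transaction owns bo: sleep until it is
                 * unreserved, take it with our sequence number, and
                 * retry the whole list.
                 */
                ret = ttm_bo_reserve_slowpath(bo, true, val_seq);
                if (unlikely(ret))
                        return ret;
                held = bo;
                goto retry;
        }
        return 0;
}

Keeping the contended buffer reserved across the retry is what makes the slowpath attractive: the losing transaction makes forward progress instead of looping on -EAGAIN, and the deferred wake_up_all() in ttm_bo_reserve_nolru() wakes any waiters so they recheck val_seq, at worst costing a spurious -EAGAIN, as the memory-ordering comment in the patch notes.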