@@ -357,7 +357,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 
 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
-				  bool evict, bool interruptible, bool no_wait)
+				  bool evict, bool interruptible,
+				  bool no_wait_reserve, bool no_wait_gpu)
 {
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -402,12 +403,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
-		ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
+		ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
-					 no_wait, mem);
+					 no_wait_reserve, no_wait_gpu, mem);
	else
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
+		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
 
	if (ret)
		goto out_err;
@@ -606,7 +607,7 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 EXPORT_SYMBOL(ttm_bo_unref);
 
 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
-			bool no_wait)
+			bool no_wait_reserve, bool no_wait_gpu)
 {
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
@@ -615,7 +616,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
	int ret = 0;
 
	spin_lock(&bo->lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bo->lock);
 
	if (unlikely(ret != 0)) {
@@ -638,7 +639,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
-			       no_wait);
+			       no_wait_reserve, no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			printk(KERN_ERR TTM_PFX
@@ -650,7 +651,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
	}
 
	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
-				     no_wait);
+				     no_wait_reserve, no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS)
			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
@@ -670,7 +671,8 @@ out:
 
 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
				uint32_t mem_type,
-				bool interruptible, bool no_wait)
+				bool interruptible, bool no_wait_reserve,
+				bool no_wait_gpu)
 {
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -687,11 +689,11 @@ retry:
	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
	kref_get(&bo->list_kref);
 
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+	ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
 
	if (unlikely(ret == -EBUSY)) {
		spin_unlock(&glob->lru_lock);
-		if (likely(!no_wait))
+		if (likely(!no_wait_gpu))
			ret = ttm_bo_wait_unreserved(bo, interruptible);
 
		kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -713,7 +715,7 @@ retry:
	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
-	ret = ttm_bo_evict(bo, interruptible, no_wait);
+	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
	ttm_bo_unreserve(bo);
 
	kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -764,7 +766,9 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
					uint32_t mem_type,
					struct ttm_placement *placement,
					struct ttm_mem_reg *mem,
-					bool interruptible, bool no_wait)
+					bool interruptible,
+					bool no_wait_reserve,
+					bool no_wait_gpu)
 {
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bdev->glob;
@@ -785,7 +789,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
		}
		spin_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
-						no_wait);
+						no_wait_reserve, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
@@ -855,7 +859,8 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
@@ -952,7 +957,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
	}
 
	ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
-					interruptible, no_wait);
+					interruptible, no_wait_reserve, no_wait_gpu);
	if (ret == 0 && mem->mm_node) {
		mem->placement = cur_flags;
		mem->mm_node->private = bo;
@@ -978,7 +983,8 @@ EXPORT_SYMBOL(ttm_bo_wait_cpu);
 
 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
@@ -992,7 +998,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
	 * instead of doing it here.
	 */
	spin_lock(&bo->lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bo->lock);
	if (ret)
		return ret;
@@ -1002,10 +1008,10 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
	/*
	 * Determine where to move the buffer.
	 */
-	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
+	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
	if (ret)
		goto out_unlock;
-	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
+	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
 out_unlock:
	if (ret && mem.mm_node) {
		spin_lock(&glob->lru_lock);
@@ -1039,7 +1045,8 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
	int ret;
 
@@ -1054,7 +1061,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
	 */
	ret = ttm_bo_mem_compat(placement, &bo->mem);
	if (ret < 0) {
-		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
		if (ret)
			return ret;
	} else {
@@ -1175,7 +1182,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
		goto out_err;
	}
 
-	ret = ttm_bo_validate(bo, placement, interruptible, false);
+	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
	if (ret)
		goto out_err;
 
@@ -1249,7 +1256,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
	spin_lock(&glob->lru_lock);
	while (!list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
-		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+		ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
@@ -1839,7 +1846,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
		evict_mem.mem_type = TTM_PL_SYSTEM;
 
		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
-					     false, false);
+					     false, false, false);
		if (unlikely(ret != 0))
			goto out;
	}
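
Note (not part of the patch): after this change, every caller passes two flags instead of one: no_wait_reserve controls whether the call may block waiting to reserve the buffer, and no_wait_gpu controls whether it may block waiting for the GPU to finish with it. A minimal, hypothetical sketch of a driver call site updated for the new ttm_bo_validate() signature shown above; the variable names and surrounding code are illustrative only:

	/* Hypothetical driver call site after this patch: both flags false,
	 * so the call may sleep both on the buffer reservation and on the
	 * GPU; interruptible sleeps are requested. */
	ret = ttm_bo_validate(bo, &placement, true /* interruptible */,
			      false /* no_wait_reserve */,
			      false /* no_wait_gpu */);
	if (ret)
		return ret;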