@@ -45,6 +45,39 @@
 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
 static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
+static void ttm_bo_global_kobj_release(struct kobject *kobj);
+
+static struct attribute ttm_bo_count = {
+        .name = "bo_count",
+        .mode = S_IRUGO
+};
+
+static ssize_t ttm_bo_global_show(struct kobject *kobj,
+                                  struct attribute *attr,
+                                  char *buffer)
+{
+        struct ttm_bo_global *glob =
+                container_of(kobj, struct ttm_bo_global, kobj);
+
+        return snprintf(buffer, PAGE_SIZE, "%lu\n",
+                        (unsigned long) atomic_read(&glob->bo_count));
+}
+
+static struct attribute *ttm_bo_global_attrs[] = {
+        &ttm_bo_count,
+        NULL
+};
+
+static struct sysfs_ops ttm_bo_global_ops = {
+        .show = &ttm_bo_global_show
+};
+
+static struct kobj_type ttm_bo_glob_kobj_type = {
+        .release = &ttm_bo_global_kobj_release,
+        .sysfs_ops = &ttm_bo_global_ops,
+        .default_attrs = ttm_bo_global_attrs
+};
+
 
 static inline uint32_t ttm_bo_type_flags(unsigned type)
 {
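The hunk above exposes the new global buffer-object counter through sysfs: ttm_bo_global_show() is dispatched via ttm_bo_global_ops for the single bo_count attribute, and ttm_bo_glob_kobj_type ties the attribute list to the kobject release path. A minimal userspace sketch of reading the counter follows; the sysfs path is an assumption, since it depends on where ttm_get_kobj() (used later in ttm_bo_global_init()) parents the "buffer_objects" kobject:

#include <stdio.h>

int main(void)
{
        /* Path is hypothetical; locate the "buffer_objects" kobject
         * under whatever parent ttm_get_kobj() returns on your kernel. */
        FILE *f = fopen("/sys/class/ttm/buffer_objects/bo_count", "r");
        unsigned long count;

        if (f && fscanf(f, "%lu", &count) == 1)
                printf("live TTM buffer objects: %lu\n", count);
        if (f)
                fclose(f);
        return 0;
}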
@@ -67,10 +100,11 @@ static void ttm_bo_release_list(struct kref *list_kref)
 
         if (bo->ttm)
                 ttm_tt_destroy(bo->ttm);
+        atomic_dec(&bo->glob->bo_count);
         if (bo->destroy)
                 bo->destroy(bo);
         else {
-                ttm_mem_global_free(bdev->mem_glob, bo->acc_size);
+                ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
                 kfree(bo);
         }
 }
@@ -107,7 +141,7 @@ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
         kref_get(&bo->list_kref);
 
         if (bo->ttm != NULL) {
-                list_add_tail(&bo->swap, &bdev->swap_lru);
+                list_add_tail(&bo->swap, &bo->glob->swap_lru);
                 kref_get(&bo->list_kref);
         }
 }
@@ -142,7 +176,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
                           bool interruptible,
                           bool no_wait, bool use_sequence, uint32_t sequence)
 {
-        struct ttm_bo_device *bdev = bo->bdev;
+        struct ttm_bo_global *glob = bo->glob;
         int ret;
 
         while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
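From this hunk onward the patch applies one mechanical substitution throughout the file: every piece of LRU-related state that used to live in struct ttm_bo_device (lru_lock, swap_lru, the shrink callback, dummy_read_page, and the ttm_bo_size constants) now lives in the shared struct ttm_bo_global, so each function first fetches glob (from bo->glob or bdev->glob) and takes glob->lru_lock where it previously took bdev->lru_lock.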
@@ -154,9 +188,9 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
                 if (no_wait)
                         return -EBUSY;
 
-                spin_unlock(&bdev->lru_lock);
+                spin_unlock(&glob->lru_lock);
                 ret = ttm_bo_wait_unreserved(bo, interruptible);
-                spin_lock(&bdev->lru_lock);
+                spin_lock(&glob->lru_lock);
 
                 if (unlikely(ret))
                         return ret;
@@ -182,16 +216,16 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
                    bool interruptible,
                    bool no_wait, bool use_sequence, uint32_t sequence)
 {
-        struct ttm_bo_device *bdev = bo->bdev;
+        struct ttm_bo_global *glob = bo->glob;
         int put_count = 0;
         int ret;
 
-        spin_lock(&bdev->lru_lock);
+        spin_lock(&glob->lru_lock);
         ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
                                     sequence);
         if (likely(ret == 0))
                 put_count = ttm_bo_del_from_lru(bo);
-        spin_unlock(&bdev->lru_lock);
+        spin_unlock(&glob->lru_lock);
 
         while (put_count--)
                 kref_put(&bo->list_kref, ttm_bo_ref_bug);
@@ -201,13 +235,13 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 
 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
-        struct ttm_bo_device *bdev = bo->bdev;
+        struct ttm_bo_global *glob = bo->glob;
 
-        spin_lock(&bdev->lru_lock);
+        spin_lock(&glob->lru_lock);
         ttm_bo_add_to_lru(bo);
         atomic_set(&bo->reserved, 0);
         wake_up_all(&bo->event_queue);
-        spin_unlock(&bdev->lru_lock);
+        spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_unreserve);
 
@@ -218,6 +252,7 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 {
         struct ttm_bo_device *bdev = bo->bdev;
+        struct ttm_bo_global *glob = bo->glob;
         int ret = 0;
         uint32_t page_flags = 0;
 
@@ -230,14 +265,14 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
                 page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
         case ttm_bo_type_kernel:
                 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
-                                        page_flags, bdev->dummy_read_page);
+                                        page_flags, glob->dummy_read_page);
                 if (unlikely(bo->ttm == NULL))
                         ret = -ENOMEM;
                 break;
         case ttm_bo_type_user:
                 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                         page_flags | TTM_PAGE_FLAG_USER,
-                                        bdev->dummy_read_page);
+                                        glob->dummy_read_page);
                 if (unlikely(bo->ttm == NULL))
                         ret = -ENOMEM;
                 break;
@@ -355,6 +390,7 @@ out_err:
 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 {
         struct ttm_bo_device *bdev = bo->bdev;
+        struct ttm_bo_global *glob = bo->glob;
         struct ttm_bo_driver *driver = bdev->driver;
         int ret;
 
@@ -366,7 +402,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 
         spin_unlock(&bo->lock);
 
-        spin_lock(&bdev->lru_lock);
+        spin_lock(&glob->lru_lock);
         ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
         BUG_ON(ret);
         if (bo->ttm)
@@ -381,7 +417,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
                 bo->mem.mm_node = NULL;
         }
         put_count = ttm_bo_del_from_lru(bo);
-        spin_unlock(&bdev->lru_lock);
+        spin_unlock(&glob->lru_lock);
 
         atomic_set(&bo->reserved, 0);
 
@@ -391,14 +427,14 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
                 return 0;
         }
 
-        spin_lock(&bdev->lru_lock);
+        spin_lock(&glob->lru_lock);
         if (list_empty(&bo->ddestroy)) {
                 void *sync_obj = bo->sync_obj;
                 void *sync_obj_arg = bo->sync_obj_arg;
 
                 kref_get(&bo->list_kref);
                 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
-                spin_unlock(&bdev->lru_lock);
+                spin_unlock(&glob->lru_lock);
                 spin_unlock(&bo->lock);
 
                 if (sync_obj)
@@ -408,7 +444,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
                 ret = 0;
 
         } else {
-                spin_unlock(&bdev->lru_lock);
+                spin_unlock(&glob->lru_lock);
                 spin_unlock(&bo->lock);
                 ret = -EBUSY;
         }
@@ -423,11 +459,12 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 
 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 {
+        struct ttm_bo_global *glob = bdev->glob;
         struct ttm_buffer_object *entry, *nentry;
         struct list_head *list, *next;
         int ret;
 
-        spin_lock(&bdev->lru_lock);
+        spin_lock(&glob->lru_lock);
         list_for_each_safe(list, next, &bdev->ddestroy) {
                 entry = list_entry(list, struct ttm_buffer_object, ddestroy);
                 nentry = NULL;
@@ -444,16 +481,16 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
                 }
                 kref_get(&entry->list_kref);
 
-                spin_unlock(&bdev->lru_lock);
+                spin_unlock(&glob->lru_lock);
                 ret = ttm_bo_cleanup_refs(entry, remove_all);
                 kref_put(&entry->list_kref, ttm_bo_release_list);
 
-                spin_lock(&bdev->lru_lock);
+                spin_lock(&glob->lru_lock);
                 if (nentry) {
                         bool next_onlist = !list_empty(next);
-                        spin_unlock(&bdev->lru_lock);
+                        spin_unlock(&glob->lru_lock);
                         kref_put(&nentry->list_kref, ttm_bo_release_list);
-                        spin_lock(&bdev->lru_lock);
+                        spin_lock(&glob->lru_lock);
                         /*
                          * Someone might have raced us and removed the
                          * next entry from the list. We don't bother restarting
@@ -467,7 +504,7 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
                         break;
         }
         ret = !list_empty(&bdev->ddestroy);
-        spin_unlock(&bdev->lru_lock);
+        spin_unlock(&glob->lru_lock);
 
         return ret;
 }
@@ -517,6 +554,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
 {
         int ret = 0;
         struct ttm_bo_device *bdev = bo->bdev;
+        struct ttm_bo_global *glob = bo->glob;
         struct ttm_mem_reg evict_mem;
         uint32_t proposed_placement;
 
@@ -565,12 +603,12 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
                 goto out;
         }
 
-        spin_lock(&bdev->lru_lock);
+        spin_lock(&glob->lru_lock);
         if (evict_mem.mm_node) {
                 drm_mm_put_block(evict_mem.mm_node);
                 evict_mem.mm_node = NULL;
         }
-        spin_unlock(&bdev->lru_lock);
+        spin_unlock(&glob->lru_lock);
         bo->evicted = true;
 out:
         return ret;
@@ -585,6 +623,7 @@ static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
                                   uint32_t mem_type,
                                   bool interruptible, bool no_wait)
 {
+        struct ttm_bo_global *glob = bdev->glob;
         struct drm_mm_node *node;
         struct ttm_buffer_object *entry;
         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -598,7 +637,7 @@ retry_pre_get:
         if (unlikely(ret != 0))
                 return ret;
 
-        spin_lock(&bdev->lru_lock);
+        spin_lock(&glob->lru_lock);
         do {
                 node = drm_mm_search_free(&man->manager, num_pages,
                                           mem->page_alignment, 1);
@@ -619,7 +658,7 @@ retry_pre_get:
                 if (likely(ret == 0))
                         put_count = ttm_bo_del_from_lru(entry);
 
-                spin_unlock(&bdev->lru_lock);
+                spin_unlock(&glob->lru_lock);
 
                 if (unlikely(ret != 0))
                         return ret;
@@ -635,21 +674,21 @@ retry_pre_get:
                 if (ret)
                         return ret;
 
-                spin_lock(&bdev->lru_lock);
+                spin_lock(&glob->lru_lock);
         } while (1);
 
         if (!node) {
-                spin_unlock(&bdev->lru_lock);
+                spin_unlock(&glob->lru_lock);
                 return -ENOMEM;
         }
 
         node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
         if (unlikely(!node)) {
-                spin_unlock(&bdev->lru_lock);
+                spin_unlock(&glob->lru_lock);
                 goto retry_pre_get;
         }
 
-        spin_unlock(&bdev->lru_lock);
+        spin_unlock(&glob->lru_lock);
         mem->mm_node = node;
         mem->mem_type = mem_type;
         return 0;
@@ -697,6 +736,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                      bool interruptible, bool no_wait)
 {
         struct ttm_bo_device *bdev = bo->bdev;
+        struct ttm_bo_global *glob = bo->glob;
         struct ttm_mem_type_manager *man;
 
         uint32_t num_prios = bdev->driver->num_mem_type_prio;
@@ -733,20 +773,20 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                         if (unlikely(ret))
                                 return ret;
 
-                        spin_lock(&bdev->lru_lock);
+                        spin_lock(&glob->lru_lock);
                         node = drm_mm_search_free(&man->manager,
                                                   mem->num_pages,
                                                   mem->page_alignment,
                                                   1);
                         if (unlikely(!node)) {
-                                spin_unlock(&bdev->lru_lock);
+                                spin_unlock(&glob->lru_lock);
                                 break;
                         }
                         node = drm_mm_get_block_atomic(node,
                                                        mem->num_pages,
                                                        mem->
                                                        page_alignment);
-                        spin_unlock(&bdev->lru_lock);
+                        spin_unlock(&glob->lru_lock);
                 } while (!node);
         }
         if (node)
@@ -816,7 +856,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                         uint32_t proposed_placement,
                         bool interruptible, bool no_wait)
 {
-        struct ttm_bo_device *bdev = bo->bdev;
+        struct ttm_bo_global *glob = bo->glob;
         int ret = 0;
         struct ttm_mem_reg mem;
 
@@ -852,9 +892,9 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 
 out_unlock:
         if (ret && mem.mm_node) {
-                spin_lock(&bdev->lru_lock);
+                spin_lock(&glob->lru_lock);
                 drm_mm_put_block(mem.mm_node);
-                spin_unlock(&bdev->lru_lock);
+                spin_unlock(&glob->lru_lock);
         }
         return ret;
 }
@@ -990,6 +1030,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
         INIT_LIST_HEAD(&bo->ddestroy);
         INIT_LIST_HEAD(&bo->swap);
         bo->bdev = bdev;
+        bo->glob = bdev->glob;
         bo->type = type;
         bo->num_pages = num_pages;
         bo->mem.mem_type = TTM_PL_SYSTEM;
@@ -1002,6 +1043,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
         bo->seq_valid = false;
         bo->persistant_swap_storage = persistant_swap_storage;
         bo->acc_size = acc_size;
+        atomic_inc(&bo->glob->bo_count);
 
         ret = ttm_bo_check_placement(bo, flags, 0ULL);
         if (unlikely(ret != 0))
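Paired with the atomic_dec() added to ttm_bo_release_list() earlier in this patch, this atomic_inc() makes glob->bo_count an exact count of live buffer objects across every device sharing the global state; that is the value the new bo_count sysfs attribute reports.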
@@ -1040,13 +1082,13 @@ out_err:
 }
 EXPORT_SYMBOL(ttm_buffer_object_init);
 
-static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
+static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
                                  unsigned long num_pages)
 {
         size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
             PAGE_MASK;
 
-        return bdev->ttm_bo_size + 2 * page_array_size;
+        return glob->ttm_bo_size + 2 * page_array_size;
 }
 
 int ttm_buffer_object_create(struct ttm_bo_device *bdev,
@@ -1061,10 +1103,10 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
 {
         struct ttm_buffer_object *bo;
         int ret;
-        struct ttm_mem_global *mem_glob = bdev->mem_glob;
+        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
 
         size_t acc_size =
-            ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+            ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
         ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
         if (unlikely(ret != 0))
                 return ret;
@@ -1118,6 +1160,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
                         struct list_head *head,
                         unsigned mem_type, bool allow_errors)
 {
+        struct ttm_bo_global *glob = bdev->glob;
         struct ttm_buffer_object *entry;
         int ret;
         int put_count;
@@ -1126,30 +1169,31 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
          * Can't use standard list traversal since we're unlocking.
          */
 
-        spin_lock(&bdev->lru_lock);
+        spin_lock(&glob->lru_lock);
 
         while (!list_empty(head)) {
                 entry = list_first_entry(head, struct ttm_buffer_object, lru);
                 kref_get(&entry->list_kref);
                 ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
                 put_count = ttm_bo_del_from_lru(entry);
-                spin_unlock(&bdev->lru_lock);
+                spin_unlock(&glob->lru_lock);
                 while (put_count--)
                         kref_put(&entry->list_kref, ttm_bo_ref_bug);
                 BUG_ON(ret);
                 ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
                 ttm_bo_unreserve(entry);
                 kref_put(&entry->list_kref, ttm_bo_release_list);
-                spin_lock(&bdev->lru_lock);
+                spin_lock(&glob->lru_lock);
         }
 
-        spin_unlock(&bdev->lru_lock);
+        spin_unlock(&glob->lru_lock);
 
         return 0;
 }
 
 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 {
+        struct ttm_bo_global *glob = bdev->glob;
         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
         int ret = -EINVAL;
 
@@ -1171,13 +1215,13 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
         if (mem_type > 0) {
                 ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
 
-                spin_lock(&bdev->lru_lock);
+                spin_lock(&glob->lru_lock);
                 if (drm_mm_clean(&man->manager))
                         drm_mm_takedown(&man->manager);
                 else
                         ret = -EBUSY;
 
-                spin_unlock(&bdev->lru_lock);
+                spin_unlock(&glob->lru_lock);
         }
 
         return ret;
@@ -1251,11 +1295,83 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 }
 EXPORT_SYMBOL(ttm_bo_init_mm);
 
+static void ttm_bo_global_kobj_release(struct kobject *kobj)
+{
+        struct ttm_bo_global *glob =
+                container_of(kobj, struct ttm_bo_global, kobj);
+
+        printk(KERN_INFO TTM_PFX "Freeing bo global.\n");
+        ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
+        __free_page(glob->dummy_read_page);
+        kfree(glob);
+}
+
+void ttm_bo_global_release(struct ttm_global_reference *ref)
+{
+        struct ttm_bo_global *glob = ref->object;
+
+        kobject_del(&glob->kobj);
+        kobject_put(&glob->kobj);
+}
+EXPORT_SYMBOL(ttm_bo_global_release);
+
+int ttm_bo_global_init(struct ttm_global_reference *ref)
+{
+        struct ttm_bo_global_ref *bo_ref =
+                container_of(ref, struct ttm_bo_global_ref, ref);
+        struct ttm_bo_global *glob = ref->object;
+        int ret;
+
+        mutex_init(&glob->device_list_mutex);
+        spin_lock_init(&glob->lru_lock);
+        glob->mem_glob = bo_ref->mem_glob;
+        glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
+
+        if (unlikely(glob->dummy_read_page == NULL)) {
+                ret = -ENOMEM;
+                goto out_no_drp;
+        }
+
+        INIT_LIST_HEAD(&glob->swap_lru);
+        INIT_LIST_HEAD(&glob->device_list);
+
+        ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
+        ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
+        if (unlikely(ret != 0)) {
+                printk(KERN_ERR TTM_PFX
+                       "Could not register buffer object swapout.\n");
+                goto out_no_shrink;
+        }
+
+        glob->ttm_bo_extra_size =
+                ttm_round_pot(sizeof(struct ttm_tt)) +
+                ttm_round_pot(sizeof(struct ttm_backend));
+
+        glob->ttm_bo_size = glob->ttm_bo_extra_size +
+                ttm_round_pot(sizeof(struct ttm_buffer_object));
+
+        atomic_set(&glob->bo_count, 0);
+
+        kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
+        ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
+        if (unlikely(ret != 0))
+                kobject_put(&glob->kobj);
+        return ret;
+out_no_shrink:
+        __free_page(glob->dummy_read_page);
+out_no_drp:
+        kfree(glob);
+        return ret;
+}
+EXPORT_SYMBOL(ttm_bo_global_init);
+
+
 int ttm_bo_device_release(struct ttm_bo_device *bdev)
 {
         int ret = 0;
         unsigned i = TTM_NUM_MEM_TYPES;
         struct ttm_mem_type_manager *man;
+        struct ttm_bo_global *glob = bdev->glob;
 
         while (i--) {
                 man = &bdev->man[i];
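ttm_bo_global_init() and ttm_bo_global_release() are written as the init/release hooks of a struct ttm_global_reference rather than as directly-called constructors: the init hook container_of()'s the reference back to a struct ttm_bo_global_ref to find the ttm_mem_global it should attach to. A sketch of the driver-side setup this implies, assuming the ttm_global_item_ref()/TTM_GLOBAL_TTM_BO interface from the companion ttm_global patch; the my_driver naming is purely illustrative:

/* Sketch, not from this patch: take one shared reference on the
 * TTM_GLOBAL_TTM_BO item; the first caller triggers ttm_bo_global_init(). */
static struct ttm_bo_global_ref bo_global_ref;

static int my_driver_bo_global_init(struct ttm_mem_global *mem_glob)
{
        struct ttm_global_reference *global_ref = &bo_global_ref.ref;

        bo_global_ref.mem_glob = mem_glob;      /* read back via container_of() */
        global_ref->global_type = TTM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;
        return ttm_global_item_ref(global_ref);
}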
@@ -1271,98 +1387,74 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
                 }
         }
 
+        mutex_lock(&glob->device_list_mutex);
+        list_del(&bdev->device_list);
+        mutex_unlock(&glob->device_list_mutex);
+
         if (!cancel_delayed_work(&bdev->wq))
                 flush_scheduled_work();
 
         while (ttm_bo_delayed_delete(bdev, true))
                 ;
 
-        spin_lock(&bdev->lru_lock);
+        spin_lock(&glob->lru_lock);
         if (list_empty(&bdev->ddestroy))
                 TTM_DEBUG("Delayed destroy list was clean\n");
 
         if (list_empty(&bdev->man[0].lru))
                 TTM_DEBUG("Swap list was clean\n");
-        spin_unlock(&bdev->lru_lock);
+        spin_unlock(&glob->lru_lock);
 
-        ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
         BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
         write_lock(&bdev->vm_lock);
         drm_mm_takedown(&bdev->addr_space_mm);
         write_unlock(&bdev->vm_lock);
 
-        __free_page(bdev->dummy_read_page);
         return ret;
 }
 EXPORT_SYMBOL(ttm_bo_device_release);
 
-/*
- * This function is intended to be called on drm driver load.
- * If you decide to call it from firstopen, you must protect the call
- * from a potentially racing ttm_bo_driver_finish in lastclose.
- * (This may happen on X server restart).
- */
-
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
-                       struct ttm_mem_global *mem_glob,
-                       struct ttm_bo_driver *driver, uint64_t file_page_offset)
+                       struct ttm_bo_global *glob,
+                       struct ttm_bo_driver *driver,
+                       uint64_t file_page_offset)
 {
         int ret = -EINVAL;
 
-        bdev->dummy_read_page = NULL;
         rwlock_init(&bdev->vm_lock);
-        spin_lock_init(&bdev->lru_lock);
+        spin_lock_init(&glob->lru_lock);
         bdev->driver = driver;
-        bdev->mem_glob = mem_glob;
         memset(bdev->man, 0, sizeof(bdev->man));
 
-        bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
-        if (unlikely(bdev->dummy_read_page == NULL)) {
-                ret = -ENOMEM;
-                goto out_err0;
-        }
-
         /*
          * Initialize the system memory buffer type.
          * Other types need to be driver / IOCTL initialized.
         */
         ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
         if (unlikely(ret != 0))
-                goto out_err1;
+                goto out_no_sys;
 
         bdev->addr_space_rb = RB_ROOT;
         ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
         if (unlikely(ret != 0))
-                goto out_err2;
+                goto out_no_addr_mm;
 
         INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
         bdev->nice_mode = true;
         INIT_LIST_HEAD(&bdev->ddestroy);
-        INIT_LIST_HEAD(&bdev->swap_lru);
         bdev->dev_mapping = NULL;
-        ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
-        ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
-        if (unlikely(ret != 0)) {
-                printk(KERN_ERR TTM_PFX
-                       "Could not register buffer object swapout.\n");
-                goto out_err2;
-        }
+        bdev->glob = glob;
 
-        bdev->ttm_bo_extra_size =
-                ttm_round_pot(sizeof(struct ttm_tt)) +
-                ttm_round_pot(sizeof(struct ttm_backend));
-
-        bdev->ttm_bo_size = bdev->ttm_bo_extra_size +
-                ttm_round_pot(sizeof(struct ttm_buffer_object));
+        mutex_lock(&glob->device_list_mutex);
+        list_add_tail(&bdev->device_list, &glob->device_list);
+        mutex_unlock(&glob->device_list_mutex);
 
         return 0;
-out_err2:
+out_no_addr_mm:
         ttm_bo_clean_mm(bdev, 0);
-out_err1:
-        __free_page(bdev->dummy_read_page);
-out_err0:
+out_no_sys:
         return ret;
 }
 EXPORT_SYMBOL(ttm_bo_device_init);
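With the new signature, a device is initialized against the shared global object instead of a bare ttm_mem_global, and registration on glob->device_list is what lets the global side find every device. A hedged sketch of the matching call-site update (the my_driver names and the page-offset constant are illustrative, not from this patch):

/* Sketch: ref.object is the ttm_bo_global created by the first
 * ttm_global_item_ref() call in the setup sketched earlier. */
ret = ttm_bo_device_init(&my_dev->bdev,
                         bo_global_ref.ref.object,
                         &my_driver_bo_driver,
                         MY_DRIVER_FILE_PAGE_OFFSET);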
@@ -1607,21 +1699,21 @@ void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
 
 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 {
-        struct ttm_bo_device *bdev =
-            container_of(shrink, struct ttm_bo_device, shrink);
+        struct ttm_bo_global *glob =
+            container_of(shrink, struct ttm_bo_global, shrink);
         struct ttm_buffer_object *bo;
         int ret = -EBUSY;
         int put_count;
         uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
 
-        spin_lock(&bdev->lru_lock);
+        spin_lock(&glob->lru_lock);
         while (ret == -EBUSY) {
-                if (unlikely(list_empty(&bdev->swap_lru))) {
-                        spin_unlock(&bdev->lru_lock);
+                if (unlikely(list_empty(&glob->swap_lru))) {
+                        spin_unlock(&glob->lru_lock);
                         return -EBUSY;
                 }
 
-                bo = list_first_entry(&bdev->swap_lru,
+                bo = list_first_entry(&glob->swap_lru,
                                       struct ttm_buffer_object, swap);
                 kref_get(&bo->list_kref);
 
@@ -1633,16 +1725,16 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 
                 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
                 if (unlikely(ret == -EBUSY)) {
-                        spin_unlock(&bdev->lru_lock);
+                        spin_unlock(&glob->lru_lock);
                         ttm_bo_wait_unreserved(bo, false);
                         kref_put(&bo->list_kref, ttm_bo_release_list);
-                        spin_lock(&bdev->lru_lock);
+                        spin_lock(&glob->lru_lock);
                 }
         }
 
         BUG_ON(ret != 0);
         put_count = ttm_bo_del_from_lru(bo);
-        spin_unlock(&bdev->lru_lock);
+        spin_unlock(&glob->lru_lock);
 
         while (put_count--)
                 kref_put(&bo->list_kref, ttm_bo_ref_bug);
@@ -1696,6 +1788,6 @@ out:
 
 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
 {
-        while (ttm_bo_swapout(&bdev->shrink) == 0)
+        while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
                 ;
 }