@@ -949,7 +949,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 	struct dma_page *d_page, *next;
 	enum pool_type type;
 	bool is_cached = false;
-	unsigned count = 0, i;
+	unsigned count = 0, i, npages = 0;
 	unsigned long irq_flags;
 
 	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
@@ -974,8 +974,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 	} else {
 		pool->npages_free += count;
 		list_splice(&ttm_dma->pages_list, &pool->free_list);
+		npages = count;
 		if (pool->npages_free > _manager->options.max_size) {
-			count = pool->npages_free - _manager->options.max_size;
+			npages = pool->npages_free - _manager->options.max_size;
+			/* free at least NUM_PAGES_TO_ALLOC number of pages
+			 * to reduce calls to set_memory_wb */
+			if (npages < NUM_PAGES_TO_ALLOC)
+				npages = NUM_PAGES_TO_ALLOC;
 		}
 	}
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
@@ -999,9 +1004,9 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 		ttm_dma->dma_address[i] = 0;
 	}
 
-	/* shrink pool if necessary */
-	if (count)
-		ttm_dma_page_pool_free(pool, count);
+	/* shrink pool if necessary (only on !is_cached pools) */
+	if (npages)
+		ttm_dma_page_pool_free(pool, npages);
 	ttm->state = tt_unpopulated;
 }
 EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);