@@ -86,6 +86,7 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
 	mutex_lock(&man->io_reserve_mutex);
 	return 0;
 }
+EXPORT_SYMBOL(ttm_mem_io_lock);
 
 void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
 {
@@ -94,6 +95,7 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
 
 	mutex_unlock(&man->io_reserve_mutex);
 }
+EXPORT_SYMBOL(ttm_mem_io_unlock);
 
 static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
 {
@@ -111,8 +113,9 @@ static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
 	return 0;
 }
 
-static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
-			struct ttm_mem_reg *mem)
+
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+		       struct ttm_mem_reg *mem)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	int ret = 0;
@@ -134,9 +137,10 @@ retry:
 	}
 	return ret;
 }
+EXPORT_SYMBOL(ttm_mem_io_reserve);
 
-static void ttm_mem_io_free(struct ttm_bo_device *bdev,
-			struct ttm_mem_reg *mem)
+void ttm_mem_io_free(struct ttm_bo_device *bdev,
+		     struct ttm_mem_reg *mem)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 
@@ -149,6 +153,7 @@ static void ttm_mem_io_free(struct ttm_bo_device *bdev,
 		bdev->driver->io_mem_free(bdev, mem);
 
 }
+EXPORT_SYMBOL(ttm_mem_io_free);
 
 int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
 {
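
For context on how the newly exported, non-static helpers are intended to pair up on the driver side, here is a minimal hypothetical sketch (not part of this patch; the function name example_driver_touch_io and its error-path layout are assumptions). It takes the manager's io_reserve lock, reserves the buffer's io region, and then releases both in reverse order:

#include <drm/ttm/ttm_bo_driver.h>

/*
 * Hypothetical driver-side sketch only -- not part of this patch.
 * ttm_mem_io_lock()/ttm_mem_io_unlock() guard the per-manager
 * io_reserve state, while ttm_mem_io_reserve()/ttm_mem_io_free()
 * bracket any access to the buffer's bus address.
 */
static int example_driver_touch_io(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
	int ret;

	ret = ttm_mem_io_lock(man, true);	/* interruptible */
	if (ret)
		return ret;

	ret = ttm_mem_io_reserve(bdev, &bo->mem);
	if (ret)
		goto out_unlock;

	/* ... access bo->mem.bus.base + bo->mem.bus.offset here ... */

	ttm_mem_io_free(bdev, &bo->mem);
out_unlock:
	ttm_mem_io_unlock(man);
	return ret;
}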