nouveau_ttm.c

/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/instmem.h>

#include "nouveau_drm.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
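
/*
 * TTM memory-type manager for VRAM.  The actual allocator lives in the
 * fb subdev (pfb->ram.get/put), so there is no per-manager state to set
 * up or tear down here.
 */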
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	/* nothing to do */
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	/* nothing to do */
	return 0;
}
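
/*
 * A nouveau_mem node can carry up to two GPU virtual-address mappings
 * (vma[0] and vma[1]); unmap and release whichever are present before
 * the backing storage goes away.
 */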
static inline void
nouveau_mem_node_cleanup(struct nouveau_mem *node)
{
	if (node->vma[0].node) {
		nouveau_vm_unmap(&node->vma[0]);
		nouveau_vm_put(&node->vma[0]);
	}

	if (node->vma[1].node) {
		nouveau_vm_unmap(&node->vma[1]);
		nouveau_vm_put(&node->vma[1]);
	}
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);

	nouveau_mem_node_cleanup(mem->mm_node);
	pfb->ram.put(pfb, (struct nouveau_mem **)&mem->mm_node);
}
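
/*
 * Allocate VRAM for a buffer object.  NOUVEAU_GEM_TILE_NONCONTIG lets
 * the allocator satisfy the request in pieces as small as one of the
 * BO's pages; bits 8..17 of tile_flags select the memory type.  -ENOSPC
 * is turned into success with a NULL mm_node, which tells TTM to evict
 * something and retry instead of failing outright.
 */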
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_mem *node;
	u32 size_nc = 0;
	int ret;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = pfb->ram.get(pfb, mem->num_pages << PAGE_SHIFT,
			   mem->page_alignment << PAGE_SHIFT, size_nc,
			   (nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start = node->offset >> PAGE_SHIFT;
	return 0;
}

static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
	struct nouveau_mm *mm = man->priv;
	struct nouveau_mm_node *r;
	u32 total = 0, free = 0;

	mutex_lock(&mm->mutex);
	list_for_each_entry(r, &mm->nodes, nl_entry) {
		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
		       prefix, r->type, ((u64)r->offset << 12),
		       (((u64)r->offset + r->length) << 12));

		total += r->length;
		if (!r->type)
			free += r->length;
	}
	mutex_unlock(&mm->mutex);

	printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
	       prefix, (u64)total << 12, (u64)free << 12);
	printk(KERN_DEBUG "%s block: 0x%08x\n",
	       prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	nouveau_vram_manager_init,
	nouveau_vram_manager_fini,
	nouveau_vram_manager_new,
	nouveau_vram_manager_del,
	nouveau_vram_manager_debug
};
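
/*
 * TTM memory-type manager for the VM-backed GART heap.  Placement here
 * only allocates the nouveau_mem node; the VM mappings themselves are
 * created later, when the buffer is actually bound, so mem->start is
 * always 0.  The 512 MiB cap mirrors the clamp applied to
 * gart_available in nouveau_ttm_init() below.
 */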
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nouveau_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_mem *node;

	if (unlikely((mem->num_pages << PAGE_SHIFT) >= 512 * 1024 * 1024))
		return -ENOMEM;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	mem->mm_node = node;
	mem->start = 0;
	return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	nouveau_gart_manager_init,
	nouveau_gart_manager_fini,
	nouveau_gart_manager_new,
	nouveau_gart_manager_del,
	nouveau_gart_manager_debug
};
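
/*
 * nv04/nv05 have no per-channel page tables, so this manager carves
 * GART space straight out of the nv04 vmmgr's single VM, holding a
 * reference on it for the lifetime of the manager.
 */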
#include <core/subdev/vm/nv04.h>

static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nouveau_vmmgr *vmm = nouveau_vmmgr(drm->device);
	struct nv04_vmmgr_priv *priv = (void *)vmm;
	struct nouveau_vm *vm = NULL;

	nouveau_vm_ref(priv->vm, &vm, NULL);
	man->priv = vm;
	return 0;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	struct nouveau_vm *vm = man->priv;

	nouveau_vm_ref(NULL, &vm, NULL);
	man->priv = NULL;
	return 0;
}

static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
	struct nouveau_mem *node = mem->mm_node;

	if (node->vma[0].node)
		nouveau_vm_put(&node->vma[0]);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      struct ttm_placement *placement,
		      struct ttm_mem_reg *mem)
{
	struct nouveau_mem *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
			     NV_MEM_ACCESS_RW, &node->vma[0]);
	if (ret) {
		kfree(node);
		return ret;
	}

	mem->mm_node = node;
	mem->start = node->vma[0].offset >> PAGE_SHIFT;
	return 0;
}

static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
	nv04_gart_manager_init,
	nv04_gart_manager_fini,
	nv04_gart_manager_new,
	nv04_gart_manager_del,
	nv04_gart_manager_debug
};
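
/*
 * Offsets below DRM_FILE_PAGE_OFFSET belong to legacy DRM maps and are
 * routed to drm_mmap(); everything above is a TTM buffer object.
 */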
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_mmap(filp, vma);

	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}
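
/*
 * The TTM memory-accounting and BO global state are shared by every
 * TTM-using driver in the system; drm_global_item_ref() creates them on
 * first use and refcounts them afterwards.
 */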
int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
	struct drm_global_reference *global_ref;
	int ret;

	global_ref = &drm->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &nouveau_ttm_mem_global_init;
	global_ref->release = &nouveau_ttm_mem_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
	global_ref = &drm->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
		drm_global_item_unref(&drm->ttm.mem_global_ref);
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
	if (drm->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
	drm_global_item_unref(&drm->ttm.mem_global_ref);
	drm->ttm.mem_global_ref.release = NULL;
}
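
/*
 * Bring up TTM: pick a DMA mask (AGP, or hardware whose addressing the
 * PCI layer can't honour, falls back to 32 bits), initialise the BO
 * device, then size and register the VRAM and GART heaps.  VRAM is set
 * write-combined via an MTRR where available.
 */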
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct drm_device *dev = drm->dev;
	u32 bits;
	int ret;

	bits = nouveau_vmmgr(drm->device)->dma_bits;
	if (drm->agp.stat == ENABLED ||
	    !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
		bits = 32;

	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
	if (ret)
		return ret;

	ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
	if (ret)
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));

	ret = nouveau_ttm_global_init(drm);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&drm->ttm.bdev,
				 drm->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
				 bits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

	/* VRAM init */
	drm->gem.vram_available = nouveau_fb(drm->device)->ram.size;
	drm->gem.vram_available -= nouveau_instmem(drm->device)->reserved;

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
			     drm->gem.vram_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	drm->ttm.mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
				     pci_resource_len(dev->pdev, 1),
				     DRM_MTRR_WC);

	/* GART init */
	if (drm->agp.stat != ENABLED) {
		drm->gem.gart_available = nouveau_vmmgr(drm->device)->limit;
		if (drm->gem.gart_available > 512 * 1024 * 1024)
			drm->gem.gart_available = 512 * 1024 * 1024;
	} else {
		drm->gem.gart_available = drm->agp.size;
	}

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
			     drm->gem.gart_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}
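
/*
 * Tear down in roughly the reverse order of nouveau_ttm_init(): drain
 * both heaps, release the BO device and the TTM globals, then drop the
 * write-combining MTRR on the VRAM BAR.
 */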
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	mutex_lock(&drm->dev->struct_mutex);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
	mutex_unlock(&drm->dev->struct_mutex);

	ttm_bo_device_release(&drm->ttm.bdev);

	nouveau_ttm_global_release(drm);

	if (drm->ttm.mtrr >= 0) {
		drm_mtrr_del(drm->ttm.mtrr,
			     pci_resource_start(drm->dev->pdev, 1),
			     pci_resource_len(drm->dev->pdev, 1), DRM_MTRR_WC);
		drm->ttm.mtrr = -1;
	}
}