nouveau_ttm.c

/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "nouveau_drv.h"

static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	/* nothing to do */
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	/* nothing to do */
	return 0;
}
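
/* Tear down any GPU virtual address mappings still attached to a memory
 * node: unmap the pages and release the VMA for each of the (up to) two
 * per-node mappings.  Shared by the VRAM and GART manager del() hooks.
 */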
static inline void
nouveau_mem_node_cleanup(struct nouveau_mem *node)
{
	if (node->vma[0].node) {
		nouveau_vm_unmap(&node->vma[0]);
		nouveau_vm_put(&node->vma[0]);
	}

	if (node->vma[1].node) {
		nouveau_vm_unmap(&node->vma[1]);
		nouveau_vm_put(&node->vma[1]);
	}
}
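
/* Free a VRAM placement: drop any VM mappings first, then hand the
 * underlying nouveau_mem back to the VRAM allocator.
 */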
static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
	struct drm_device *dev = dev_priv->dev;

	nouveau_mem_node_cleanup(mem->mm_node);
	nvfb_vram_put(dev, (struct nouveau_mem **)&mem->mm_node);
}
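
/* Allocate VRAM for a buffer object.  A NONCONTIG tile flag lets the
 * allocation be split into chunks no smaller than one big page
 * (1 << page_shift).  -ENOSPC is reported as success with a NULL
 * mm_node, which tells TTM to try evicting something instead of
 * failing outright.
 */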
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_mem *node;
	u32 size_nc = 0;
	int ret;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = nvfb_vram_get(dev, mem->num_pages << PAGE_SHIFT,
			    mem->page_alignment << PAGE_SHIFT, size_nc,
			    (nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start   = node->offset >> PAGE_SHIFT;
	return 0;
}
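
/* Dump every node tracked by the VRAM allocator to the kernel log.
 * Offsets and lengths are kept in 4 KiB units, hence the << 12 when
 * printing; a node with type == 0 is free space.
 */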
void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
	struct nouveau_mm *mm = man->priv;
	struct nouveau_mm_node *r;
	u32 total = 0, free = 0;

	mutex_lock(&mm->mutex);
	list_for_each_entry(r, &mm->nodes, nl_entry) {
		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
		       prefix, r->type, ((u64)r->offset << 12),
		       (((u64)r->offset + r->length) << 12));

		total += r->length;
		if (!r->type)
			free += r->length;
	}
	mutex_unlock(&mm->mutex);

	printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
	       prefix, (u64)total << 12, (u64)free << 12);
	printk(KERN_DEBUG "%s block: 0x%08x\n",
	       prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	nouveau_vram_manager_init,
	nouveau_vram_manager_fini,
	nouveau_vram_manager_new,
	nouveau_vram_manager_del,
	nouveau_vram_manager_debug
};

static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nouveau_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}
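
/* GART placements need no backing store at this point; pages are bound
 * later, so only a tracking node is allocated and mem->start stays 0.
 * Requests larger than the GART aperture are refused outright.
 */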
static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_mem *node;

	if (unlikely((mem->num_pages << PAGE_SHIFT) >=
		     dev_priv->gart_info.aper_size))
		return -ENOMEM;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	node->page_shift = 12;

	mem->mm_node = node;
	mem->start   = 0;
	return 0;
}

void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	nouveau_gart_manager_init,
	nouveau_gart_manager_fini,
	nouveau_gart_manager_new,
	nouveau_gart_manager_del,
	nouveau_gart_manager_debug
};

static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
	struct drm_device *dev = dev_priv->dev;

	man->priv = nv04vm_ref(dev);
	return (man->priv != NULL) ? 0 : -ENODEV;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	struct nouveau_vm *vm = man->priv;

	nouveau_vm_ref(NULL, &vm, NULL);
	man->priv = NULL;
	return 0;
}
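
/* Release the VM range reserved by nv04_gart_manager_new() before
 * freeing the tracking node.
 */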
static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
	struct nouveau_mem *node = mem->mm_node;

	if (node->vma[0].node)
		nouveau_vm_put(&node->vma[0]);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}
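
/* Reserve a range of GPU virtual addresses in the NV04 GART address
 * space; the pages themselves are bound later.  The VMA's offset becomes
 * the placement's start address.
 */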
static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      struct ttm_placement *placement,
		      struct ttm_mem_reg *mem)
{
	struct nouveau_mem *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
			     NV_MEM_ACCESS_RW, &node->vma[0]);
	if (ret) {
		kfree(node);
		return ret;
	}

	mem->mm_node = node;
	mem->start   = node->vma[0].offset >> PAGE_SHIFT;
	return 0;
}

void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
	nv04_gart_manager_init,
	nv04_gart_manager_fini,
	nv04_gart_manager_new,
	nv04_gart_manager_del,
	nv04_gart_manager_debug
};
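
/*
 * None of the three manager tables above is called directly; TTM invokes
 * them through the ttm_mem_type_manager for each memory type.  A minimal
 * sketch of how a driver of this era would wire them up from its
 * init_mem_type() callback (the real hook for nouveau lives elsewhere,
 * in nouveau_bo.c; "example_init_mem_type" is an illustrative name, not
 * a symbol from this driver):
 *
 *	static int
 *	example_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 *			      struct ttm_mem_type_manager *man)
 *	{
 *		switch (type) {
 *		case TTM_PL_VRAM:
 *			man->func = &nouveau_vram_manager;
 *			break;
 *		case TTM_PL_TT:
 *			man->func = &nouveau_gart_manager;
 *			break;
 *		default:
 *			return -EINVAL;
 *		}
 *		return 0;
 *	}
 */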

/* mmap entry point: offsets below DRM_FILE_PAGE_OFFSET belong to legacy
 * DRM maps; anything above is a TTM buffer object.
 */
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_nouveau_private *dev_priv =
		file_priv->minor->dev->dev_private;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_mmap(filp, vma);

	return ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
}
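
/* Adapters matching TTM's global memory-accounting init/release to the
 * drm_global_reference callback signatures.
 */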
static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}
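
/* Take references on the global TTM memory-accounting and BO state,
 * initializing them on first use.  mem_global_ref.release doubles as an
 * "is initialized" flag for nouveau_ttm_global_release().
 */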
int
nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
{
	struct drm_global_reference *global_ref;
	int ret;

	global_ref = &dev_priv->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &nouveau_ttm_mem_global_init;
	global_ref->release = &nouveau_ttm_mem_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
		dev_priv->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	dev_priv->ttm.bo_global_ref.mem_glob = global_ref->object;
	global_ref = &dev_priv->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
		drm_global_item_unref(&dev_priv->ttm.mem_global_ref);
		dev_priv->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	return 0;
}

/* Drop the references taken by nouveau_ttm_global_init(); a NULL release
 * callback means init never succeeded, so there is nothing to undo.
 */
void
nouveau_ttm_global_release(struct drm_nouveau_private *dev_priv)
{
	if (dev_priv->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref);
	drm_global_item_unref(&dev_priv->ttm.mem_global_ref);
	dev_priv->ttm.mem_global_ref.release = NULL;
}