nouveau_sgdma.c

#include "drmP.h"
#include "nouveau_drv.h"

#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
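
/*
 * Per-backend state: the bus addresses of the currently populated
 * pages, the first PTE index they were bound at, and whether the
 * backend is currently bound into the GART.
 */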
struct nouveau_sgdma_be {
	struct ttm_backend backend;
	struct drm_device *dev;

	dma_addr_t *pages;
	unsigned nr_pages;

	unsigned pte_start;
	bool bound;
};
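
/*
 * Populate: map each backing page for PCI DMA and record the bus
 * addresses.  On a mapping failure, the pages mapped so far are torn
 * down via the backend's clear() hook.  The dma_addrs argument passed
 * in by TTM is unused here; this backend always maps the pages itself.
 */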
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page,
		       dma_addr_t *dma_addrs)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);

	if (nvbe->pages)
		return -EINVAL;

	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
	if (!nvbe->pages)
		return -ENOMEM;

	nvbe->nr_pages = 0;
	while (num_pages--) {
		nvbe->pages[nvbe->nr_pages] =
			pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev,
					  nvbe->pages[nvbe->nr_pages])) {
			be->func->clear(be);
			return -EFAULT;
		}

		nvbe->nr_pages++;
	}

	return 0;
}
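
/*
 * Clear: undo populate.  Unbinds first if the pages are still bound
 * into the GART, then unmaps every page and frees the address array.
 */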
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev;

	if (nvbe && nvbe->pages) {
		dev = nvbe->dev;
		NV_DEBUG(dev, "\n");

		if (nvbe->bound)
			be->func->unbind(be);

		while (nvbe->nr_pages--) {
			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
		kfree(nvbe->pages);
		nvbe->pages = NULL;
		nvbe->nr_pages = 0;
	}
}
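
/*
 * Translate a byte offset within the GART aperture to the index of the
 * first PTE covering it.  Pre-NV50 cards use 4-byte PTEs preceded by a
 * two-word ctxdma header (hence the "+ 2"); NV50 uses 8-byte PTEs, so
 * the index is doubled instead.
 */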
static inline unsigned
nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);

	if (dev_priv->card_type < NV_50)
		return pte + 2;

	return pte << 1;
}
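
/*
 * Bind: write one PTE per NV_CTXDMA_PAGE_SIZE chunk of each mapped
 * page into the ctxdma/page table.  The low bits OR'd into each entry
 * (3 on pre-NV50, 0x21 on NV50) are presumably valid/present flags;
 * NV50 additionally stores the high address bits in the second word.
 */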
static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

	pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
	nvbe->pte_start = pte;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = nvbe->pages[i];
		uint32_t offset_l = lower_32_bits(dma_offset);
		uint32_t offset_h = upper_32_bits(dma_offset);

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			if (dev_priv->card_type < NV_50) {
				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
				pte += 1;
			} else {
				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
				nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
				pte += 2;
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.flush(nvbe->dev);

	if (dev_priv->card_type == NV_50) {
		nv50_vm_flush(dev, 5); /* PGRAPH */
		nv50_vm_flush(dev, 0); /* PFIFO */
	}

	nvbe->bound = true;
	return 0;
}
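
/*
 * Unbind: point every PTE in the bound range back at the dummy page
 * (pre-NV50) or clear it entirely (NV50), then flush instmem and the
 * affected VM engines.
 */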
static int
nouveau_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (!nvbe->bound)
		return 0;

	pte = nvbe->pte_start;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			if (dev_priv->card_type < NV_50) {
				nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3);
				pte += 1;
			} else {
				nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
				nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
				pte += 2;
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.flush(nvbe->dev);

	if (dev_priv->card_type == NV_50) {
		nv50_vm_flush(dev, 5); /* PGRAPH */
		nv50_vm_flush(dev, 0); /* PFIFO */
	}

	nvbe->bound = false;
	return 0;
}
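
/*
 * Destroy: final teardown of the backend object.  clear() is invoked
 * first if pages are still populated.
 */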
static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	/* nvbe is the same pointer as be, so a single NULL check covers
	 * both (the original nested check was redundant).
	 */
	if (!nvbe)
		return;

	NV_DEBUG(nvbe->dev, "\n");

	if (nvbe->pages)
		be->func->clear(be);
	kfree(nvbe);
}
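
/* TTM backend method table wiring the hooks above into TTM. */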
static struct ttm_backend_func nouveau_sgdma_backend = {
	.populate	= nouveau_sgdma_populate,
	.clear		= nouveau_sgdma_clear,
	.bind		= nouveau_sgdma_bind,
	.unbind		= nouveau_sgdma_unbind,
	.destroy	= nouveau_sgdma_destroy
};
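
/*
 * Allocate a new TTM backend instance for this device.  Returns NULL
 * if SGDMA has not been initialised, i.e. there is no ctxdma to bind
 * into.
 */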
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_sgdma_be *nvbe;

	if (!dev_priv->gart_info.sg_ctxdma)
		return NULL;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;

	nvbe->backend.func = &nouveau_sgdma_backend;
	return &nvbe->backend;
}
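
/*
 * One-time SGDMA setup: create the ctxdma (pre-NV50) or page table
 * (NV50) covering the GART aperture, plus a DMA-mapped dummy page that
 * unbound pre-NV50 PTEs point at.  The aperture sizes are fixed: 64MiB
 * before NV50, 512MiB (one entire VM page table) on NV50.
 */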
int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pci_dev *pdev = dev->pdev;
	struct nouveau_gpuobj *gpuobj = NULL;
	uint32_t aper_size, obj_size;
	int i, ret;

	if (dev_priv->card_type < NV_50) {
		aper_size = (64 * 1024 * 1024);
		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
		obj_size += 8; /* ctxdma header */
	} else {
		/* 1 entire VM page table */
		aper_size = (512 * 1024 * 1024);
		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
	}

	ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
				 NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
		return ret;
	}

	dev_priv->gart_info.sg_dummy_page =
		alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO);
	if (!dev_priv->gart_info.sg_dummy_page) {
		nouveau_gpuobj_ref(NULL, &gpuobj);
		return -ENOMEM;
	}

	set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
	dev_priv->gart_info.sg_dummy_bus =
		pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
		/* Don't leak the dummy page on the error path. */
		unlock_page(dev_priv->gart_info.sg_dummy_page);
		__free_page(dev_priv->gart_info.sg_dummy_page);
		dev_priv->gart_info.sg_dummy_page = NULL;
		nouveau_gpuobj_ref(NULL, &gpuobj);
		return -EFAULT;
	}

	if (dev_priv->card_type < NV_50) {
		/* Special case: this object is allocated from the global
		 * instmem heap, so cinst would normally be invalid.  It is
		 * used on all channels, however, so cinst needs to be
		 * valid; set it to the same value as pinst.
		 */
		gpuobj->cinst = gpuobj->pinst;

		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
		 * it is confirmed to work on c51.  Perhaps that means
		 * NV_DMA_TARGET_PCIE on those cards?
		 */
		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				   (1 << 12) /* PT present */ |
				   (0 << 13) /* PT *not* linear */ |
				   (NV_DMA_ACCESS_RW << 14) |
				   (NV_DMA_TARGET_PCI << 16));
		nv_wo32(gpuobj, 4, aper_size - 1);
		for (i = 2; i < 2 + (aper_size >> 12); i++) {
			nv_wo32(gpuobj, i * 4,
				dev_priv->gart_info.sg_dummy_bus | 3);
		}
	} else {
		for (i = 0; i < obj_size; i += 8) {
			nv_wo32(gpuobj, i + 0, 0x00000000);
			nv_wo32(gpuobj, i + 4, 0x00000000);
		}
	}

	dev_priv->engine.instmem.flush(dev);

	dev_priv->gart_info.type      = NOUVEAU_GART_SGDMA;
	dev_priv->gart_info.aper_base = 0;
	dev_priv->gart_info.aper_size = aper_size;
	dev_priv->gart_info.sg_ctxdma = gpuobj;
	return 0;
}
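
/*
 * Teardown: release the dummy page and drop the ctxdma reference.
 */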
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->gart_info.sg_dummy_page) {
		/* Unmap with the same size the page was mapped with;
		 * the map in nouveau_sgdma_init() used PAGE_SIZE.
		 */
		pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		unlock_page(dev_priv->gart_info.sg_dummy_page);
		__free_page(dev_priv->gart_info.sg_dummy_page);
		dev_priv->gart_info.sg_dummy_page = NULL;
		dev_priv->gart_info.sg_dummy_bus = 0;
	}

	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
}
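
/*
 * Look up the bus address currently bound at a GART offset by reading
 * the PTE back out of the ctxdma.  The "+ 8" skips the two-word ctxdma
 * header on pre-NV50; NV50 page tables are not handled here.
 */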
int
nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	int pte;

	pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2;
	if (dev_priv->card_type < NV_50) {
		*page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK;
		return 0;
	}

	NV_ERROR(dev, "Unimplemented on NV50\n");
	return -EINVAL;
}