nouveau_sgdma.c

#include <linux/pagemap.h>
#include <linux/slab.h>

#include <subdev/fb.h>

#include "nouveau_drm.h"
#include "nouveau_ttm.h"

struct nouveau_sgdma_be {
	/* this has to be the first field so populate/unpopulate in
	 * nouveau_bo.c works properly, otherwise have to move them here
	 */
	struct ttm_dma_tt ttm;
	struct drm_device *dev;
	struct nouveau_mem *node;
};
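
/* Common destructor for both backends: finalize the DMA-aware ttm_tt
 * and free the wrapper allocated in nouveau_sgdma_create_ttm().
 */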
static void
nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

	if (ttm) {
		ttm_dma_tt_fini(&nvbe->ttm);
		kfree(nvbe);
	}
}
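
/* Pre-NV50 path: mapping happens at bind time.  The size is
 * mem->num_pages shifted by 12, i.e. a count of 4 KiB pages converted
 * to bytes.  Buffers imported through PRIME carry an sg_table in
 * ttm->sg; everything else is described by the DMA addresses that
 * ttm_dma_tt populated.
 */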
static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nouveau_mem *node = mem->mm_node;
	u64 size = mem->num_pages << 12;

	if (ttm->sg) {
		node->sg = ttm->sg;
		nouveau_vm_map_sg_table(&node->vma[0], 0, size, node);
	} else {
		node->pages = nvbe->ttm.dma_address;
		nouveau_vm_map_sg(&node->vma[0], 0, size, node);
	}

	nvbe->node = node;
	return 0;
}

static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

	nouveau_vm_unmap(&nvbe->node->vma[0]);
	return 0;
}
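
/* Vtable TTM calls through when a buffer object enters or leaves the
 * TT (system-memory) domain on pre-NV50 chipsets.
 */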
static struct ttm_backend_func nv04_sgdma_backend = {
	.bind		= nv04_sgdma_bind,
	.unbind		= nv04_sgdma_unbind,
	.destroy	= nouveau_sgdma_destroy
};
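
/* NV50 and later chipsets have a paged MMU, and the page tables are
 * updated from move_notify() instead, so bind only records where the
 * backing pages live.
 */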
static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nouveau_mem *node = mem->mm_node;

	/* noop: bound in move_notify() */
	if (ttm->sg)
		node->sg = ttm->sg;
	else
		node->pages = nvbe->ttm.dma_address;
	return 0;
}

static int
nv50_sgdma_unbind(struct ttm_tt *ttm)
{
	/* noop: unbound in move_notify() */
	return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
	.bind		= nv50_sgdma_bind,
	.unbind		= nv50_sgdma_unbind,
	.destroy	= nouveau_sgdma_destroy
};
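
/* Allocate the backend and pick the generation-specific function
 * table: chipsets before NV50 use the map-at-bind path above, newer
 * ones defer to move_notify().
 */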
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
			 unsigned long size, uint32_t page_flags,
			 struct page *dummy_read_page)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = drm->dev;
	if (nv_device(drm->device)->card_type < NV_50)
		nvbe->ttm.ttm.func = &nv04_sgdma_backend;
	else
		nvbe->ttm.ttm.func = &nv50_sgdma_backend;

	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(nvbe);
		return NULL;
	}
	return &nvbe->ttm.ttm;
}
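
/* Usage sketch (assumption, not part of this file): TTM is expected to
 * reach this constructor through the driver's ttm_bo_driver, roughly:
 *
 *	struct ttm_bo_driver nouveau_bo_driver = {
 *		.ttm_tt_create = &nouveau_sgdma_create_ttm,
 *		...
 *	};
 *
 * so every buffer object placed in system memory is backed by one of
 * the sgdma backends above.
 */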