/* nvc0_instmem.c */
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "drmP.h"
#include "nouveau_drv.h"
/* Per-gpuobj bookkeeping for NVC0 instance memory: the VRAM buffer that
 * backs the object, and its window (if mapped) in the PRAMIN aperture. */
struct nvc0_gpuobj_node {
	struct nouveau_bo *vram;	/* pinned VRAM backing the object */
	struct drm_mm_node *ramin;	/* PRAMIN heap slot; NULL when unmapped */
	u32 align;			/* alignment requested at _get() time */
};
  31. int
  32. nvc0_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
  33. {
  34. struct drm_device *dev = gpuobj->dev;
  35. struct nvc0_gpuobj_node *node = NULL;
  36. int ret;
  37. node = kzalloc(sizeof(*node), GFP_KERNEL);
  38. if (!node)
  39. return -ENOMEM;
  40. node->align = align;
  41. ret = nouveau_bo_new(dev, NULL, size, align, TTM_PL_FLAG_VRAM,
  42. 0, 0x0000, true, false, &node->vram);
  43. if (ret) {
  44. NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
  45. return ret;
  46. }
  47. ret = nouveau_bo_pin(node->vram, TTM_PL_FLAG_VRAM);
  48. if (ret) {
  49. NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
  50. nouveau_bo_ref(NULL, &node->vram);
  51. return ret;
  52. }
  53. gpuobj->vinst = node->vram->bo.mem.start << PAGE_SHIFT;
  54. gpuobj->size = node->vram->bo.mem.num_pages << PAGE_SHIFT;
  55. gpuobj->node = node;
  56. return 0;
  57. }
  58. void
  59. nvc0_instmem_put(struct nouveau_gpuobj *gpuobj)
  60. {
  61. struct nvc0_gpuobj_node *node;
  62. node = gpuobj->node;
  63. gpuobj->node = NULL;
  64. nouveau_bo_unpin(node->vram);
  65. nouveau_bo_ref(NULL, &node->vram);
  66. kfree(node);
  67. }
/* Map a gpuobj's backing VRAM into the PRAMIN aperture.
 *
 * Carves a slot out of the global RAMIN heap, then writes one 8-byte
 * page-table entry per 4KiB page at 0x702000 pointing the aperture
 * window at the object's VRAM.  On success stores the heap node in
 * node->ramin and the aperture offset in gpuobj->pinst.
 *
 * Returns 0 on success, -ENOMEM if the heap has no suitable slot.
 */
int
nvc0_instmem_map(struct nouveau_gpuobj *gpuobj)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct nvc0_gpuobj_node *node = gpuobj->node;
	struct drm_device *dev = gpuobj->dev;
	struct drm_mm_node *ramin = NULL;
	u32 pte, pte_end;
	u64 vram;

	/* drm_mm pre-get/search/get-atomic dance: preallocation must happen
	 * outside the spinlock, and get_block_atomic can fail under
	 * contention, hence the retry loop. */
	do {
		if (drm_mm_pre_get(&dev_priv->ramin_heap))
			return -ENOMEM;

		spin_lock(&dev_priv->ramin_lock);
		ramin = drm_mm_search_free(&dev_priv->ramin_heap, gpuobj->size,
					   node->align, 0);
		if (ramin == NULL) {
			spin_unlock(&dev_priv->ramin_lock);
			return -ENOMEM;
		}

		ramin = drm_mm_get_block_atomic(ramin, gpuobj->size, node->align);
		spin_unlock(&dev_priv->ramin_lock);
	} while (ramin == NULL);

	/* PTE index = page number << 1: each entry is an 8-byte pair
	 * written as two 32-bit words below. */
	pte     = (ramin->start >> 12) << 1;
	pte_end = ((ramin->size >> 12) << 1) + pte;
	vram    = gpuobj->vinst;

	NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
		 ramin->start, pte, pte_end);
	NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);

	while (pte < pte_end) {
		/* low word: VRAM address >> 8, bit 0 = present */
		nv_wr32(dev, 0x702000 + (pte * 8), (vram >> 8) | 1);
		nv_wr32(dev, 0x702004 + (pte * 8), 0);
		vram += 4096;
		pte++;
	}

	dev_priv->engine.instmem.flush(dev);

	/* NOTE(review): looks like a VM/TLB flush poke for the channel
	 * currently selected via 0x1700 — confirm against hw docs. */
	if (1) {
		u32 chan = nv_rd32(dev, 0x1700) << 16;
		nv_wr32(dev, 0x100cb8, (chan + 0x1000) >> 8);
		nv_wr32(dev, 0x100cbc, 0x80000005);
	}

	node->ramin   = ramin;
	gpuobj->pinst = ramin->start;
	return 0;
}
  112. void
  113. nvc0_instmem_unmap(struct nouveau_gpuobj *gpuobj)
  114. {
  115. struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
  116. struct nvc0_gpuobj_node *node = gpuobj->node;
  117. u32 pte, pte_end;
  118. if (!node->ramin || !dev_priv->ramin_available)
  119. return;
  120. pte = (node->ramin->start >> 12) << 1;
  121. pte_end = ((node->ramin->size >> 12) << 1) + pte;
  122. while (pte < pte_end) {
  123. nv_wr32(gpuobj->dev, 0x702000 + (pte * 8), 0);
  124. nv_wr32(gpuobj->dev, 0x702004 + (pte * 8), 0);
  125. pte++;
  126. }
  127. dev_priv->engine.instmem.flush(gpuobj->dev);
  128. spin_lock(&dev_priv->ramin_lock);
  129. drm_mm_put_block(node->ramin);
  130. node->ramin = NULL;
  131. spin_unlock(&dev_priv->ramin_lock);
  132. }
/* Flush pending PRAMIN writes.  Writing 1 to 0x070000 kicks the flush;
 * we then wait for bit 1 of the same register to clear (presumably the
 * busy flag — confirm against hw docs), logging on timeout. */
void
nvc0_instmem_flush(struct drm_device *dev)
{
	nv_wr32(dev, 0x070000, 1);
	if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
		NV_ERROR(dev, "PRAMIN flush timeout\n");
}
  140. int
  141. nvc0_instmem_suspend(struct drm_device *dev)
  142. {
  143. struct drm_nouveau_private *dev_priv = dev->dev_private;
  144. u32 *buf;
  145. int i;
  146. dev_priv->susres.ramin_copy = vmalloc(65536);
  147. if (!dev_priv->susres.ramin_copy)
  148. return -ENOMEM;
  149. buf = dev_priv->susres.ramin_copy;
  150. for (i = 0; i < 65536; i += 4)
  151. buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i);
  152. return 0;
  153. }
/* Restore the PRAMIN snapshot taken by nvc0_instmem_suspend() and
 * repoint BAR3 at the reserved channel.  Write order matters: 0x1700
 * selects the aperture window before the PRAMIN writes, and 0x1714 is
 * programmed last, once the contents are back in place. */
void
nvc0_instmem_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 *buf = dev_priv->susres.ramin_copy;
	u64 chan;
	int i;

	/* reserved channel lives at the top of VRAM (see _init) */
	chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
	nv_wr32(dev, 0x001700, chan >> 16);

	for (i = 0; i < 65536; i += 4)
		nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]);
	vfree(dev_priv->susres.ramin_copy);
	dev_priv->susres.ramin_copy = NULL;

	/* 0xc0000000: enable/valid bits — presumably; confirm vs hw docs */
	nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));
}
/* Bring up NVC0 instance memory.
 *
 * Reserves 1MiB at the top of VRAM for a bootstrap channel, builds a
 * minimal page directory/table inside it so the PRAMIN aperture maps
 * that reservation, points BAR3 at it, and finally creates the global
 * RAMIN allocation heap above the fixed structures.
 *
 * Returns 0 on success, -ENOMEM if the heap cannot be initialised.
 */
int
nvc0_instmem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u64 chan, pgt3, imem, lim3 = dev_priv->ramin_size - 1;
	int ret, i;

	dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
	chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
	/* fixed layout: 4KiB chan block + 4KiB pgd + 32KiB pgt */
	imem = 4096 + 4096 + 32768;

	/* select the reservation through the 0x1700 aperture window */
	nv_wr32(dev, 0x001700, chan >> 16);

	/* channel setup */
	nv_wr32(dev, 0x700200, lower_32_bits(chan + 0x1000));
	nv_wr32(dev, 0x700204, upper_32_bits(chan + 0x1000));
	nv_wr32(dev, 0x700208, lower_32_bits(lim3));
	nv_wr32(dev, 0x70020c, upper_32_bits(lim3));

	/* point pgd -> pgt */
	nv_wr32(dev, 0x701000, 0);
	nv_wr32(dev, 0x701004, ((chan + 0x2000) >> 8) | 1);

	/* point pgt -> physical vram for channel (bit 0 = present) */
	pgt3 = 0x2000;
	for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4096, pgt3 += 8) {
		nv_wr32(dev, 0x700000 + pgt3, ((chan + i) >> 8) | 1);
		nv_wr32(dev, 0x700004 + pgt3, 0);
	}

	/* clear rest of pgt */
	for (; i < dev_priv->ramin_size; i += 4096, pgt3 += 8) {
		nv_wr32(dev, 0x700000 + pgt3, 0);
		nv_wr32(dev, 0x700004 + pgt3, 0);
	}

	/* point bar3 at the channel */
	nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));

	/* Global PRAMIN heap */
	ret = drm_mm_init(&dev_priv->ramin_heap, imem,
			  dev_priv->ramin_size - imem);
	if (ret) {
		NV_ERROR(dev, "Failed to init RAMIN heap\n");
		/* NOTE(review): drm_mm_init's error code is discarded here */
		return -ENOMEM;
	}
	return 0;
}
/* Intentionally empty.  NOTE(review): the ramin_heap created in
 * nvc0_instmem_init() is not torn down here — verify whether that is
 * handled elsewhere or deliberately left for driver unload. */
void
nvc0_instmem_takedown(struct drm_device *dev)
{
}