nouveau_vm.c

/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"

#include "nouveau_mm.h"
#include "nouveau_vm.h"
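
/*
 * Map a VRAM allocation into the GPU virtual address range described by
 * 'vma', starting 'delta' bytes into the mapping.  Walks each region of the
 * VRAM node, filling page table entries and splitting the writes wherever
 * the range crosses a page table (PDE) boundary.
 */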
void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
{
        struct nouveau_vm *vm = vma->vm;
        struct nouveau_mm_node *r;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (vm->pgt_bits - bits);
        u32 end, len;

        list_for_each_entry(r, &vram->regions, rl_entry) {
                u64 phys = (u64)r->offset << 12;
                u32 num  = r->length >> bits;

                while (num) {
                        struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;

                        end = (pte + num);
                        if (unlikely(end >= max))
                                end = max;
                        len = end - pte;

                        vm->map(vma, pgt, vram, pte, len, phys);

                        num -= len;
                        pte += len;
                        if (unlikely(end >= max)) {
                                /* crossed into the next page table: advance
                                 * the physical address past the PTEs just
                                 * written before continuing
                                 */
                                phys += (u64)len << (bits + 12);
                                pde++;
                                pte = 0;
                        }
                }
        }

        vm->flush(vm);
}
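
/* Map an entire VRAM node at the start of the VMA. */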
void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram)
{
        nouveau_vm_map_at(vma, 0, vram);
}
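
/*
 * Map a scatter/gather list of DMA addresses ('list', one entry per page)
 * into the VMA, again splitting the PTE writes at page table boundaries.
 */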
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
                  dma_addr_t *list)
{
        struct nouveau_vm *vm = vma->vm;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 num  = length >> vma->node->type;
        u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (vm->pgt_bits - bits);
        u32 end, len;

        while (num) {
                struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;

                end = (pte + num);
                if (unlikely(end >= max))
                        end = max;
                len = end - pte;

                vm->map_sg(vma, pgt, pte, list, len);

                num  -= len;
                pte  += len;
                list += len;
                if (unlikely(end >= max)) {
                        pde++;
                        pte = 0;
                }
        }

        vm->flush(vm);
}
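
/*
 * Clear 'length' bytes worth of page table entries, starting 'delta' bytes
 * into the mapping.
 */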
void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
        struct nouveau_vm *vm = vma->vm;
        u32 offset = vma->node->offset + (delta >> 12);
        u32 bits = vma->node->type - 12;
        u32 num  = length >> vma->node->type;
        u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
        u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
        u32 max  = 1 << (vm->pgt_bits - bits);
        u32 end, len;

        while (num) {
                struct nouveau_gpuobj *pgt = vm->pgt[pde].obj;

                end = (pte + num);
                if (unlikely(end >= max))
                        end = max;
                len = end - pte;

                vm->unmap(pgt, pte, len);

                num -= len;
                pte += len;
                if (unlikely(end >= max)) {
                        pde++;
                        pte = 0;
                }
        }

        vm->flush(vm);
}
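
/* Unmap the entire VMA. */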
void
nouveau_vm_unmap(struct nouveau_vma *vma)
{
        nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}
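
/*
 * Drop a reference on each page table in [fpde, lpde].  When the last
 * reference goes away, the PDE is cleared in every page directory sharing
 * this VM and the page table object is released.  The mm mutex is dropped
 * around the release since destroying the gpuobj can sleep and take other
 * locks.
 */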
static void
nouveau_vm_unmap_pgt(struct nouveau_vm *vm, u32 fpde, u32 lpde)
{
        struct nouveau_vm_pgd *vpgd;
        struct nouveau_vm_pgt *vpgt;
        struct nouveau_gpuobj *pgt;
        u32 pde;

        for (pde = fpde; pde <= lpde; pde++) {
                vpgt = &vm->pgt[pde - vm->fpde];
                if (--vpgt->refcount)
                        continue;

                list_for_each_entry(vpgd, &vm->pgd_list, head) {
                        vm->unmap_pgt(vpgd->obj, pde);
                }

                pgt = vpgt->obj;
                vpgt->obj = NULL;

                mutex_unlock(&vm->mm->mutex);
                nouveau_gpuobj_ref(NULL, &pgt);
                mutex_lock(&vm->mm->mutex);
        }
}
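
/*
 * Allocate the page table backing 'pde' and point every attached page
 * directory at it.  The allocation happens with the mm mutex dropped, so on
 * re-acquire another thread may have populated the PDE already; in that
 * case the fresh allocation is released again.
 */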
static int
nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
{
        struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
        struct nouveau_vm_pgd *vpgd;
        struct nouveau_gpuobj *pgt;
        u32 pgt_size;
        int ret;

        pgt_size  = (1 << (vm->pgt_bits + 12)) >> type;
        pgt_size *= 8;

        mutex_unlock(&vm->mm->mutex);
        ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
                                 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
        mutex_lock(&vm->mm->mutex);
        if (unlikely(ret))
                return ret;

        /* someone beat us to filling the PDE while we didn't have the lock */
        if (unlikely(vpgt->refcount++)) {
                mutex_unlock(&vm->mm->mutex);
                nouveau_gpuobj_ref(NULL, &pgt);
                mutex_lock(&vm->mm->mutex);
                return 0;
        }

        list_for_each_entry(vpgd, &vm->pgd_list, head) {
                vm->map_pgt(vpgd->obj, type, pde, pgt);
        }

        vpgt->page_shift = type;
        vpgt->obj = pgt;
        return 0;
}
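
/*
 * Allocate a 'size'-byte range of GPU virtual address space, aligned to the
 * requested page size, taking references on (and allocating where needed)
 * the page tables that cover it.
 */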
int
nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
               u32 access, struct nouveau_vma *vma)
{
        u32 align = (1 << page_shift) >> 12;
        u32 msize = size >> 12;
        u32 fpde, lpde, pde;
        int ret;

        mutex_lock(&vm->mm->mutex);
        ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
        if (unlikely(ret != 0)) {
                mutex_unlock(&vm->mm->mutex);
                return ret;
        }

        fpde = (vma->node->offset >> vm->pgt_bits);
        lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
        for (pde = fpde; pde <= lpde; pde++) {
                struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];

                if (likely(vpgt->refcount)) {
                        vpgt->refcount++;
                        continue;
                }

                ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
                if (ret) {
                        if (pde != fpde)
                                nouveau_vm_unmap_pgt(vm, fpde, pde - 1);
                        nouveau_mm_put(vm->mm, vma->node);
                        mutex_unlock(&vm->mm->mutex);
                        vma->node = NULL;
                        return ret;
                }
        }
        mutex_unlock(&vm->mm->mutex);

        vma->vm     = vm;
        vma->offset = (u64)vma->node->offset << 12;
        vma->access = access;
        return 0;
}
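
/* Release a VMA's address space and drop its page table references. */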
void
nouveau_vm_put(struct nouveau_vma *vma)
{
        struct nouveau_vm *vm = vma->vm;
        u32 fpde, lpde;

        if (unlikely(vma->node == NULL))
                return;
        fpde = (vma->node->offset >> vm->pgt_bits);
        lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;

        mutex_lock(&vm->mm->mutex);
        nouveau_mm_put(vm->mm, vma->node);
        vma->node = NULL;
        nouveau_vm_unmap_pgt(vm, fpde, lpde);
        mutex_unlock(&vm->mm->mutex);
}
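
/*
 * Create a new VM covering [offset, offset + length).  Only the NV50-family
 * page table layout is implemented here; the mm allocator manages the space
 * from 'mm_offset' upwards in 4KiB units.
 */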
int
nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
               u8 pgt_bits, u8 spg_shift, u8 lpg_shift,
               struct nouveau_vm **pvm)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_vm *vm;
        u64 mm_length = (offset + length) - mm_offset;
        u32 block;
        int ret;

        vm = kzalloc(sizeof(*vm), GFP_KERNEL);
        if (!vm)
                return -ENOMEM;

        if (dev_priv->card_type == NV_50) {
                vm->map_pgt = nv50_vm_map_pgt;
                vm->unmap_pgt = nv50_vm_unmap_pgt;
                vm->map = nv50_vm_map;
                vm->map_sg = nv50_vm_map_sg;
                vm->unmap = nv50_vm_unmap;
                vm->flush = nv50_vm_flush;
        } else {
                kfree(vm);
                return -ENOSYS;
        }

        vm->fpde = offset >> pgt_bits;
        vm->lpde = (offset + length - 1) >> pgt_bits;
        vm->pgt  = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt),
                           GFP_KERNEL);
        if (!vm->pgt) {
                kfree(vm);
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&vm->pgd_list);
        vm->dev = dev;
        vm->refcount = 1;
        vm->pgt_bits = pgt_bits - 12;
        vm->spg_shift = spg_shift;
        vm->lpg_shift = lpg_shift;

        block = (1 << pgt_bits);
        if (length < block)
                block = length;

        ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
                              block >> 12);
        if (ret) {
                /* don't leak the page table array on failure */
                kfree(vm->pgt);
                kfree(vm);
                return ret;
        }

        *pvm = vm;
        return 0;
}
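
/*
 * Attach a page directory to this VM: write valid PDEs for every existing
 * page table and clear the rest, then track the PGD so future page table
 * changes are mirrored into it.
 */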
static int
nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
        struct nouveau_vm_pgd *vpgd;
        int i;

        if (!pgd)
                return 0;

        vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
        if (!vpgd)
                return -ENOMEM;

        nouveau_gpuobj_ref(pgd, &vpgd->obj);

        mutex_lock(&vm->mm->mutex);
        for (i = vm->fpde; i <= vm->lpde; i++) {
                struct nouveau_vm_pgt *vpgt = &vm->pgt[i - vm->fpde];

                if (!vpgt->obj) {
                        vm->unmap_pgt(pgd, i);
                        continue;
                }

                vm->map_pgt(pgd, vpgt->page_shift, i, vpgt->obj);
        }
        list_add(&vpgd->head, &vm->pgd_list);
        mutex_unlock(&vm->mm->mutex);
        return 0;
}
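
/* Detach a page directory from the VM and drop the reference held on it. */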
static void
nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
        struct nouveau_vm_pgd *vpgd, *tmp;

        if (!pgd)
                return;

        mutex_lock(&vm->mm->mutex);
        list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
                if (vpgd->obj != pgd)
                        continue;

                list_del(&vpgd->head);
                nouveau_gpuobj_ref(NULL, &vpgd->obj);
                kfree(vpgd);
        }
        mutex_unlock(&vm->mm->mutex);
}
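
/*
 * Final teardown once the last reference is gone: unlink any remaining page
 * directories and tear down the address space allocator.
 */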
static void
nouveau_vm_del(struct nouveau_vm *vm)
{
        struct nouveau_vm_pgd *vpgd, *tmp;

        list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
                nouveau_vm_unlink(vm, vpgd->obj);
        }
        WARN_ON(nouveau_mm_fini(&vm->mm) != 0);

        kfree(vm->pgt);
        kfree(vm);
}
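
/*
 * Adjust VM reference counts: take a reference on 'ref' (optionally linking
 * 'pgd' into it), store it in '*ptr', and drop the reference on whatever
 * '*ptr' previously pointed at, destroying the VM when its count hits zero.
 */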
int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
               struct nouveau_gpuobj *pgd)
{
        struct nouveau_vm *vm;
        int ret;

        vm = ref;
        if (vm) {
                ret = nouveau_vm_link(vm, pgd);
                if (ret)
                        return ret;

                vm->refcount++;
        }

        vm = *ptr;
        *ptr = ref;
        if (vm) {
                nouveau_vm_unlink(vm, pgd);

                if (--vm->refcount == 0)
                        nouveau_vm_del(vm);
        }

        return 0;
}
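
/*
 * Usage sketch (not part of the original file): roughly how the nouveau
 * buffer-object code of this era drives the interfaces above.  Error
 * handling and the VRAM allocation itself are elided; 'vram' stands in for
 * a previously allocated struct nouveau_vram.
 *
 *	struct nouveau_vma vma = {};
 *	int ret;
 *
 *	ret = nouveau_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &vma);
 *	if (ret == 0) {
 *		nouveau_vm_map(&vma, vram);
 *		...
 *		nouveau_vm_unmap(&vma);
 *		nouveau_vm_put(&vma);
 *	}
 */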