nouveau_vm.c

/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"
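
/* Map every region of "node" into this VMA's page tables, starting
 * "delta" bytes into the VMA's virtual range.  The walk is done one
 * page table at a time: each vm->map() call is clamped at a PDE
 * boundary, after which pte wraps to 0 in the next table.  The TLBs
 * are flushed once at the end.
 */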
void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_mm_node *r;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	delta = 0;
	list_for_each_entry(r, &node->regions, rl_entry) {
		u64 phys = (u64)r->offset << 12;
		u32 num = r->length >> bits;

		while (num) {
			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

			end = (pte + num);
			if (unlikely(end >= max))
				end = max;
			len = end - pte;

			vm->map(vma, pgt, node, pte, len, phys, delta);

			num -= len;
			pte += len;
			if (unlikely(end >= max)) {
				phys += len << (bits + 12);
				pde++;
				pte = 0;
			}

			delta += (u64)len << vma->node->type;
		}
	}

	vm->flush(vm);
}
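
/* Convenience wrapper: map all of "node" at the start of the VMA. */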
void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
{
	nouveau_vm_map_at(vma, 0, node);
}
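
/* Map a list of DMA page addresses (mem->pages) into the VMA: "length"
 * bytes beginning "delta" bytes in.  Same PDE-boundary walk as
 * nouveau_vm_map_at(), but each chunk is handed to the backend's
 * map_sg() hook along with the advancing page-address list.
 */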
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
		  struct nouveau_mem *mem)
{
	struct nouveau_vm *vm = vma->vm;
	dma_addr_t *list = mem->pages;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num = length >> vma->node->type;
	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->map_sg(vma, pgt, mem, pte, len, list);

		num -= len;
		pte += len;
		list += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}
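
/* Clear "length" bytes worth of PTEs starting "delta" bytes into the
 * VMA, splitting the work at page-table boundaries as above.
 */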
void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
	struct nouveau_vm *vm = vma->vm;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num = length >> vma->node->type;
	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->unmap(pgt, pte, len);

		num -= len;
		pte += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}
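
/* Unmap the entire VMA (node->length is in 4KiB units). */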
void
nouveau_vm_unmap(struct nouveau_vma *vma)
{
	nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}
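
/* Drop one reference on each page table backing PDEs fpde..lpde, and
 * free any table whose count reaches zero, clearing its PDE in every
 * attached page directory first.  Called with vm->mm.mutex held; the
 * mutex is dropped around the gpuobj unref, presumably to avoid
 * lock-ordering problems in the gpuobj code (compare the race comment
 * in nouveau_vm_map_pgt() below).
 */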
static void
nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
{
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_vm_pgt *vpgt;
	struct nouveau_gpuobj *pgt;
	u32 pde;

	for (pde = fpde; pde <= lpde; pde++) {
		vpgt = &vm->pgt[pde - vm->fpde];
		if (--vpgt->refcount[big])
			continue;

		pgt = vpgt->obj[big];
		vpgt->obj[big] = NULL;

		list_for_each_entry(vpgd, &vm->pgd_list, head) {
			vm->map_pgt(vpgd->obj, pde, vpgt->obj);
		}

		mutex_unlock(&vm->mm.mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm.mutex);
	}
}
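
/* Allocate a page table covering one PDE's worth of "type"-shift pages
 * (8 bytes per PTE) and write it into every attached page directory.
 * The mutex is dropped across the allocation, so a racing caller may
 * fill the PDE first; if so, the fresh table is released and the
 * existing one's refcount is bumped instead.
 */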
static int
nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
{
	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_gpuobj *pgt;
	int big = (type != vm->spg_shift);
	u32 pgt_size;
	int ret;

	pgt_size  = (1 << (vm->pgt_bits + 12)) >> type;
	pgt_size *= 8;

	mutex_unlock(&vm->mm.mutex);
	ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
	mutex_lock(&vm->mm.mutex);
	if (unlikely(ret))
		return ret;

	/* someone beat us to filling the PDE while we didn't have the lock */
	if (unlikely(vpgt->refcount[big]++)) {
		mutex_unlock(&vm->mm.mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm.mutex);
		return 0;
	}

	vpgt->obj[big] = pgt;
	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		vm->map_pgt(vpgd->obj, pde, vpgt->obj);
	}

	return 0;
}
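
/* Allocate "size" bytes of virtual address space from the VM, using
 * pages of (1 << page_shift) bytes, and take a reference on (creating
 * if necessary) every page table the range touches.  On failure, any
 * tables referenced so far are released again.
 */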
int
nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
	       u32 access, struct nouveau_vma *vma)
{
	u32 align = (1 << page_shift) >> 12;
	u32 msize = size >> 12;
	u32 fpde, lpde, pde;
	int ret;

	mutex_lock(&vm->mm.mutex);
	ret = nouveau_mm_get(&vm->mm, page_shift, msize, 0, align, &vma->node);
	if (unlikely(ret != 0)) {
		mutex_unlock(&vm->mm.mutex);
		return ret;
	}

	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
	for (pde = fpde; pde <= lpde; pde++) {
		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
		int big = (vma->node->type != vm->spg_shift);

		if (likely(vpgt->refcount[big])) {
			vpgt->refcount[big]++;
			continue;
		}

		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
		if (ret) {
			if (pde != fpde)
				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
			nouveau_mm_put(&vm->mm, vma->node);
			mutex_unlock(&vm->mm.mutex);
			vma->node = NULL;
			return ret;
		}
	}
	mutex_unlock(&vm->mm.mutex);

	vma->vm = vm;
	vma->offset = (u64)vma->node->offset << 12;
	vma->access = access;
	return 0;
}
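
/* Release an allocation made by nouveau_vm_get(): drop the range's
 * page-table references and return its address space to the allocator.
 */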
void
nouveau_vm_put(struct nouveau_vma *vma)
{
	struct nouveau_vm *vm = vma->vm;
	u32 fpde, lpde;

	if (unlikely(vma->node == NULL))
		return;

	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;

	mutex_lock(&vm->mm.mutex);
	nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
	nouveau_mm_put(&vm->mm, vma->node);
	vma->node = NULL;
	mutex_unlock(&vm->mm.mutex);
}
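
/* Create a VM spanning "length" bytes at "offset"; the allocatable
 * window begins at "mm_offset".  Backend hooks and page-directory
 * geometry depend on the chipset: NV50 PDEs cover 29 bits (512 MiB)
 * each, NVC0 (Fermi) and later cover 27 bits (128 MiB).
 */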
int
nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
	       struct nouveau_vm **pvm)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_vm *vm;
	u64 mm_length = (offset + length) - mm_offset;
	u32 block, pgt_bits;
	int ret;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	if (dev_priv->card_type == NV_50) {
		vm->map_pgt = nv50_vm_map_pgt;
		vm->map = nv50_vm_map;
		vm->map_sg = nv50_vm_map_sg;
		vm->unmap = nv50_vm_unmap;
		vm->flush = nv50_vm_flush;
		vm->spg_shift = 12;
		vm->lpg_shift = 16;

		pgt_bits = 29;
		block = (1 << pgt_bits);
		if (length < block)
			block = length;
	} else
	if (dev_priv->card_type >= NV_C0) {
		vm->map_pgt = nvc0_vm_map_pgt;
		vm->map = nvc0_vm_map;
		vm->map_sg = nvc0_vm_map_sg;
		vm->unmap = nvc0_vm_unmap;
		vm->flush = nvc0_vm_flush;
		vm->spg_shift = 12;
		vm->lpg_shift = 17;
		pgt_bits = 27;
		block = 4096;
	} else {
		kfree(vm);
		return -ENOSYS;
	}

	vm->fpde = offset >> pgt_bits;
	vm->lpde = (offset + length - 1) >> pgt_bits;
	vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
	if (!vm->pgt) {
		kfree(vm);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&vm->pgd_list);
	vm->dev = dev;
	vm->refcount = 1;
	vm->pgt_bits = pgt_bits - 12;

	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
			      block >> 12);
	if (ret) {
		kfree(vm->pgt);	/* don't leak the page-table array on failure */
		kfree(vm);
		return ret;
	}

	*pvm = vm;
	return 0;
}
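
/* Attach a page directory to the VM: take a reference on the PGD and
 * mirror every existing page table into it, so all attached PGDs stay
 * in sync with vm->pgt[].
 */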
static int
nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm_pgd *vpgd;
	int i;

	if (!pgd)
		return 0;

	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
	if (!vpgd)
		return -ENOMEM;

	nouveau_gpuobj_ref(pgd, &vpgd->obj);

	mutex_lock(&vm->mm.mutex);
	for (i = vm->fpde; i <= vm->lpde; i++)
		vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
	list_add(&vpgd->head, &vm->pgd_list);
	mutex_unlock(&vm->mm.mutex);
	return 0;
}
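
/* Detach a page directory: unhook it from pgd_list and drop the
 * reference taken in nouveau_vm_link().  As elsewhere, the gpuobj
 * unref happens outside the mutex.
 */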
static void
nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
{
	struct nouveau_vm_pgd *vpgd, *tmp;
	struct nouveau_gpuobj *pgd = NULL;

	if (!mpgd)
		return;

	mutex_lock(&vm->mm.mutex);
	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		if (vpgd->obj == mpgd) {
			pgd = vpgd->obj;
			list_del(&vpgd->head);
			kfree(vpgd);
			break;
		}
	}
	mutex_unlock(&vm->mm.mutex);

	nouveau_gpuobj_ref(NULL, &pgd);
}
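
/* Final teardown, called when the last reference is dropped: unlink
 * any remaining page directories, then free the allocator state and
 * the page-table array.
 */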
static void
nouveau_vm_del(struct nouveau_vm *vm)
{
	struct nouveau_vm_pgd *vpgd, *tmp;

	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		nouveau_vm_unlink(vm, vpgd->obj);
	}

	nouveau_mm_fini(&vm->mm);
	kfree(vm->pgt);
	kfree(vm);
}
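
/* Reference-counted assignment in the style of nouveau_gpuobj_ref():
 * point *ptr at "ref" (which may be NULL), linking "pgd" into the new
 * VM and bumping its refcount, then unlink "pgd" from the old VM and
 * drop its refcount, deleting it on zero.  A typical caller might do
 * (hypothetical names, not from this file):
 *
 *	ret = nouveau_vm_ref(chan_vm, &priv->vm, priv->pgd);
 *	...
 *	nouveau_vm_ref(NULL, &priv->vm, priv->pgd);
 */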
int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
	       struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm *vm;
	int ret;

	vm = ref;
	if (vm) {
		ret = nouveau_vm_link(vm, pgd);
		if (ret)
			return ret;

		vm->refcount++;
	}

	vm = *ptr;
	*ptr = ref;
	if (vm) {
		nouveau_vm_unlink(vm, pgd);
		if (--vm->refcount == 0)
			nouveau_vm_del(vm);
	}

	return 0;
}