ttm_bo_vm.c

/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#define TTM_BO_VM_NUM_PREFAULT 16

static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;

	spin_lock(&bdev->fence_lock);
	if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	ret = ttm_bo_wait(bo, false, false, true);
	if (likely(ret == 0))
		goto out_unlock;

	/*
	 * If possible, avoid waiting for GPU with mmap_sem
	 * held.
	 */
	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		ret = VM_FAULT_RETRY;
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_unlock;

		up_read(&vma->vm_mm->mmap_sem);
		(void) ttm_bo_wait(bo, false, true, false);
		goto out_unlock;
	}

	/*
	 * Ordinary wait.
	 */
	ret = ttm_bo_wait(bo, false, true, false);
	if (unlikely(ret != 0))
		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;

out_unlock:
	spin_unlock(&bdev->fence_lock);
	return ret;
}

static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after scheduling.
	 */
	ret = ttm_bo_reserve(bo, true, true, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY)
			set_need_resched();
		return VM_FAULT_NOPAGE;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			set_need_resched();
			/* fall through */
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
	if (unlikely(ret != 0)) {
		retval = ret;
		goto out_unlock;
	}

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
	page_last = vma_pages(vma) +
		drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (bo->mem.bus.is_iomem) {
		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
						vma->vm_page_prot);
	} else {
		ttm = bo->ttm;
		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    vm_get_page_prot(vma->vm_flags) :
		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);

		/* Allocate all pages at once, most common usage */
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
		else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			pfn = page_to_pfn(page);
		}

		ret = vm_insert_mixed(vma, address, pfn);
		/*
		 * Somebody beat us to this PTE or prefaulting to
		 * an already populated PTE, or prefaulting error.
		 */
		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}

static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	(void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};

static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(&bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object, vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}

	drm_vma_offset_unlock_lookup(&bdev->vma_manager);

	if (!bo)
		pr_err("Could not find buffer object to map\n");

	return bo;
}

int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */
	vma->vm_private_data = bo;
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
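
/*
 * Illustrative sketch, not part of this file: a DRM driver typically
 * forwards its file_operations .mmap hook to ttm_bo_mmap(), passing the
 * driver's own ttm_bo_device. The "mydrv" names below are hypothetical;
 * only ttm_bo_mmap() itself is defined here.
 *
 * static int mydrv_mmap(struct file *filp, struct vm_area_struct *vma)
 * {
 *	struct drm_file *file_priv = filp->private_data;
 *	struct mydrv_device *mdev = file_priv->minor->dev->dev_private;
 *
 *	return ttm_bo_mmap(filp, vma, &mdev->bdev);
 * }
 *
 * ttm_bo_mmap() also requires a verify_access() callback in the driver's
 * ttm_bo_driver; a minimal, fully permissive sketch just returns 0:
 *
 * static int mydrv_verify_access(struct ttm_buffer_object *bo,
 *				  struct file *filp)
 * {
 *	return 0;
 * }
 */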

int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
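
/*
 * Illustrative sketch, not part of this file: ttm_fbdev_mmap() is meant
 * to be called from an fbdev fb_ops .fb_mmap hook to map the single
 * buffer object backing the fbdev framebuffer. The "mydrv" names are
 * hypothetical.
 *
 * static int mydrv_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 * {
 *	struct mydrv_fbdev *mfbdev = info->par;
 *
 *	return ttm_fbdev_mmap(vma, mfbdev->bo);
 * }
 */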