ttm_bo_vm.c

/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#define TTM_BO_VM_NUM_PREFAULT 16

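/*
 * ttm_bo_vm_lookup_rb - look up the buffer object backing a page range of
 * the device address space.
 *
 * Walks the device's red-black tree, which is keyed on each object's start
 * page in the device file, to the object with the largest start not
 * exceeding @page_start, and checks that it covers the whole range of
 * @num_pages pages. Returns the object, or NULL if no object covers the
 * range. The caller must hold bdev->vm_lock, and must take its own
 * reference before dropping the lock if the object is used afterwards.
 */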
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
						     unsigned long page_start,
						     unsigned long num_pages)
{
	struct rb_node *cur = bdev->addr_space_rb.rb_node;
	unsigned long cur_offset;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *best_bo = NULL;

	while (likely(cur != NULL)) {
		bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {
			cur = cur->rb_right;
			best_bo = bo;
			if (page_start == cur_offset)
				break;
		} else
			cur = cur->rb_left;
	}

	if (unlikely(best_bo == NULL))
		return NULL;

	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
		     (page_start + num_pages)))
		return NULL;

	return best_bo;
}

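/*
 * ttm_bo_vm_fault - .fault handler for TTM buffer object vmas.
 *
 * Reserves the object, waits for a pipelined move to complete, fixes up
 * the vma's caching bits to match the object's current placement, and then
 * inserts up to TTM_BO_VM_NUM_PREFAULT pfns starting at the faulting
 * address. Only a failure on the first page is reported back to the core
 * VM; a later prefault failure just cuts the prefault short.
 */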
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after scheduling.
	 */

	ret = ttm_bo_reserve(bo, true, true, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY)
			set_need_resched();
		return VM_FAULT_NOPAGE;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			set_need_resched();
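			/* fall through: -EBUSY also retries the fault */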
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	spin_lock(&bdev->fence_lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		ret = ttm_bo_wait(bo, false, true, false);
		spin_unlock(&bdev->fence_lock);
		if (unlikely(ret != 0)) {
			retval = (ret != -ERESTARTSYS) ?
			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
			goto out_unlock;
		}
	} else
		spin_unlock(&bdev->fence_lock);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;
	page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by the bo->mutex, as we should
	 * be the only writers. There shouldn't really be any readers
	 * of these bits except within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (bo->mem.bus.is_iomem) {
		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
						vma->vm_page_prot);
	} else {
		ttm = bo->ttm;
		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    vm_get_page_prot(vma->vm_flags) :
		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */

	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
		else {
			page = ttm_tt_get_page(ttm, page_offset);
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			pfn = page_to_pfn(page);
		}

		ret = vm_insert_mixed(vma, address, pfn);
		/*
		 * Somebody beat us to this PTE, we prefaulted into an
		 * already populated PTE, or the prefault hit an error.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}

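/*
 * The vma open/close callbacks below keep the object's reference count in
 * sync with the number of vmas using it: each new vma (e.g. one created by
 * fork()) takes a reference, and unmapping drops it again.
 */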
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	(void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};

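/*
 * ttm_bo_mmap - mmap entry point for TTM-based drivers.
 *
 * Looks up the buffer object backing the mapped range of the device file,
 * asks the driver to verify that @filp may access it, and sets up @vma so
 * that faults are handled by ttm_bo_vm_ops. Drivers typically call this
 * from their file_operations::mmap callback; a minimal sketch, with
 * hypothetical "mydrv" names standing in for a real driver:
 *
 *	static int mydrv_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct mydrv_device *mdev = mydrv_device_from_file(filp);
 *
 *		return ttm_bo_mmap(filp, vma, &mdev->bdev);
 *	}
 */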
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
				 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL)) {
		printk(KERN_ERR TTM_PFX
		       "Could not find buffer object to map.\n");
		return -EINVAL;
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

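/*
 * ttm_fbdev_mmap - set up @vma to map a single, known buffer object, as
 * used for fbdev emulation. The object must be mapped at offset zero of
 * the fbdev file, and the vma takes its own reference on it.
 */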
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);

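/*
 * ttm_bo_io - read()/write() helper for TTM device files.
 *
 * Looks up the buffer object containing the page at *@f_pos, verifies
 * access, kmaps the affected page range and copies at most @count bytes
 * between the object and the relevant one of @wbuf (write) and @rbuf
 * (read). The transfer is clipped at the end of the object. Returns the
 * number of bytes transferred, or a negative error code.
 */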
ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}

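/*
 * ttm_bo_fbdev_io - like ttm_bo_io(), but operates on a single, known
 * buffer object instead of a device address space: *@f_pos is an offset
 * into @bo itself. Note that once the object has been reserved and mapped
 * successfully, this function also drops a reference on @bo, while the
 * early error paths do not.
 */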
ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return ret;

	*f_pos += io_size;

	return io_size;
}