/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#define TTM_BO_VM_NUM_PREFAULT 16
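
/*
 * Walk the device's address-space RB tree (keyed on vm_node->start) and
 * return the buffer object whose VM node fully covers the page range
 * [page_start, page_start + num_pages), or NULL if no object does.
 * Callers in this file hold bdev->vm_lock around the lookup.
 */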
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
						     unsigned long page_start,
						     unsigned long num_pages)
{
	struct rb_node *cur = bdev->addr_space_rb.rb_node;
	unsigned long cur_offset;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *best_bo = NULL;

	while (likely(cur != NULL)) {
		bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {
			cur = cur->rb_right;
			best_bo = bo;
			if (page_start == cur_offset)
				break;
		} else
			cur = cur->rb_left;
	}

	if (unlikely(best_bo == NULL))
		return NULL;

	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
		     (page_start + num_pages)))
		return NULL;

	return best_bo;
}
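
/*
 * The .fault handler for TTM-backed VMAs: reserve the buffer object
 * (trylock, to avoid the lock inversion against mmap_sem described
 * below), wait for any pipelined move to finish, fix up the page
 * protection for the current placement, then insert the faulting PFN
 * plus up to TTM_BO_VM_NUM_PREFAULT speculatively prefaulted pages.
 */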
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after scheduling.
	 */

	ret = ttm_bo_reserve(bo, true, true, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY)
			set_need_resched();
		return VM_FAULT_NOPAGE;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			set_need_resched();
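			/* fall through: -EBUSY also retries the fault */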
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	spin_lock(&bdev->fence_lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		ret = ttm_bo_wait(bo, false, true, false);
		spin_unlock(&bdev->fence_lock);
		if (unlikely(ret != 0)) {
			retval = (ret != -ERESTARTSYS) ?
			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
			goto out_unlock;
		}
	} else
		spin_unlock(&bdev->fence_lock);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;
	page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (bo->mem.bus.is_iomem) {
		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
						vma->vm_page_prot);
	} else {
		ttm = bo->ttm;
		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    vm_get_page_prot(vma->vm_flags) :
		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);

		/* Allocate all pages at once, the most common usage */
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
		else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			pfn = page_to_pfn(page);
		}

		ret = vm_insert_mixed(vma, address, pfn);
		/*
		 * Somebody beat us to this PTE or prefaulting to
		 * an already populated PTE, or prefaulting error.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}
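
/*
 * The VMA open/close callbacks simply take and drop a reference on the
 * buffer object backing the mapping; the final close clears
 * vm_private_data after dropping the reference.
 */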
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	(void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};
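
/*
 * Set up a userspace mapping of a buffer object: look the object up by
 * the VMA's page offset in the device address space, let the driver
 * verify that the caller may access it, then install ttm_bo_vm_ops on
 * the VMA.
 */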
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
				 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL)) {
		printk(KERN_ERR TTM_PFX
		       "Could not find buffer object to map.\n");
		return -EINVAL;
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;
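
	/*
	 * VM_MIXEDMAP allows inserting raw PFNs that need not be backed
	 * by a struct page; VM_IO marks this as a device mapping, and
	 * VM_RESERVED | VM_DONTEXPAND keep the core VM from swapping out
	 * or growing the mapping.
	 */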
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
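
/*
 * Like ttm_bo_mmap(), but for a single, already-known buffer object
 * (the fbdev case): no address-space lookup or access check, and the
 * mapping must start at offset zero within the object.
 */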
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
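
/*
 * read()/write()-style access to a buffer object through the device
 * address space: look up the object at *f_pos, verify access, reserve
 * the object, kmap the affected page range and copy to or from the
 * user buffer. On success, *f_pos is advanced and the number of bytes
 * transferred is returned.
 */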
ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;

out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
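
/*
 * Same as ttm_bo_io(), but for a known buffer object (the fbdev case):
 * *f_pos is interpreted as an offset from the start of the object, so
 * no address-space lookup or access check is needed.
 */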
ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return ret;

	*f_pos += io_size;

	return io_size;
}
  397. }