/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#define TTM_BO_VM_NUM_PREFAULT 16
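
/*
 * Find the buffer object whose address-space node fully covers the page
 * range [page_start, page_start + num_pages). Walks the device's rb-tree
 * of bo address-space nodes, keyed on vm_node->start; callers must hold
 * bdev->vm_lock. Returns NULL if no single bo covers the whole range.
 */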
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
						     unsigned long page_start,
						     unsigned long num_pages)
{
	struct rb_node *cur = bdev->addr_space_rb.rb_node;
	unsigned long cur_offset;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *best_bo = NULL;

	while (likely(cur != NULL)) {
		bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {
			cur = cur->rb_right;
			best_bo = bo;
			if (page_start == cur_offset)
				break;
		} else
			cur = cur->rb_left;
	}

	if (unlikely(best_bo == NULL))
		return NULL;

	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
		     (page_start + num_pages)))
		return NULL;

	return best_bo;
}
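
/*
 * Fault handler for bo mappings. The bo is reserved with a trylock only,
 * to avoid inverting the mmap_sem / bo_reserve lock order; on contention
 * the fault is retried after scheduling. Once reserved, wait out any
 * pipelined move, pick the page protection matching the bo's caching
 * state, and prefault up to TTM_BO_VM_NUM_PREFAULT pages starting at the
 * faulting address.
 */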
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	bool is_iomem;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after scheduling.
	 */

	ret = ttm_bo_reserve(bo, true, true, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY)
			set_need_resched();
		return VM_FAULT_NOPAGE;
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	spin_lock(&bo->lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		ret = ttm_bo_wait(bo, false, true, false);
		spin_unlock(&bo->lock);
		if (unlikely(ret != 0)) {
			retval = (ret != -ERESTART) ?
			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
			goto out_unlock;
		}
	} else
		spin_unlock(&bo->lock);

	ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
				&bus_size);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	is_iomem = (bus_size != 0);

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;
	page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */

	if (is_iomem) {
		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
						vma->vm_page_prot);
	} else {
		ttm = bo->ttm;
		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    vm_get_page_prot(vma->vm_flags) :
		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */

	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {

		if (is_iomem)
			pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
			    page_offset;
		else {
			page = ttm_tt_get_page(ttm, page_offset);
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			pfn = page_to_pfn(page);
		}

		ret = vm_insert_mixed(vma, address, pfn);

		/*
		 * Somebody beat us to this PTE or prefaulting to
		 * an already populated PTE, or prefaulting error.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}

out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}
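
/*
 * VMA open/close: take a bo reference for every new mapping (including
 * copies made by fork()) and drop it when the mapping goes away, so the
 * bo outlives all of its userspace mappings.
 */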
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	(void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

static struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};
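
/*
 * Set up a userspace mapping of a buffer object. The bo is looked up from
 * the fake mmap offset in vma->vm_pgoff, the driver gets to verify access
 * rights, and ttm_bo_vm_ops is installed. The lookup reference is
 * transferred to vma->vm_private_data.
 */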
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
				 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL)) {
		printk(KERN_ERR TTM_PFX
		       "Could not find buffer object to map.\n");
		return -EINVAL;
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
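
/*
 * Variant of ttm_bo_mmap() for fbdev-style mappings where the bo is
 * already known to the caller, so no offset lookup or driver access check
 * is needed; only a zero page offset is accepted.
 */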
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
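
/*
 * Read from or write to a bo through a device-file offset: look up the bo
 * covering *f_pos, reserve it, kmap the affected page range and copy
 * between it and the user buffer. At most the bytes remaining in the
 * object from the current position are transferred per call.
 */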
ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	switch (ret) {
	case 0:
		break;
	case -ERESTART:
		ret = -EINTR;
		goto out_unref;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFAULT;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
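
/*
 * As ttm_bo_io(), but for a bo supplied by the caller: *f_pos is an
 * offset into the object itself rather than into the device address
 * space, and no lookup or access check is performed.
 */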
ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	switch (ret) {
	case 0:
		break;
	case -ERESTART:
		return -EINTR;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFAULT;

	*f_pos += io_size;

	return io_size;
}