/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

/* Number of pages to speculatively fault in, starting at the faulting page. */
#define TTM_BO_VM_NUM_PREFAULT 16
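
/**
 * ttm_bo_vm_lookup_rb - find the buffer object backing a device address range
 * @bdev: the device whose address-space rb-tree to search
 * @page_start: first page of the range, in device address space
 * @num_pages: number of pages in the range
 *
 * Returns the buffer object whose VM node covers the whole range, or NULL
 * if no object does. The caller must hold bdev->vm_lock (read mode is
 * sufficient) and must take its own reference before dropping the lock.
 */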
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
						     unsigned long page_start,
						     unsigned long num_pages)
{
	struct rb_node *cur = bdev->addr_space_rb.rb_node;
	unsigned long cur_offset;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *best_bo = NULL;

	while (likely(cur != NULL)) {
		bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {
			cur = cur->rb_right;
			best_bo = bo;
			if (page_start == cur_offset)
				break;
		} else
			cur = cur->rb_left;
	}

	if (unlikely(best_bo == NULL))
		return NULL;

	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
		     (page_start + num_pages)))
		return NULL;

	return best_bo;
}
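
/**
 * ttm_bo_vm_fault - .fault handler for TTM buffer object mappings
 *
 * Resolves a fault by translating the faulting address to a page within
 * the buffer object and inserting its pfn with vm_insert_mixed(). Up to
 * TTM_BO_VM_NUM_PREFAULT pages are speculatively prefaulted; only a
 * failure on the first page is reported back to the fault path.
 */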
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after scheduling.
	 */

	ret = ttm_bo_reserve(bo, true, true, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY)
			set_need_resched();
		return VM_FAULT_NOPAGE;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			set_need_resched();
			/* fall through */
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	spin_lock(&bo->lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		ret = ttm_bo_wait(bo, false, true, false);
		spin_unlock(&bo->lock);
		if (unlikely(ret != 0)) {
			retval = (ret != -ERESTARTSYS) ?
			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
			goto out_unlock;
		}
	} else
		spin_unlock(&bo->lock);

	ret = ttm_mem_io_reserve(bdev, &bo->mem);
	if (ret) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;
	page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (bo->mem.bus.is_iomem) {
		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
						vma->vm_page_prot);
	} else {
		ttm = bo->ttm;
		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    vm_get_page_prot(vma->vm_flags) :
		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */

	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) +
			    page_offset;
		else {
			page = ttm_tt_get_page(ttm, page_offset);
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			pfn = page_to_pfn(page);
		}

		ret = vm_insert_mixed(vma, address, pfn);
		/*
		 * Somebody beat us to this PTE or prefaulting to
		 * an already populated PTE, or prefaulting error.
		 */
		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}

out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}
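
/*
 * Each mapping holds its own reference on the buffer object:
 * ttm_bo_vm_open() takes an extra reference when a VMA is duplicated
 * (e.g. across fork()), and ttm_bo_vm_close() drops it when the
 * mapping goes away.
 */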
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	(void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};
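
/**
 * ttm_bo_mmap - mmap entry point for TTM buffer objects
 *
 * Looks up the buffer object backing the range described by the VMA,
 * asks the driver to verify that @filp may access it, and installs
 * ttm_bo_vm_ops. On success, the reference taken during lookup is
 * transferred to vma->vm_private_data.
 */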
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
				 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL)) {
		printk(KERN_ERR TTM_PFX
		       "Could not find buffer object to map.\n");
		return -EINVAL;
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
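
/*
 * ttm_fbdev_mmap - mmap a single buffer object directly, fbdev-style.
 * The whole object is mapped from offset zero, and no verify_access
 * check is performed.
 */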
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
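
/*
 * ttm_bo_io - read()/write()-style access to a buffer object through the
 * device address space. Translates *f_pos to an offset within the object
 * found at that device offset, kmaps the affected page range, and copies
 * at most the remainder of the object to or from user space. Returns the
 * number of bytes transferred or a negative error code; a short transfer
 * is not an error.
 */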
ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	/*
	 * kmap_offset is relative to the object start, so derive the last
	 * kmapped page from the already-clipped io_size rather than from
	 * the device-absolute file position.
	 */
	kmap_end = kmap_offset + ((io_size + page_offset - 1) >> PAGE_SHIFT);
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	/* A failing copy_{to,from}_user() means a bad user-space buffer. */
	if (unlikely(ret != 0))
		return -EFAULT;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
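
/*
 * ttm_bo_fbdev_io - same as ttm_bo_io(), but for a single buffer object
 * mapped from offset zero, so no device address space lookup or
 * verify_access check is needed.
 */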
ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	/* As in ttm_bo_io(), clip the kmapped range to the clipped io_size. */
	kmap_end = kmap_offset + ((io_size + page_offset - 1) >> PAGE_SHIFT);
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	/*
	 * copy_{to,from}_user() returns the number of bytes left to copy,
	 * not a negative errno, so map any failure to -EFAULT instead of
	 * returning a positive leftover count.
	 */
	if (unlikely(ret != 0))
		return -EFAULT;

	*f_pos += io_size;

	return io_size;
}