/* vmwgfx_gmr.c */
  1. /**************************************************************************
  2. *
  3. * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
  4. * All Rights Reserved.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a
  7. * copy of this software and associated documentation files (the
  8. * "Software"), to deal in the Software without restriction, including
  9. * without limitation the rights to use, copy, modify, merge, publish,
  10. * distribute, sub license, and/or sell copies of the Software, and to
  11. * permit persons to whom the Software is furnished to do so, subject to
  12. * the following conditions:
  13. *
  14. * The above copyright notice and this permission notice (including the
  15. * next paragraph) shall be included in all copies or substantial portions
  16. * of the Software.
  17. *
  18. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20. * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21. * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22. * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23. * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24. * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25. *
  26. **************************************************************************/
  27. #include "vmwgfx_drv.h"
  28. #include "drmP.h"
  29. #include "ttm/ttm_bo_driver.h"
  30. /**
  31. * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
  32. * the number of used descriptors.
  33. */
  34. static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
  35. struct page *pages[],
  36. unsigned long num_pages)
  37. {
  38. struct page *page, *next;
  39. struct svga_guest_mem_descriptor *page_virtual = NULL;
  40. struct svga_guest_mem_descriptor *desc_virtual = NULL;
  41. unsigned int desc_per_page;
  42. unsigned long prev_pfn;
  43. unsigned long pfn;
  44. int ret;
  45. desc_per_page = PAGE_SIZE /
  46. sizeof(struct svga_guest_mem_descriptor) - 1;
  47. while (likely(num_pages != 0)) {
  48. page = alloc_page(__GFP_HIGHMEM);
  49. if (unlikely(page == NULL)) {
  50. ret = -ENOMEM;
  51. goto out_err;
  52. }
  53. list_add_tail(&page->lru, desc_pages);
  54. /*
  55. * Point previous page terminating descriptor to this
  56. * page before unmapping it.
  57. */
  58. if (likely(page_virtual != NULL)) {
  59. desc_virtual->ppn = page_to_pfn(page);
  60. kunmap_atomic(page_virtual, KM_USER0);
  61. }
  62. page_virtual = kmap_atomic(page, KM_USER0);
  63. desc_virtual = page_virtual - 1;
  64. prev_pfn = ~(0UL);
  65. while (likely(num_pages != 0)) {
  66. pfn = page_to_pfn(*pages);
  67. if (pfn != prev_pfn + 1) {
  68. if (desc_virtual - page_virtual ==
  69. desc_per_page - 1)
  70. break;
  71. (++desc_virtual)->ppn = cpu_to_le32(pfn);
  72. desc_virtual->num_pages = cpu_to_le32(1);
  73. } else {
  74. uint32_t tmp =
  75. le32_to_cpu(desc_virtual->num_pages);
  76. desc_virtual->num_pages = cpu_to_le32(tmp + 1);
  77. }
  78. prev_pfn = pfn;
  79. --num_pages;
  80. ++pages;
  81. }
  82. (++desc_virtual)->ppn = cpu_to_le32(0);
  83. desc_virtual->num_pages = cpu_to_le32(0);
  84. }
  85. if (likely(page_virtual != NULL))
  86. kunmap_atomic(page_virtual, KM_USER0);
  87. return 0;
  88. out_err:
  89. list_for_each_entry_safe(page, next, desc_pages, lru) {
  90. list_del_init(&page->lru);
  91. __free_page(page);
  92. }
  93. return ret;
  94. }
  95. static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
  96. {
  97. struct page *page, *next;
  98. list_for_each_entry_safe(page, next, desc_pages, lru) {
  99. list_del_init(&page->lru);
  100. __free_page(page);
  101. }
  102. }
  103. static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
  104. int gmr_id, struct list_head *desc_pages)
  105. {
  106. struct page *page;
  107. if (unlikely(list_empty(desc_pages)))
  108. return;
  109. page = list_entry(desc_pages->next, struct page, lru);
  110. mutex_lock(&dev_priv->hw_mutex);
  111. vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
  112. wmb();
  113. vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page));
  114. mb();
  115. mutex_unlock(&dev_priv->hw_mutex);
  116. }
  117. /**
  118. * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
  119. * the number of used descriptors.
  120. */
  121. static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
  122. unsigned long num_pages)
  123. {
  124. unsigned long prev_pfn = ~(0UL);
  125. unsigned long pfn;
  126. unsigned long descriptors = 0;
  127. while (num_pages--) {
  128. pfn = page_to_pfn(*pages++);
  129. if (prev_pfn + 1 != pfn)
  130. ++descriptors;
  131. prev_pfn = pfn;
  132. }
  133. return descriptors;
  134. }
  135. int vmw_gmr_bind(struct vmw_private *dev_priv,
  136. struct ttm_buffer_object *bo)
  137. {
  138. struct ttm_tt *ttm = bo->ttm;
  139. unsigned long descriptors;
  140. int ret;
  141. uint32_t id;
  142. struct list_head desc_pages;
  143. if (!(dev_priv->capabilities & SVGA_CAP_GMR))
  144. return -EINVAL;
  145. ret = ttm_tt_populate(ttm);
  146. if (unlikely(ret != 0))
  147. return ret;
  148. descriptors = vmw_gmr_count_descriptors(ttm->pages, ttm->num_pages);
  149. if (unlikely(descriptors > dev_priv->max_gmr_descriptors))
  150. return -EINVAL;
  151. INIT_LIST_HEAD(&desc_pages);
  152. ret = vmw_gmr_build_descriptors(&desc_pages, ttm->pages,
  153. ttm->num_pages);
  154. if (unlikely(ret != 0))
  155. return ret;
  156. ret = vmw_gmr_id_alloc(dev_priv, &id);
  157. if (unlikely(ret != 0))
  158. goto out_no_id;
  159. vmw_gmr_fire_descriptors(dev_priv, id, &desc_pages);
  160. vmw_gmr_free_descriptors(&desc_pages);
  161. vmw_dmabuf_set_gmr(bo, id);
  162. return 0;
  163. out_no_id:
  164. vmw_gmr_free_descriptors(&desc_pages);
  165. return ret;
  166. }
/*
 * Unbind a GMR id: select the id, then program a zero descriptor
 * pfn so the device drops its mapping. Register write order is
 * enforced with explicit barriers under the hardware mutex.
 */
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
	/* Id must be latched before the descriptor register is written. */
	wmb();
	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
	/* Ensure the unbind reaches the device before releasing the lock. */
	mb();
	mutex_unlock(&dev_priv->hw_mutex);
}