vmwgfx_gmr.c

/**************************************************************************
 *
 * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "drmP.h"
#include "ttm/ttm_bo_driver.h"

#define VMW_PPN_SIZE sizeof(unsigned long)
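
/*
 * vmw_gmr2_bind - Define a GMR2 (guest memory region) and bind its
 * backing pages through the device command FIFO.
 *
 * Reserves FIFO space for an SVGA_CMD_DEFINE_GMR2 command followed by
 * an SVGA_CMD_REMAP_GMR2 command plus one page frame number (PPN) per
 * page, written as a 32-bit or 64-bit value depending on the size of
 * unsigned long. Returns 0 on success, -ENOMEM if FIFO space could not
 * be reserved.
 */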
static int vmw_gmr2_bind(struct vmw_private *dev_priv,
			 struct page *pages[],
			 unsigned long num_pages,
			 int gmr_id)
{
	SVGAFifoCmdDefineGMR2 define_cmd;
	SVGAFifoCmdRemapGMR2 remap_cmd;
	uint32_t define_size = sizeof(define_cmd) + 4;
	uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4;
	uint32_t *cmd;
	uint32_t *cmd_orig;
	uint32_t i;

	cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	define_cmd.gmrId = gmr_id;
	define_cmd.numPages = num_pages;

	remap_cmd.gmrId = gmr_id;
	remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
		SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
	remap_cmd.offsetPages = 0;
	remap_cmd.numPages = num_pages;

	*cmd++ = SVGA_CMD_DEFINE_GMR2;
	memcpy(cmd, &define_cmd, sizeof(define_cmd));
	cmd += sizeof(define_cmd) / sizeof(uint32);

	*cmd++ = SVGA_CMD_REMAP_GMR2;
	memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
	cmd += sizeof(remap_cmd) / sizeof(uint32);

	for (i = 0; i < num_pages; ++i) {
		/*
		 * Write each PPN in the width advertised in
		 * remap_cmd.flags: 32 bits when unsigned long is four
		 * bytes, 64 bits otherwise.
		 */
		if (VMW_PPN_SIZE <= 4)
			*cmd = page_to_pfn(*pages++);
		else
			*((uint64_t *)cmd) = page_to_pfn(*pages++);

		cmd += VMW_PPN_SIZE / sizeof(*cmd);
	}

	vmw_fifo_commit(dev_priv, define_size + remap_size);

	return 0;
}
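
/*
 * vmw_gmr2_unbind - Release a GMR2 by redefining it with zero backing
 * pages. A failed FIFO reservation is logged but not propagated, since
 * the function returns void.
 */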
static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
			    int gmr_id)
{
	SVGAFifoCmdDefineGMR2 define_cmd;
	uint32_t define_size = sizeof(define_cmd) + 4;
	uint32_t *cmd;

	cmd = vmw_fifo_reserve(dev_priv, define_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("GMR2 unbind failed.\n");
		return;
	}
	define_cmd.gmrId = gmr_id;
	define_cmd.numPages = 0;

	*cmd++ = SVGA_CMD_DEFINE_GMR2;
	memcpy(cmd, &define_cmd, sizeof(define_cmd));

	vmw_fifo_commit(dev_priv, define_size);
}

/**
 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
 * the number of used descriptors.
 */
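
/*
 * Descriptor layout, as implemented below: each descriptor page holds
 * desc_per_page entries, each describing a run of physically contiguous
 * pages, plus one trailing descriptor. The trailing descriptor's ppn is
 * patched to the pfn of the next descriptor page, and a ppn/num_pages
 * pair of 0/0 terminates the chain.
 */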
static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
				     struct page *pages[],
				     unsigned long num_pages)
{
	struct page *page, *next;
	struct svga_guest_mem_descriptor *page_virtual = NULL;
	struct svga_guest_mem_descriptor *desc_virtual = NULL;
	unsigned int desc_per_page;
	unsigned long prev_pfn;
	unsigned long pfn;
	int ret;

	desc_per_page = PAGE_SIZE /
		sizeof(struct svga_guest_mem_descriptor) - 1;

	while (likely(num_pages != 0)) {
		page = alloc_page(__GFP_HIGHMEM);
		if (unlikely(page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		list_add_tail(&page->lru, desc_pages);

		/*
		 * Point previous page terminating descriptor to this
		 * page before unmapping it.
		 */
		if (likely(page_virtual != NULL)) {
			desc_virtual->ppn = page_to_pfn(page);
			kunmap_atomic(page_virtual, KM_USER0);
		}

		page_virtual = kmap_atomic(page, KM_USER0);
		desc_virtual = page_virtual - 1;
		prev_pfn = ~(0UL);

		while (likely(num_pages != 0)) {
			pfn = page_to_pfn(*pages);

			if (pfn != prev_pfn + 1) {

				if (desc_virtual - page_virtual ==
				    desc_per_page - 1)
					break;

				(++desc_virtual)->ppn = cpu_to_le32(pfn);
				desc_virtual->num_pages = cpu_to_le32(1);
			} else {
				uint32_t tmp =
					le32_to_cpu(desc_virtual->num_pages);
				desc_virtual->num_pages = cpu_to_le32(tmp + 1);
			}
			prev_pfn = pfn;
			--num_pages;
			++pages;
		}

		(++desc_virtual)->ppn = cpu_to_le32(0);
		desc_virtual->num_pages = cpu_to_le32(0);
	}

	if (likely(page_virtual != NULL))
		kunmap_atomic(page_virtual, KM_USER0);

	return 0;
out_err:
	list_for_each_entry_safe(page, next, desc_pages, lru) {
		list_del_init(&page->lru);
		__free_page(page);
	}
	return ret;
}
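
/*
 * vmw_gmr_free_descriptors - Free every descriptor page that
 * vmw_gmr_build_descriptors() placed on the list.
 */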
static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, desc_pages, lru) {
		list_del_init(&page->lru);
		__free_page(page);
	}
}
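
/*
 * vmw_gmr_fire_descriptors - Hand the descriptor chain to the device by
 * writing the GMR id followed by the pfn of the first descriptor page.
 * The wmb()/mb() pair keeps the two register writes ordered with the
 * preceding descriptor stores.
 */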
static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
				     int gmr_id, struct list_head *desc_pages)
{
	struct page *page;

	if (unlikely(list_empty(desc_pages)))
		return;

	page = list_entry(desc_pages->next, struct page, lru);

	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
	wmb();
	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page));
	mb();

	mutex_unlock(&dev_priv->hw_mutex);
}

/**
 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
 * the number of used descriptors.
 */
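
/*
 * vmw_gmr_count_descriptors - Count the descriptors needed for a page
 * array: one per run of physically contiguous pages.
 */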
static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
					       unsigned long num_pages)
{
	unsigned long prev_pfn = ~(0UL);
	unsigned long pfn;
	unsigned long descriptors = 0;

	while (num_pages--) {
		pfn = page_to_pfn(*pages++);
		if (prev_pfn + 1 != pfn)
			++descriptors;
		prev_pfn = pfn;
	}

	return descriptors;
}
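
/*
 * vmw_gmr_bind - Bind a page array to a GMR id. Prefers the GMR2 FIFO
 * path when the device advertises SVGA_CAP_GMR2, and otherwise falls
 * back to the legacy descriptor-based mechanism.
 *
 * Returns 0 on success, -EINVAL if neither GMR capability is present or
 * the required descriptor count exceeds max_gmr_descriptors, or a
 * negative error code from descriptor construction.
 */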
int vmw_gmr_bind(struct vmw_private *dev_priv,
		 struct page *pages[],
		 unsigned long num_pages,
		 int gmr_id)
{
	struct list_head desc_pages;
	int ret;

	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
		return vmw_gmr2_bind(dev_priv, pages, num_pages, gmr_id);

	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
		return -EINVAL;

	if (vmw_gmr_count_descriptors(pages, num_pages) >
	    dev_priv->max_gmr_descriptors)
		return -EINVAL;

	INIT_LIST_HEAD(&desc_pages);

	ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages);
	if (unlikely(ret != 0))
		return ret;

	vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages);
	vmw_gmr_free_descriptors(&desc_pages);

	return 0;
}
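
/*
 * vmw_gmr_unbind - Release a GMR id, either through the GMR2 FIFO path
 * or, on legacy devices, by pointing the GMR descriptor register at 0.
 */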
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
		vmw_gmr2_unbind(dev_priv, gmr_id);
		return;
	}

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
	wmb();
	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
	mb();
	mutex_unlock(&dev_priv->hw_mutex);
}