vmwgfx_gmr.c

/**************************************************************************
 *
 * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/drmP.h>
#include <drm/ttm/ttm_bo_driver.h>

#define VMW_PPN_SIZE (sizeof(unsigned long))
/* A future safe maximum remap size. */
#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
#define DMA_ADDR_INVALID ((dma_addr_t) 0)
#define DMA_PAGE_INVALID 0UL
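
/*
 * vmw_gmr2_bind - Bind a GMR using the GMR2 FIFO commands.
 *
 * Emits one SVGA_CMD_DEFINE_GMR2 followed by as many SVGA_CMD_REMAP_GMR2
 * commands as needed: a single remap command carries at most
 * VMW_PPN_PER_REMAP page numbers, so larger GMRs are split across several
 * remap commands, all reserved and committed as a single FIFO batch.
 */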
static int vmw_gmr2_bind(struct vmw_private *dev_priv,
                         struct vmw_piter *iter,
                         unsigned long num_pages,
                         int gmr_id)
{
        SVGAFifoCmdDefineGMR2 define_cmd;
        SVGAFifoCmdRemapGMR2 remap_cmd;
        uint32_t *cmd;
        uint32_t *cmd_orig;
        uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
        uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP +
                ((num_pages % VMW_PPN_PER_REMAP) > 0);
        uint32_t remap_size = VMW_PPN_SIZE * num_pages +
                (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
        uint32_t remap_pos = 0;
        uint32_t cmd_size = define_size + remap_size;
        uint32_t i;

        cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size);
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        define_cmd.gmrId = gmr_id;
        define_cmd.numPages = num_pages;

        *cmd++ = SVGA_CMD_DEFINE_GMR2;
        memcpy(cmd, &define_cmd, sizeof(define_cmd));
        cmd += sizeof(define_cmd) / sizeof(*cmd);

        /*
         * Need to split the command if there are too many
         * pages that go into the gmr.
         */

        remap_cmd.gmrId = gmr_id;
        remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
                SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;

        while (num_pages > 0) {
                unsigned long nr = min(num_pages,
                                       (unsigned long)VMW_PPN_PER_REMAP);

                remap_cmd.offsetPages = remap_pos;
                remap_cmd.numPages = nr;

                *cmd++ = SVGA_CMD_REMAP_GMR2;
                memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
                cmd += sizeof(remap_cmd) / sizeof(*cmd);

                for (i = 0; i < nr; ++i) {
                        if (VMW_PPN_SIZE <= 4)
                                *cmd = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
                        else
                                *((uint64_t *)cmd) = vmw_piter_dma_addr(iter) >>
                                        PAGE_SHIFT;

                        cmd += VMW_PPN_SIZE / sizeof(*cmd);
                        vmw_piter_next(iter);
                }

                num_pages -= nr;
                remap_pos += nr;
        }

        BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));

        vmw_fifo_commit(dev_priv, cmd_size);

        return 0;
}
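
/*
 * vmw_gmr2_unbind - Release a GMR2 binding by redefining the GMR
 * with zero pages.
 */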
static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
                            int gmr_id)
{
        SVGAFifoCmdDefineGMR2 define_cmd;
        uint32_t define_size = sizeof(define_cmd) + 4;
        uint32_t *cmd;

        cmd = vmw_fifo_reserve(dev_priv, define_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("GMR2 unbind failed.\n");
                return;
        }
        define_cmd.gmrId = gmr_id;
        define_cmd.numPages = 0;

        *cmd++ = SVGA_CMD_DEFINE_GMR2;
        memcpy(cmd, &define_cmd, sizeof(define_cmd));

        vmw_fifo_commit(dev_priv, define_size);
}
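
/*
 * vmw_gmr_free_descriptors - Unmap and free the pages of a legacy
 * descriptor chain. @desc_dma is the DMA address of the first page
 * (or DMA_ADDR_INVALID if it was never mapped); the addresses of the
 * following pages are read from the link slot at the end of each page.
 */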
static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
                                     struct list_head *desc_pages)
{
        struct page *page, *next;
        struct svga_guest_mem_descriptor *page_virtual;
        unsigned int desc_per_page = PAGE_SIZE /
                sizeof(struct svga_guest_mem_descriptor) - 1;

        if (list_empty(desc_pages))
                return;

        list_for_each_entry_safe(page, next, desc_pages, lru) {
                list_del_init(&page->lru);

                if (likely(desc_dma != DMA_ADDR_INVALID)) {
                        dma_unmap_page(dev, desc_dma, PAGE_SIZE,
                                       DMA_TO_DEVICE);
                }

                page_virtual = kmap_atomic(page);
                desc_dma = (dma_addr_t)
                        le32_to_cpu(page_virtual[desc_per_page].ppn) <<
                        PAGE_SHIFT;
                kunmap_atomic(page_virtual);

                __free_page(page);
        }
}

/*
 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
 * the number of used descriptors.
 */
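/*
 * vmw_gmr_build_descriptors - Build the legacy descriptor chain for a
 * GMR. Runs of consecutive page frame numbers are coalesced into a
 * single descriptor by bumping its num_pages count. Each descriptor
 * page is terminated with a zero-length descriptor, and a second,
 * reverse pass DMA-maps the pages and links each one to the next
 * through the last descriptor slot.
 */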
static int vmw_gmr_build_descriptors(struct device *dev,
                                     struct list_head *desc_pages,
                                     struct vmw_piter *iter,
                                     unsigned long num_pages,
                                     dma_addr_t *first_dma)
{
        struct page *page;
        struct svga_guest_mem_descriptor *page_virtual = NULL;
        struct svga_guest_mem_descriptor *desc_virtual = NULL;
        unsigned int desc_per_page;
        unsigned long prev_pfn;
        unsigned long pfn;
        int ret;
        dma_addr_t desc_dma;

        desc_per_page = PAGE_SIZE /
                sizeof(struct svga_guest_mem_descriptor) - 1;

        while (likely(num_pages != 0)) {
                page = alloc_page(__GFP_HIGHMEM);
                if (unlikely(page == NULL)) {
                        ret = -ENOMEM;
                        goto out_err;
                }

                list_add_tail(&page->lru, desc_pages);
                page_virtual = kmap_atomic(page);
                desc_virtual = page_virtual - 1;
                prev_pfn = ~(0UL);

                while (likely(num_pages != 0)) {
                        pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;

                        if (pfn != prev_pfn + 1) {
                                if (desc_virtual - page_virtual ==
                                    desc_per_page - 1)
                                        break;

                                (++desc_virtual)->ppn = cpu_to_le32(pfn);
                                desc_virtual->num_pages = cpu_to_le32(1);
                        } else {
                                uint32_t tmp =
                                        le32_to_cpu(desc_virtual->num_pages);
                                desc_virtual->num_pages = cpu_to_le32(tmp + 1);
                        }
                        prev_pfn = pfn;
                        --num_pages;
                        vmw_piter_next(iter);
                }

                (++desc_virtual)->ppn = DMA_PAGE_INVALID;
                desc_virtual->num_pages = cpu_to_le32(0);
                kunmap_atomic(page_virtual);
        }

        desc_dma = 0;
        list_for_each_entry_reverse(page, desc_pages, lru) {
                page_virtual = kmap_atomic(page);
                page_virtual[desc_per_page].ppn = cpu_to_le32
                        (desc_dma >> PAGE_SHIFT);
                kunmap_atomic(page_virtual);
                desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
                                        DMA_TO_DEVICE);

                if (unlikely(dma_mapping_error(dev, desc_dma))) {
                        ret = -ENOMEM;
                        goto out_err;
                }
        }
        *first_dma = desc_dma;

        return 0;

out_err:
        vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages);
        return ret;
}
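
/*
 * vmw_gmr_fire_descriptors - Point the device at a descriptor chain by
 * writing the GMR id followed by the PPN of the first descriptor page.
 * The barriers keep the two register writes ordered.
 */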
static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
                                     int gmr_id, dma_addr_t desc_dma)
{
        mutex_lock(&dev_priv->hw_mutex);

        vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
        wmb();
        vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT);
        mb();

        mutex_unlock(&dev_priv->hw_mutex);
}
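
/*
 * vmw_gmr_bind - Bind a backing store to a GMR id.
 *
 * Takes the GMR2 FIFO path when the device advertises SVGA_CAP_GMR2;
 * otherwise falls back to the legacy register interface, building a
 * descriptor chain, firing it at the device and freeing the descriptor
 * pages again immediately afterwards.
 */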
int vmw_gmr_bind(struct vmw_private *dev_priv,
                 const struct vmw_sg_table *vsgt,
                 unsigned long num_pages,
                 int gmr_id)
{
        struct list_head desc_pages;
        dma_addr_t desc_dma = 0;
        struct device *dev = dev_priv->dev->dev;
        struct vmw_piter data_iter;
        int ret;

        vmw_piter_start(&data_iter, vsgt, 0);

        if (unlikely(!vmw_piter_next(&data_iter)))
                return 0;

        if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
                return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);

        if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
                return -EINVAL;

        if (vsgt->num_regions > dev_priv->max_gmr_descriptors)
                return -EINVAL;

        INIT_LIST_HEAD(&desc_pages);

        ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter,
                                        num_pages, &desc_dma);
        if (unlikely(ret != 0))
                return ret;

        vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma);
        vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages);

        return 0;
}
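
/*
 * vmw_gmr_unbind - Release a GMR binding, using the GMR2 FIFO command
 * when available and the legacy register interface otherwise.
 */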
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
        if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
                vmw_gmr2_unbind(dev_priv, gmr_id);
                return;
        }

        mutex_lock(&dev_priv->hw_mutex);
        vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
        wmb();
        vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
        mb();
        mutex_unlock(&dev_priv->hw_mutex);
}