/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <drm/drm.h>
#include <drm/drmP.h>

#include "gem.h"
  28. static inline struct tegra_bo *host1x_to_drm_bo(struct host1x_bo *bo)
  29. {
  30. return container_of(bo, struct tegra_bo, base);
  31. }
  32. static void tegra_bo_put(struct host1x_bo *bo)
  33. {
  34. struct tegra_bo *obj = host1x_to_drm_bo(bo);
  35. struct drm_device *drm = obj->gem.dev;
  36. mutex_lock(&drm->struct_mutex);
  37. drm_gem_object_unreference(&obj->gem);
  38. mutex_unlock(&drm->struct_mutex);
  39. }
  40. static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
  41. {
  42. struct tegra_bo *obj = host1x_to_drm_bo(bo);
  43. return obj->paddr;
  44. }
  45. static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
  46. {
  47. }
  48. static void *tegra_bo_mmap(struct host1x_bo *bo)
  49. {
  50. struct tegra_bo *obj = host1x_to_drm_bo(bo);
  51. return obj->vaddr;
  52. }
  53. static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
  54. {
  55. }
  56. static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
  57. {
  58. struct tegra_bo *obj = host1x_to_drm_bo(bo);
  59. return obj->vaddr + page * PAGE_SIZE;
  60. }
  61. static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
  62. void *addr)
  63. {
  64. }
  65. static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
  66. {
  67. struct tegra_bo *obj = host1x_to_drm_bo(bo);
  68. struct drm_device *drm = obj->gem.dev;
  69. mutex_lock(&drm->struct_mutex);
  70. drm_gem_object_reference(&obj->gem);
  71. mutex_unlock(&drm->struct_mutex);
  72. return bo;
  73. }
  74. const struct host1x_bo_ops tegra_bo_ops = {
  75. .get = tegra_bo_get,
  76. .put = tegra_bo_put,
  77. .pin = tegra_bo_pin,
  78. .unpin = tegra_bo_unpin,
  79. .mmap = tegra_bo_mmap,
  80. .munmap = tegra_bo_munmap,
  81. .kmap = tegra_bo_kmap,
  82. .kunmap = tegra_bo_kunmap,
  83. };
  84. static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
  85. {
  86. dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
  87. }
  88. unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo)
  89. {
  90. return (unsigned int)drm_vma_node_offset_addr(&bo->gem.vma_node);
  91. }
  92. struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
  93. {
  94. struct tegra_bo *bo;
  95. int err;
  96. bo = kzalloc(sizeof(*bo), GFP_KERNEL);
  97. if (!bo)
  98. return ERR_PTR(-ENOMEM);
  99. host1x_bo_init(&bo->base, &tegra_bo_ops);
  100. size = round_up(size, PAGE_SIZE);
  101. bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
  102. GFP_KERNEL | __GFP_NOWARN);
  103. if (!bo->vaddr) {
  104. dev_err(drm->dev, "failed to allocate buffer with size %u\n",
  105. size);
  106. err = -ENOMEM;
  107. goto err_dma;
  108. }
  109. err = drm_gem_object_init(drm, &bo->gem, size);
  110. if (err)
  111. goto err_init;
  112. err = drm_gem_create_mmap_offset(&bo->gem);
  113. if (err)
  114. goto err_mmap;
  115. return bo;
  116. err_mmap:
  117. drm_gem_object_release(&bo->gem);
  118. err_init:
  119. tegra_bo_destroy(drm, bo);
  120. err_dma:
  121. kfree(bo);
  122. return ERR_PTR(err);
  123. }
  124. struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
  125. struct drm_device *drm,
  126. unsigned int size,
  127. unsigned int *handle)
  128. {
  129. struct tegra_bo *bo;
  130. int ret;
  131. bo = tegra_bo_create(drm, size);
  132. if (IS_ERR(bo))
  133. return bo;
  134. ret = drm_gem_handle_create(file, &bo->gem, handle);
  135. if (ret)
  136. goto err;
  137. drm_gem_object_unreference_unlocked(&bo->gem);
  138. return bo;
  139. err:
  140. tegra_bo_free_object(&bo->gem);
  141. return ERR_PTR(ret);
  142. }
  143. void tegra_bo_free_object(struct drm_gem_object *gem)
  144. {
  145. struct tegra_bo *bo = to_tegra_bo(gem);
  146. drm_gem_free_mmap_offset(gem);
  147. drm_gem_object_release(gem);
  148. tegra_bo_destroy(gem->dev, bo);
  149. kfree(bo);
  150. }
  151. int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
  152. struct drm_mode_create_dumb *args)
  153. {
  154. int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
  155. struct tegra_bo *bo;
  156. if (args->pitch < min_pitch)
  157. args->pitch = min_pitch;
  158. if (args->size < args->pitch * args->height)
  159. args->size = args->pitch * args->height;
  160. bo = tegra_bo_create_with_handle(file, drm, args->size,
  161. &args->handle);
  162. if (IS_ERR(bo))
  163. return PTR_ERR(bo);
  164. return 0;
  165. }
  166. int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
  167. uint32_t handle, uint64_t *offset)
  168. {
  169. struct drm_gem_object *gem;
  170. struct tegra_bo *bo;
  171. mutex_lock(&drm->struct_mutex);
  172. gem = drm_gem_object_lookup(drm, file, handle);
  173. if (!gem) {
  174. dev_err(drm->dev, "failed to lookup GEM object\n");
  175. mutex_unlock(&drm->struct_mutex);
  176. return -EINVAL;
  177. }
  178. bo = to_tegra_bo(gem);
  179. *offset = tegra_bo_get_mmap_offset(bo);
  180. drm_gem_object_unreference(gem);
  181. mutex_unlock(&drm->struct_mutex);
  182. return 0;
  183. }
  184. const struct vm_operations_struct tegra_bo_vm_ops = {
  185. .open = drm_gem_vm_open,
  186. .close = drm_gem_vm_close,
  187. };
  188. int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
  189. {
  190. struct drm_gem_object *gem;
  191. struct tegra_bo *bo;
  192. int ret;
  193. ret = drm_gem_mmap(file, vma);
  194. if (ret)
  195. return ret;
  196. gem = vma->vm_private_data;
  197. bo = to_tegra_bo(gem);
  198. ret = remap_pfn_range(vma, vma->vm_start, bo->paddr >> PAGE_SHIFT,
  199. vma->vm_end - vma->vm_start, vma->vm_page_prot);
  200. if (ret)
  201. drm_gem_vm_close(vma);
  202. return ret;
  203. }