gem.c 5.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258
  1. /*
  2. * NVIDIA Tegra DRM GEM helper functions
  3. *
  4. * Copyright (C) 2012 Sascha Hauer, Pengutronix
  5. * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
  6. *
  7. * Based on the GEM/CMA helpers
  8. *
  9. * Copyright (c) 2011 Samsung Electronics Co., Ltd.
  10. *
  11. * This program is free software; you can redistribute it and/or
  12. * modify it under the terms of the GNU General Public License
  13. * as published by the Free Software Foundation; either version 2
  14. * of the License, or (at your option) any later version.
  15. * This program is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  18. * GNU General Public License for more details.
  19. */
  20. #include <drm/tegra_drm.h>
  21. #include "gem.h"
  22. static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
  23. {
  24. return container_of(bo, struct tegra_bo, base);
  25. }
  26. static void tegra_bo_put(struct host1x_bo *bo)
  27. {
  28. struct tegra_bo *obj = host1x_to_tegra_bo(bo);
  29. struct drm_device *drm = obj->gem.dev;
  30. mutex_lock(&drm->struct_mutex);
  31. drm_gem_object_unreference(&obj->gem);
  32. mutex_unlock(&drm->struct_mutex);
  33. }
  34. static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
  35. {
  36. struct tegra_bo *obj = host1x_to_tegra_bo(bo);
  37. return obj->paddr;
  38. }
/*
 * Counterpart to tegra_bo_pin(). Intentionally a no-op: tegra_bo_pin()
 * does not acquire any resources, so there is nothing to release here.
 */
static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}
  42. static void *tegra_bo_mmap(struct host1x_bo *bo)
  43. {
  44. struct tegra_bo *obj = host1x_to_tegra_bo(bo);
  45. return obj->vaddr;
  46. }
/*
 * Counterpart to tegra_bo_mmap(). Intentionally a no-op: the kernel
 * mapping returned by tegra_bo_mmap() is the permanent vaddr from the
 * allocation and lives until tegra_bo_destroy().
 */
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
}
  50. static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
  51. {
  52. struct tegra_bo *obj = host1x_to_tegra_bo(bo);
  53. return obj->vaddr + page * PAGE_SIZE;
  54. }
/*
 * Counterpart to tegra_bo_kmap(). Intentionally a no-op: per-page
 * "mappings" are just offsets into the permanent kernel mapping, so
 * there is nothing to undo.
 */
static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
}
  59. static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
  60. {
  61. struct tegra_bo *obj = host1x_to_tegra_bo(bo);
  62. struct drm_device *drm = obj->gem.dev;
  63. mutex_lock(&drm->struct_mutex);
  64. drm_gem_object_reference(&obj->gem);
  65. mutex_unlock(&drm->struct_mutex);
  66. return bo;
  67. }
/* host1x buffer-object operations backed by Tegra DRM GEM objects. */
const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};
/*
 * Free the contiguous backing memory of @bo.
 *
 * NOTE(review): this reads bo->gem.size, which is only valid once
 * drm_gem_object_init() has succeeded — error paths that run before
 * that point must not rely on this function for the DMA free.
 */
static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
{
	dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
}
  82. struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
  83. unsigned long flags)
  84. {
  85. struct tegra_bo *bo;
  86. int err;
  87. bo = kzalloc(sizeof(*bo), GFP_KERNEL);
  88. if (!bo)
  89. return ERR_PTR(-ENOMEM);
  90. host1x_bo_init(&bo->base, &tegra_bo_ops);
  91. size = round_up(size, PAGE_SIZE);
  92. bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
  93. GFP_KERNEL | __GFP_NOWARN);
  94. if (!bo->vaddr) {
  95. dev_err(drm->dev, "failed to allocate buffer with size %u\n",
  96. size);
  97. err = -ENOMEM;
  98. goto err_dma;
  99. }
  100. err = drm_gem_object_init(drm, &bo->gem, size);
  101. if (err)
  102. goto err_init;
  103. err = drm_gem_create_mmap_offset(&bo->gem);
  104. if (err)
  105. goto err_mmap;
  106. if (flags & DRM_TEGRA_GEM_CREATE_TILED)
  107. bo->flags |= TEGRA_BO_TILED;
  108. if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
  109. bo->flags |= TEGRA_BO_BOTTOM_UP;
  110. return bo;
  111. err_mmap:
  112. drm_gem_object_release(&bo->gem);
  113. err_init:
  114. tegra_bo_destroy(drm, bo);
  115. err_dma:
  116. kfree(bo);
  117. return ERR_PTR(err);
  118. }
  119. struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
  120. struct drm_device *drm,
  121. unsigned int size,
  122. unsigned long flags,
  123. unsigned int *handle)
  124. {
  125. struct tegra_bo *bo;
  126. int ret;
  127. bo = tegra_bo_create(drm, size, flags);
  128. if (IS_ERR(bo))
  129. return bo;
  130. ret = drm_gem_handle_create(file, &bo->gem, handle);
  131. if (ret)
  132. goto err;
  133. drm_gem_object_unreference_unlocked(&bo->gem);
  134. return bo;
  135. err:
  136. tegra_bo_free_object(&bo->gem);
  137. return ERR_PTR(ret);
  138. }
/*
 * Final destructor, called when the last GEM reference is dropped.
 * Teardown mirrors creation in reverse: release the mmap offset, then
 * the GEM object state, then the DMA buffer, then the bo itself.
 */
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	drm_gem_free_mmap_offset(gem);
	drm_gem_object_release(gem);
	tegra_bo_destroy(gem->dev, bo);
	kfree(bo);
}
  147. int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
  148. struct drm_mode_create_dumb *args)
  149. {
  150. int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
  151. struct tegra_bo *bo;
  152. if (args->pitch < min_pitch)
  153. args->pitch = min_pitch;
  154. if (args->size < args->pitch * args->height)
  155. args->size = args->pitch * args->height;
  156. bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
  157. &args->handle);
  158. if (IS_ERR(bo))
  159. return PTR_ERR(bo);
  160. return 0;
  161. }
  162. int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
  163. uint32_t handle, uint64_t *offset)
  164. {
  165. struct drm_gem_object *gem;
  166. struct tegra_bo *bo;
  167. mutex_lock(&drm->struct_mutex);
  168. gem = drm_gem_object_lookup(drm, file, handle);
  169. if (!gem) {
  170. dev_err(drm->dev, "failed to lookup GEM object\n");
  171. mutex_unlock(&drm->struct_mutex);
  172. return -EINVAL;
  173. }
  174. bo = to_tegra_bo(gem);
  175. *offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
  176. drm_gem_object_unreference(gem);
  177. mutex_unlock(&drm->struct_mutex);
  178. return 0;
  179. }
/*
 * VM operations for GEM mmap()s: only reference management is needed,
 * since the pages are remapped up front in tegra_drm_mmap() and no
 * faulting is involved.
 */
const struct vm_operations_struct tegra_bo_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
/*
 * mmap() entry point for the Tegra DRM device file.
 *
 * drm_gem_mmap() resolves the GEM object from the fake mmap offset,
 * performs the checks, takes a reference and stores the object in
 * vma->vm_private_data. The contiguous buffer is then mapped into the
 * VMA in one go with remap_pfn_range().
 *
 * NOTE(review): this assumes bo->paddr (a dma_addr_t) is directly
 * convertible to a PFN on this platform — confirm for non-identity
 * DMA mappings.
 */
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int ret;

	ret = drm_gem_mmap(file, vma);
	if (ret)
		return ret;

	gem = vma->vm_private_data;
	bo = to_tegra_bo(gem);

	ret = remap_pfn_range(vma, vma->vm_start, bo->paddr >> PAGE_SHIFT,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);
	if (ret)
		/* Drop the reference drm_gem_mmap() took on the object. */
		drm_gem_vm_close(vma);

	return ret;
}