exynos_drm_buf.c 4.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192
  1. /* exynos_drm_buf.c
  2. *
  3. * Copyright (c) 2011 Samsung Electronics Co., Ltd.
  4. * Author: Inki Dae <inki.dae@samsung.com>
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License as published by the
  8. * Free Software Foundation; either version 2 of the License, or (at your
  9. * option) any later version.
  10. */
  11. #include <drm/drmP.h>
  12. #include <drm/exynos_drm.h>
  13. #include "exynos_drm_drv.h"
  14. #include "exynos_drm_gem.h"
  15. #include "exynos_drm_buf.h"
  16. #include "exynos_drm_iommu.h"
  17. static int lowlevel_buffer_allocate(struct drm_device *dev,
  18. unsigned int flags, struct exynos_drm_gem_buf *buf)
  19. {
  20. int ret = 0;
  21. enum dma_attr attr;
  22. unsigned int nr_pages;
  23. if (buf->dma_addr) {
  24. DRM_DEBUG_KMS("already allocated.\n");
  25. return 0;
  26. }
  27. init_dma_attrs(&buf->dma_attrs);
  28. /*
  29. * if EXYNOS_BO_CONTIG, fully physically contiguous memory
  30. * region will be allocated else physically contiguous
  31. * as possible.
  32. */
  33. if (!(flags & EXYNOS_BO_NONCONTIG))
  34. dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
  35. /*
  36. * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
  37. * else cachable mapping.
  38. */
  39. if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
  40. attr = DMA_ATTR_WRITE_COMBINE;
  41. else
  42. attr = DMA_ATTR_NON_CONSISTENT;
  43. dma_set_attr(attr, &buf->dma_attrs);
  44. dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
  45. nr_pages = buf->size >> PAGE_SHIFT;
  46. if (!is_drm_iommu_supported(dev)) {
  47. dma_addr_t start_addr;
  48. unsigned int i = 0;
  49. buf->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
  50. if (!buf->pages) {
  51. DRM_ERROR("failed to allocate pages.\n");
  52. return -ENOMEM;
  53. }
  54. buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
  55. &buf->dma_addr, GFP_KERNEL,
  56. &buf->dma_attrs);
  57. if (!buf->kvaddr) {
  58. DRM_ERROR("failed to allocate buffer.\n");
  59. ret = -ENOMEM;
  60. goto err_free;
  61. }
  62. start_addr = buf->dma_addr;
  63. while (i < nr_pages) {
  64. buf->pages[i] = phys_to_page(start_addr);
  65. start_addr += PAGE_SIZE;
  66. i++;
  67. }
  68. } else {
  69. buf->pages = dma_alloc_attrs(dev->dev, buf->size,
  70. &buf->dma_addr, GFP_KERNEL,
  71. &buf->dma_attrs);
  72. if (!buf->pages) {
  73. DRM_ERROR("failed to allocate buffer.\n");
  74. return -ENOMEM;
  75. }
  76. }
  77. buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
  78. if (!buf->sgt) {
  79. DRM_ERROR("failed to get sg table.\n");
  80. ret = -ENOMEM;
  81. goto err_free_attrs;
  82. }
  83. DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
  84. (unsigned long)buf->dma_addr,
  85. buf->size);
  86. return ret;
  87. err_free_attrs:
  88. dma_free_attrs(dev->dev, buf->size, buf->pages,
  89. (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
  90. buf->dma_addr = (dma_addr_t)NULL;
  91. err_free:
  92. if (!is_drm_iommu_supported(dev))
  93. drm_free_large(buf->pages);
  94. return ret;
  95. }
  96. static void lowlevel_buffer_deallocate(struct drm_device *dev,
  97. unsigned int flags, struct exynos_drm_gem_buf *buf)
  98. {
  99. if (!buf->dma_addr) {
  100. DRM_DEBUG_KMS("dma_addr is invalid.\n");
  101. return;
  102. }
  103. DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
  104. (unsigned long)buf->dma_addr,
  105. buf->size);
  106. sg_free_table(buf->sgt);
  107. kfree(buf->sgt);
  108. buf->sgt = NULL;
  109. if (!is_drm_iommu_supported(dev)) {
  110. dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
  111. (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
  112. drm_free_large(buf->pages);
  113. } else
  114. dma_free_attrs(dev->dev, buf->size, buf->pages,
  115. (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
  116. buf->dma_addr = (dma_addr_t)NULL;
  117. }
  118. struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
  119. unsigned int size)
  120. {
  121. struct exynos_drm_gem_buf *buffer;
  122. DRM_DEBUG_KMS("desired size = 0x%x\n", size);
  123. buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
  124. if (!buffer) {
  125. DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
  126. return NULL;
  127. }
  128. buffer->size = size;
  129. return buffer;
  130. }
  131. void exynos_drm_fini_buf(struct drm_device *dev,
  132. struct exynos_drm_gem_buf *buffer)
  133. {
  134. if (!buffer) {
  135. DRM_DEBUG_KMS("buffer is null.\n");
  136. return;
  137. }
  138. kfree(buffer);
  139. buffer = NULL;
  140. }
  141. int exynos_drm_alloc_buf(struct drm_device *dev,
  142. struct exynos_drm_gem_buf *buf, unsigned int flags)
  143. {
  144. /*
  145. * allocate memory region and set the memory information
  146. * to vaddr and dma_addr of a buffer object.
  147. */
  148. if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
  149. return -ENOMEM;
  150. return 0;
  151. }
/*
 * exynos_drm_free_buf - release the backing memory of a GEM buffer.
 * @dev: DRM device the buffer was allocated against.
 * @flags: EXYNOS_BO_* flags the buffer was allocated with.
 * @buffer: descriptor whose backing storage is freed; the descriptor
 *	    itself stays valid (free it with exynos_drm_fini_buf()).
 *
 * Thin public wrapper around lowlevel_buffer_deallocate().
 */
void exynos_drm_free_buf(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buffer)
{
	lowlevel_buffer_deallocate(dev, flags, buffer);
}