apbio.c

/*
 * Copyright (C) 2010 NVIDIA Corporation.
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/mutex.h>

#include "apbio.h"
#include "iomap.h"

#if defined(CONFIG_TEGRA20_APB_DMA)
static DEFINE_MUTEX(tegra_apb_dma_lock);
static u32 *tegra_apb_bb;
static dma_addr_t tegra_apb_bb_phys;
static DECLARE_COMPLETION(tegra_apb_wait);

static u32 tegra_apb_readl_direct(unsigned long offset);
static void tegra_apb_writel_direct(u32 value, unsigned long offset);

static struct dma_chan *tegra_apb_dma_chan;
static struct dma_slave_config dma_sconfig;
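
/*
 * Lazily bring up the DMA path: grab any available slave channel and a
 * one-word coherent bounce buffer, and pre-fill the slave config for
 * 32-bit, single-beat transfers.  Safe to call repeatedly; returns
 * false if either allocation fails, leaving direct MMIO in use.
 */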
bool tegra_apb_dma_init(void)
{
	dma_cap_mask_t mask;

	mutex_lock(&tegra_apb_dma_lock);

	/* Check to see if we raced to setup */
	if (tegra_apb_dma_chan)
		goto skip_init;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	tegra_apb_dma_chan = dma_request_channel(mask, NULL, NULL);
	if (!tegra_apb_dma_chan) {
		/*
		 * This is common until the device is probed, so don't
		 * shout about it.
		 */
		pr_debug("%s: can not allocate dma channel\n", __func__);
		goto err_dma_alloc;
	}

	tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
					  &tegra_apb_bb_phys, GFP_KERNEL);
	if (!tegra_apb_bb) {
		pr_err("%s: can not allocate bounce buffer\n", __func__);
		goto err_buff_alloc;
	}

	dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_sconfig.src_maxburst = 1;
	dma_sconfig.dst_maxburst = 1;

skip_init:
	mutex_unlock(&tegra_apb_dma_lock);
	return true;

err_buff_alloc:
	dma_release_channel(tegra_apb_dma_chan);
	tegra_apb_dma_chan = NULL;

err_dma_alloc:
	mutex_unlock(&tegra_apb_dma_lock);
	return false;
}
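
/* dmaengine completion callback: wake the waiter in do_dma_transfer() */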
static void apb_dma_complete(void *args)
{
	complete(&tegra_apb_wait);
}
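
/*
 * Move one 32-bit word between the bounce buffer and the APB register at
 * @apb_add in direction @dir, then wait (up to 50 ms) for the transfer
 * to complete.  The caller must hold tegra_apb_dma_lock, which serializes
 * use of the shared bounce buffer and slave config.
 */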
static int do_dma_transfer(unsigned long apb_add,
			   enum dma_transfer_direction dir)
{
	struct dma_async_tx_descriptor *dma_desc;
	int ret;

	if (dir == DMA_DEV_TO_MEM)
		dma_sconfig.src_addr = apb_add;
	else
		dma_sconfig.dst_addr = apb_add;

	ret = dmaengine_slave_config(tegra_apb_dma_chan, &dma_sconfig);
	if (ret)
		return ret;

	dma_desc = dmaengine_prep_slave_single(tegra_apb_dma_chan,
			tegra_apb_bb_phys, sizeof(u32), dir,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return -EINVAL;

	dma_desc->callback = apb_dma_complete;
	dma_desc->callback_param = NULL;

	INIT_COMPLETION(tegra_apb_wait);

	dmaengine_submit(dma_desc);
	dma_async_issue_pending(tegra_apb_dma_chan);
	ret = wait_for_completion_timeout(&tegra_apb_wait,
					  msecs_to_jiffies(50));
	if (WARN(ret == 0, "apb dma transfer timed out")) {
		dmaengine_terminate_all(tegra_apb_dma_chan);
		return -EFAULT;
	}

	return 0;
}
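
/*
 * Read an APB register by DMAing it into the bounce buffer.  Falls back
 * to a direct MMIO read if the DMA channel is not yet available, and
 * returns 0 if the transfer itself fails.
 */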
static u32 tegra_apb_readl_using_dma(unsigned long offset)
{
	int ret;

	if (!tegra_apb_dma_chan && !tegra_apb_dma_init())
		return tegra_apb_readl_direct(offset);

	mutex_lock(&tegra_apb_dma_lock);
	ret = do_dma_transfer(offset, DMA_DEV_TO_MEM);
	if (ret < 0) {
		pr_err("error in reading offset 0x%08lx using dma\n", offset);
		*(u32 *)tegra_apb_bb = 0;
	}
	mutex_unlock(&tegra_apb_dma_lock);
	return *((u32 *)tegra_apb_bb);
}
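
/*
 * Write an APB register by staging the value in the bounce buffer and
 * DMAing it out.  Falls back to a direct MMIO write if the DMA channel
 * is not yet available.
 */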
static void tegra_apb_writel_using_dma(u32 value, unsigned long offset)
{
	int ret;

	if (!tegra_apb_dma_chan && !tegra_apb_dma_init()) {
		tegra_apb_writel_direct(value, offset);
		return;
	}

	mutex_lock(&tegra_apb_dma_lock);
	*((u32 *)tegra_apb_bb) = value;
	ret = do_dma_transfer(offset, DMA_MEM_TO_DEV);
	if (ret < 0)
		pr_err("error in writing offset 0x%08lx using dma\n", offset);
	mutex_unlock(&tegra_apb_dma_lock);
}

#else
#define tegra_apb_readl_using_dma tegra_apb_readl_direct
#define tegra_apb_writel_using_dma tegra_apb_writel_direct
#endif
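
/*
 * Accessors are chosen once, at tegra_apb_io_init() time, and all reads
 * and writes are dispatched through these pointers afterwards.
 */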
typedef u32 (*apbio_read_fptr)(unsigned long offset);
typedef void (*apbio_write_fptr)(u32 value, unsigned long offset);

static apbio_read_fptr apbio_read;
static apbio_write_fptr apbio_write;
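
/* Plain MMIO accessors, used whenever the DMA workaround is not needed */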
static u32 tegra_apb_readl_direct(unsigned long offset)
{
	return readl(IO_ADDRESS(offset));
}

static void tegra_apb_writel_direct(u32 value, unsigned long offset)
{
	writel(value, IO_ADDRESS(offset));
}
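
/*
 * Select the APB accessors for this platform: the DMA-based ones where
 * the Tegra20 workaround applies, plain MMIO everywhere else.
 */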
void tegra_apb_io_init(void)
{
	/* DMA is only needed on Tegra20-based platforms */
	if (of_machine_is_compatible("nvidia,tegra20") ||
			!of_have_populated_dt()) {
		apbio_read = tegra_apb_readl_using_dma;
		apbio_write = tegra_apb_writel_using_dma;
	} else {
		apbio_read = tegra_apb_readl_direct;
		apbio_write = tegra_apb_writel_direct;
	}
}
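
/* Public entry point: read an APB register through the selected backend */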
u32 tegra_apb_readl(unsigned long offset)
{
	return apbio_read(offset);
}
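
/* Public entry point: write an APB register through the selected backend */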
void tegra_apb_writel(u32 value, unsigned long offset)
{
	apbio_write(value, offset);
}