apbio.c

/*
 * Copyright (C) 2010 NVIDIA Corporation.
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

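/*
 * Accessors for Tegra APB registers. On Tegra20, direct CPU access to
 * some APB registers can conflict with in-flight APB DMA traffic, so
 * when CONFIG_TEGRA20_APB_DMA is enabled the helpers below route reads
 * and writes through an APB DMA channel and a one-word bounce buffer
 * rather than touching the bus directly.
 */
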
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/mutex.h>

#include <mach/dma.h>
#include <mach/iomap.h>

#include "apbio.h"

#if defined(CONFIG_TEGRA20_APB_DMA)
static DEFINE_MUTEX(tegra_apb_dma_lock);
static u32 *tegra_apb_bb;
static dma_addr_t tegra_apb_bb_phys;
static DECLARE_COMPLETION(tegra_apb_wait);

static u32 tegra_apb_readl_direct(unsigned long offset);
static void tegra_apb_writel_direct(u32 value, unsigned long offset);

static struct dma_chan *tegra_apb_dma_chan;
static struct dma_slave_config dma_sconfig;

bool tegra_apb_dma_init(void)
{
        dma_cap_mask_t mask;

        mutex_lock(&tegra_apb_dma_lock);

        /* Check to see if we raced to setup */
        if (tegra_apb_dma_chan)
                goto skip_init;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        tegra_apb_dma_chan = dma_request_channel(mask, NULL, NULL);
        if (!tegra_apb_dma_chan) {
                /*
                 * This is common until the device is probed, so don't
                 * shout about it.
                 */
                pr_debug("%s: cannot allocate dma channel\n", __func__);
                goto err_dma_alloc;
        }

        tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
                                          &tegra_apb_bb_phys, GFP_KERNEL);
        if (!tegra_apb_bb) {
                pr_err("%s: cannot allocate bounce buffer\n", __func__);
                goto err_buff_alloc;
        }

        dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        dma_sconfig.slave_id = TEGRA_DMA_REQ_SEL_CNTR;
        dma_sconfig.src_maxburst = 1;
        dma_sconfig.dst_maxburst = 1;

skip_init:
        mutex_unlock(&tegra_apb_dma_lock);
        return true;

err_buff_alloc:
        dma_release_channel(tegra_apb_dma_chan);
        tegra_apb_dma_chan = NULL;

err_dma_alloc:
        mutex_unlock(&tegra_apb_dma_lock);
        return false;
}

static void apb_dma_complete(void *args)
{
        complete(&tegra_apb_wait);
}

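/*
 * Move one 32-bit word between the bounce buffer and the APB register
 * at @apb_add. The slave address is (re)programmed for the requested
 * direction on every call, and the transfer is abandoned with -EFAULT
 * if the completion callback does not fire within 50 ms.
 */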
static int do_dma_transfer(unsigned long apb_add,
                enum dma_transfer_direction dir)
{
        struct dma_async_tx_descriptor *dma_desc;
        int ret;

        if (dir == DMA_DEV_TO_MEM)
                dma_sconfig.src_addr = apb_add;
        else
                dma_sconfig.dst_addr = apb_add;

        ret = dmaengine_slave_config(tegra_apb_dma_chan, &dma_sconfig);
        if (ret)
                return ret;

        dma_desc = dmaengine_prep_slave_single(tegra_apb_dma_chan,
                        tegra_apb_bb_phys, sizeof(u32), dir,
                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!dma_desc)
                return -EINVAL;

        dma_desc->callback = apb_dma_complete;
        dma_desc->callback_param = NULL;

        INIT_COMPLETION(tegra_apb_wait);

        dmaengine_submit(dma_desc);
        dma_async_issue_pending(tegra_apb_dma_chan);
        ret = wait_for_completion_timeout(&tegra_apb_wait,
                        msecs_to_jiffies(50));

        /* this path serves both directions, so don't say "read" here */
        if (WARN(ret == 0, "apb dma transfer timed out")) {
                dmaengine_terminate_all(tegra_apb_dma_chan);
                return -EFAULT;
        }

        return 0;
}

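/*
 * Before the dmaengine driver has probed, no channel can be allocated;
 * in that case these wrappers quietly fall back to direct MMIO access
 * instead of failing the read or write.
 */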
static u32 tegra_apb_readl_using_dma(unsigned long offset)
{
        int ret;

        if (!tegra_apb_dma_chan && !tegra_apb_dma_init())
                return tegra_apb_readl_direct(offset);

        mutex_lock(&tegra_apb_dma_lock);
        ret = do_dma_transfer(offset, DMA_DEV_TO_MEM);
        if (ret < 0) {
                pr_err("error in reading offset 0x%08lx using dma\n", offset);
                *tegra_apb_bb = 0;
        }
        mutex_unlock(&tegra_apb_dma_lock);
        return *tegra_apb_bb;
}

static void tegra_apb_writel_using_dma(u32 value, unsigned long offset)
{
        int ret;

        if (!tegra_apb_dma_chan && !tegra_apb_dma_init()) {
                tegra_apb_writel_direct(value, offset);
                return;
        }

        mutex_lock(&tegra_apb_dma_lock);
        *tegra_apb_bb = value;
        ret = do_dma_transfer(offset, DMA_MEM_TO_DEV);
        if (ret < 0)
                pr_err("error in writing offset 0x%08lx using dma\n", offset);
        mutex_unlock(&tegra_apb_dma_lock);
}

#else
#define tegra_apb_readl_using_dma tegra_apb_readl_direct
#define tegra_apb_writel_using_dma tegra_apb_writel_direct
#endif

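/*
 * Register access is dispatched through these pointers, which
 * tegra_apb_io_init() points at either the direct or the DMA-based
 * implementation at boot.
 */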
typedef u32 (*apbio_read_fptr)(unsigned long offset);
typedef void (*apbio_write_fptr)(u32 value, unsigned long offset);

static apbio_read_fptr apbio_read;
static apbio_write_fptr apbio_write;

static u32 tegra_apb_readl_direct(unsigned long offset)
{
        return readl(IO_ADDRESS(offset));
}

static void tegra_apb_writel_direct(u32 value, unsigned long offset)
{
        writel(value, IO_ADDRESS(offset));
}

void tegra_apb_io_init(void)
{
        /* Only Tegra20 needs the DMA path; without DT we can't tell, so assume it */
        if (of_machine_is_compatible("nvidia,tegra20") ||
            !of_have_populated_dt()) {
                apbio_read = tegra_apb_readl_using_dma;
                apbio_write = tegra_apb_writel_using_dma;
        } else {
                apbio_read = tegra_apb_readl_direct;
                apbio_write = tegra_apb_writel_direct;
        }
}

u32 tegra_apb_readl(unsigned long offset)
{
        return apbio_read(offset);
}

void tegra_apb_writel(u32 value, unsigned long offset)
{
        apbio_write(value, offset);
}
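
/*
 * Usage sketch (a minimal example; the register names below are
 * hypothetical and not defined in this file): a platform calls
 * tegra_apb_io_init() once during early init, after which all APB
 * accesses go through whichever backend was selected, e.g.:
 *
 *      tegra_apb_io_init();
 *      val = tegra_apb_readl(TEGRA_APB_MISC_BASE + APB_MISC_GP_HIDREV);
 *      tegra_apb_writel(val, TEGRA_APB_MISC_BASE + APB_MISC_GP_HIDREV);
 */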