apbio.c

/*
 * Copyright (C) 2010 NVIDIA Corporation.
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <mach/iomap.h>
#include <linux/of.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/mutex.h>

#include <mach/dma.h>

#include "apbio.h"
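
/*
 * Accessors for APB registers on Tegra.
 *
 * On Tegra20-based platforms (and non-DT boots) each 32-bit APB register
 * read or write is bounced through a single-word DMA buffer rather than
 * being issued directly by the CPU; this appears to be a workaround for a
 * Tegra20 limitation on concurrent CPU and APB DMA accesses to the bus.
 * All other platforms use plain readl()/writel().  tegra_apb_io_init()
 * selects the implementation at boot, and tegra_apb_readl()/
 * tegra_apb_writel() dispatch through function pointers.
 */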
#if defined(CONFIG_TEGRA_SYSTEM_DMA) || defined(CONFIG_TEGRA20_APB_DMA)
static DEFINE_MUTEX(tegra_apb_dma_lock);
static u32 *tegra_apb_bb;
static dma_addr_t tegra_apb_bb_phys;
static DECLARE_COMPLETION(tegra_apb_wait);

static u32 tegra_apb_readl_direct(unsigned long offset);
static void tegra_apb_writel_direct(u32 value, unsigned long offset);

#if defined(CONFIG_TEGRA_SYSTEM_DMA)
static struct tegra_dma_channel *tegra_apb_dma;
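
/*
 * Lazily set up the legacy Tegra system-DMA path: allocate a shared
 * one-shot DMA channel and a 4-byte coherent bounce buffer.  Callers may
 * race to get here; the mutex and the early "already initialized" check
 * make that safe.
 */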
bool tegra_apb_init(void)
{
	struct tegra_dma_channel *ch;

	mutex_lock(&tegra_apb_dma_lock);

	/* Check to see if we raced to setup */
	if (tegra_apb_dma)
		goto out;

	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT |
					TEGRA_DMA_SHARED);
	if (!ch)
		goto out_fail;

	tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
					  &tegra_apb_bb_phys, GFP_KERNEL);
	if (!tegra_apb_bb) {
		pr_err("%s: can not allocate bounce buffer\n", __func__);
		tegra_dma_free_channel(ch);
		goto out_fail;
	}

	tegra_apb_dma = ch;
out:
	mutex_unlock(&tegra_apb_dma_lock);
	return true;

out_fail:
	mutex_unlock(&tegra_apb_dma_lock);
	return false;
}

static void apb_dma_complete(struct tegra_dma_req *req)
{
	complete(&tegra_apb_wait);
}
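
/*
 * Read a 32-bit APB register by DMAing it into the bounce buffer.  Falls
 * back to a direct readl() if the DMA channel is not available yet.  The
 * mutex serializes use of the shared bounce buffer; if the transfer times
 * out, the request is dequeued and 0 is returned.
 */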
static u32 tegra_apb_readl_using_dma(unsigned long offset)
{
	struct tegra_dma_req req;
	int ret;

	if (!tegra_apb_dma && !tegra_apb_init())
		return tegra_apb_readl_direct(offset);

	mutex_lock(&tegra_apb_dma_lock);
	req.complete = apb_dma_complete;
	req.to_memory = 1;
	req.dest_addr = tegra_apb_bb_phys;
	req.dest_bus_width = 32;
	req.dest_wrap = 1;
	req.source_addr = offset;
	req.source_bus_width = 32;
	req.source_wrap = 4;
	req.req_sel = TEGRA_DMA_REQ_SEL_CNTR;
	req.size = 4;

	INIT_COMPLETION(tegra_apb_wait);

	tegra_dma_enqueue_req(tegra_apb_dma, &req);

	ret = wait_for_completion_timeout(&tegra_apb_wait,
		msecs_to_jiffies(50));

	if (WARN(ret == 0, "apb read dma timed out")) {
		tegra_dma_dequeue_req(tegra_apb_dma, &req);
		*(u32 *)tegra_apb_bb = 0;
	}

	mutex_unlock(&tegra_apb_dma_lock);
	return *((u32 *)tegra_apb_bb);
}
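
/*
 * Write a 32-bit APB register by staging the value in the bounce buffer
 * and DMAing it out.  Falls back to a direct writel() if the DMA channel
 * is not available yet.
 */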
static void tegra_apb_writel_using_dma(u32 value, unsigned long offset)
{
	struct tegra_dma_req req;
	int ret;

	if (!tegra_apb_dma && !tegra_apb_init()) {
		tegra_apb_writel_direct(value, offset);
		return;
	}

	mutex_lock(&tegra_apb_dma_lock);
	*((u32 *)tegra_apb_bb) = value;
	req.complete = apb_dma_complete;
	req.to_memory = 0;
	req.dest_addr = offset;
	req.dest_wrap = 4;
	req.dest_bus_width = 32;
	req.source_addr = tegra_apb_bb_phys;
	req.source_bus_width = 32;
	req.source_wrap = 1;
	req.req_sel = TEGRA_DMA_REQ_SEL_CNTR;
	req.size = 4;

	INIT_COMPLETION(tegra_apb_wait);

	tegra_dma_enqueue_req(tegra_apb_dma, &req);

	ret = wait_for_completion_timeout(&tegra_apb_wait,
		msecs_to_jiffies(50));

	if (WARN(ret == 0, "apb write dma timed out"))
		tegra_dma_dequeue_req(tegra_apb_dma, &req);

	mutex_unlock(&tegra_apb_dma_lock);
}

#else
static struct dma_chan *tegra_apb_dma_chan;
static struct dma_slave_config dma_sconfig;
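
/*
 * Lazily set up the dmaengine-based path (CONFIG_TEGRA20_APB_DMA): request
 * any DMA_SLAVE-capable channel, allocate the 4-byte coherent bounce
 * buffer, and pre-fill the slave configuration used by do_dma_transfer().
 */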
bool tegra_apb_dma_init(void)
{
	dma_cap_mask_t mask;

	mutex_lock(&tegra_apb_dma_lock);

	/* Check to see if we raced to setup */
	if (tegra_apb_dma_chan)
		goto skip_init;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	tegra_apb_dma_chan = dma_request_channel(mask, NULL, NULL);
	if (!tegra_apb_dma_chan) {
		/*
		 * This is common until the device is probed, so don't
		 * shout about it.
		 */
		pr_debug("%s: can not allocate dma channel\n", __func__);
		goto err_dma_alloc;
	}

	tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
					  &tegra_apb_bb_phys, GFP_KERNEL);
	if (!tegra_apb_bb) {
		pr_err("%s: can not allocate bounce buffer\n", __func__);
		goto err_buff_alloc;
	}

	dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_sconfig.slave_id = TEGRA_DMA_REQ_SEL_CNTR;
	dma_sconfig.src_maxburst = 1;
	dma_sconfig.dst_maxburst = 1;

skip_init:
	mutex_unlock(&tegra_apb_dma_lock);
	return true;

err_buff_alloc:
	dma_release_channel(tegra_apb_dma_chan);
	tegra_apb_dma_chan = NULL;

err_dma_alloc:
	mutex_unlock(&tegra_apb_dma_lock);
	return false;
}

static void apb_dma_complete(void *args)
{
	complete(&tegra_apb_wait);
}
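
/*
 * Perform a single 4-byte slave transfer between the APB register at
 * apb_add and the bounce buffer, in the direction given by dir.  Waits up
 * to 50 ms for completion and terminates the channel on timeout.  The
 * caller must hold tegra_apb_dma_lock.
 */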
static int do_dma_transfer(unsigned long apb_add,
		enum dma_transfer_direction dir)
{
	struct dma_async_tx_descriptor *dma_desc;
	int ret;

	if (dir == DMA_DEV_TO_MEM)
		dma_sconfig.src_addr = apb_add;
	else
		dma_sconfig.dst_addr = apb_add;

	ret = dmaengine_slave_config(tegra_apb_dma_chan, &dma_sconfig);
	if (ret)
		return ret;

	dma_desc = dmaengine_prep_slave_single(tegra_apb_dma_chan,
			tegra_apb_bb_phys, sizeof(u32), dir,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return -EINVAL;

	dma_desc->callback = apb_dma_complete;
	dma_desc->callback_param = NULL;

	INIT_COMPLETION(tegra_apb_wait);

	dmaengine_submit(dma_desc);
	dma_async_issue_pending(tegra_apb_dma_chan);
	ret = wait_for_completion_timeout(&tegra_apb_wait,
		msecs_to_jiffies(50));

	/* This path is used for both reads and writes, so don't claim "read" */
	if (WARN(ret == 0, "apb dma transfer timed out")) {
		dmaengine_terminate_all(tegra_apb_dma_chan);
		return -EFAULT;
	}

	return 0;
}
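
/*
 * dmaengine-based register accessors.  As with the legacy path above, they
 * fall back to direct MMIO until the DMA channel is available, and a read
 * whose transfer fails returns 0.
 */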
static u32 tegra_apb_readl_using_dma(unsigned long offset)
{
	int ret;

	if (!tegra_apb_dma_chan && !tegra_apb_dma_init())
		return tegra_apb_readl_direct(offset);

	mutex_lock(&tegra_apb_dma_lock);
	ret = do_dma_transfer(offset, DMA_DEV_TO_MEM);
	if (ret < 0) {
		pr_err("error in reading offset 0x%08lx using dma\n", offset);
		*(u32 *)tegra_apb_bb = 0;
	}
	mutex_unlock(&tegra_apb_dma_lock);
	return *((u32 *)tegra_apb_bb);
}

static void tegra_apb_writel_using_dma(u32 value, unsigned long offset)
{
	int ret;

	if (!tegra_apb_dma_chan && !tegra_apb_dma_init()) {
		tegra_apb_writel_direct(value, offset);
		return;
	}

	mutex_lock(&tegra_apb_dma_lock);
	*((u32 *)tegra_apb_bb) = value;
	ret = do_dma_transfer(offset, DMA_MEM_TO_DEV);
	if (ret < 0)
		pr_err("error in writing offset 0x%08lx using dma\n", offset);
	mutex_unlock(&tegra_apb_dma_lock);
}
#endif
#else
#define tegra_apb_readl_using_dma tegra_apb_readl_direct
#define tegra_apb_writel_using_dma tegra_apb_writel_direct
#endif
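
/*
 * tegra_apb_io_init() points these at either the DMA-based or the direct
 * accessors; the public tegra_apb_readl()/tegra_apb_writel() below simply
 * call through them.
 */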
typedef u32 (*apbio_read_fptr)(unsigned long offset);
typedef void (*apbio_write_fptr)(u32 value, unsigned long offset);

static apbio_read_fptr apbio_read;
static apbio_write_fptr apbio_write;

static u32 tegra_apb_readl_direct(unsigned long offset)
{
	return readl(IO_TO_VIRT(offset));
}

static void tegra_apb_writel_direct(u32 value, unsigned long offset)
{
	writel(value, IO_TO_VIRT(offset));
}
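
/*
 * Pick the accessor implementation at boot: Tegra20 boards, and boots
 * without a populated device tree (presumably Tegra20-era board files),
 * get the DMA-backed accessors; everything else gets direct MMIO.
 */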
void tegra_apb_io_init(void)
{
	/* DMA-based access is only needed on Tegra20-based platforms */
	if (of_machine_is_compatible("nvidia,tegra20") ||
			!of_have_populated_dt()) {
		apbio_read = tegra_apb_readl_using_dma;
		apbio_write = tegra_apb_writel_using_dma;
	} else {
		apbio_read = tegra_apb_readl_direct;
		apbio_write = tegra_apb_writel_direct;
	}
}

u32 tegra_apb_readl(unsigned long offset)
{
	return apbio_read(offset);
}

void tegra_apb_writel(u32 value, unsigned long offset)
{
	apbio_write(value, offset);
}