dma.c — DMA channel allocator wrapper for ETRAX 100 (CRIS arch-v10)
  1. /* Wrapper for DMA channel allocator that updates DMA client muxing.
  2. * Copyright 2004, Axis Communications AB
  3. * $Id: dma.c,v 1.1 2004/12/13 12:21:51 starvik Exp $
  4. */
  5. #include <linux/kernel.h>
  6. #include <linux/module.h>
  7. #include <linux/errno.h>
  8. #include <asm/dma.h>
  9. #include <asm/arch/svinto.h>
/* Macro to access ETRAX 100 registers */
/* SETS(var, reg, field, val): read-modify-write a shadow register value.
 * Clears the bits of <field> in register <reg>'s shadow copy <var>, then
 * ORs in the named state <val>.
 * NOTE(review): <var> is evaluated twice, so only use it with a plain
 * lvalue (as is done with the local genconfig shadow copy below). */
#define SETS(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \
IO_STATE_(reg##_, field##_, _##val)
/* Per-channel "in use" flag and the device_id string of the current owner.
 * Both tables are only touched with local interrupts disabled, which is
 * the locking scheme used throughout this file. */
static char used_dma_channels[MAX_DMA_CHANNELS];
static const char * used_dma_channels_users[MAX_DMA_CHANNELS];
  15. int cris_request_dma(unsigned int dmanr, const char * device_id,
  16. unsigned options, enum dma_owner owner)
  17. {
  18. unsigned long flags;
  19. unsigned long int gens;
  20. int fail = -EINVAL;
  21. if ((dmanr < 0) || (dmanr >= MAX_DMA_CHANNELS)) {
  22. printk(KERN_CRIT "cris_request_dma: invalid DMA channel %u\n", dmanr);
  23. return -EINVAL;
  24. }
  25. local_irq_save(flags);
  26. if (used_dma_channels[dmanr]) {
  27. local_irq_restore(flags);
  28. if (options & DMA_VERBOSE_ON_ERROR) {
  29. printk(KERN_CRIT "Failed to request DMA %i for %s, already allocated by %s\n", dmanr, device_id, used_dma_channels_users[dmanr]);
  30. }
  31. if (options & DMA_PANIC_ON_ERROR) {
  32. panic("request_dma error!");
  33. }
  34. return -EBUSY;
  35. }
  36. gens = genconfig_shadow;
  37. switch(owner)
  38. {
  39. case dma_eth:
  40. if ((dmanr != NETWORK_TX_DMA_NBR) &&
  41. (dmanr != NETWORK_RX_DMA_NBR)) {
  42. printk(KERN_CRIT "Invalid DMA channel for eth\n");
  43. goto bail;
  44. }
  45. break;
  46. case dma_ser0:
  47. if (dmanr == SER0_TX_DMA_NBR) {
  48. SETS(gens, R_GEN_CONFIG, dma6, serial0);
  49. } else if (dmanr == SER0_RX_DMA_NBR) {
  50. SETS(gens, R_GEN_CONFIG, dma7, serial0);
  51. } else {
  52. printk(KERN_CRIT "Invalid DMA channel for ser0\n");
  53. goto bail;
  54. }
  55. break;
  56. case dma_ser1:
  57. if (dmanr == SER1_TX_DMA_NBR) {
  58. SETS(gens, R_GEN_CONFIG, dma8, serial1);
  59. } else if (dmanr == SER1_RX_DMA_NBR) {
  60. SETS(gens, R_GEN_CONFIG, dma9, serial1);
  61. } else {
  62. printk(KERN_CRIT "Invalid DMA channel for ser1\n");
  63. goto bail;
  64. }
  65. break;
  66. case dma_ser2:
  67. if (dmanr == SER2_TX_DMA_NBR) {
  68. SETS(gens, R_GEN_CONFIG, dma2, serial2);
  69. } else if (dmanr == SER2_RX_DMA_NBR) {
  70. SETS(gens, R_GEN_CONFIG, dma3, serial2);
  71. } else {
  72. printk(KERN_CRIT "Invalid DMA channel for ser2\n");
  73. goto bail;
  74. }
  75. break;
  76. case dma_ser3:
  77. if (dmanr == SER3_TX_DMA_NBR) {
  78. SETS(gens, R_GEN_CONFIG, dma4, serial3);
  79. } else if (dmanr == SER3_RX_DMA_NBR) {
  80. SETS(gens, R_GEN_CONFIG, dma5, serial3);
  81. } else {
  82. printk(KERN_CRIT "Invalid DMA channel for ser3\n");
  83. goto bail;
  84. }
  85. break;
  86. case dma_ata:
  87. if (dmanr == ATA_TX_DMA_NBR) {
  88. SETS(gens, R_GEN_CONFIG, dma2, ata);
  89. } else if (dmanr == ATA_RX_DMA_NBR) {
  90. SETS(gens, R_GEN_CONFIG, dma3, ata);
  91. } else {
  92. printk(KERN_CRIT "Invalid DMA channel for ata\n");
  93. goto bail;
  94. }
  95. break;
  96. case dma_ext0:
  97. if (dmanr == EXTDMA0_TX_DMA_NBR) {
  98. SETS(gens, R_GEN_CONFIG, dma4, extdma0);
  99. } else if (dmanr == EXTDMA0_RX_DMA_NBR) {
  100. SETS(gens, R_GEN_CONFIG, dma5, extdma0);
  101. } else {
  102. printk(KERN_CRIT "Invalid DMA channel for ext0\n");
  103. goto bail;
  104. }
  105. break;
  106. case dma_ext1:
  107. if (dmanr == EXTDMA1_TX_DMA_NBR) {
  108. SETS(gens, R_GEN_CONFIG, dma6, extdma1);
  109. } else if (dmanr == EXTDMA1_RX_DMA_NBR) {
  110. SETS(gens, R_GEN_CONFIG, dma7, extdma1);
  111. } else {
  112. printk(KERN_CRIT "Invalid DMA channel for ext1\n");
  113. goto bail;
  114. }
  115. break;
  116. case dma_int6:
  117. if (dmanr == MEM2MEM_RX_DMA_NBR) {
  118. SETS(gens, R_GEN_CONFIG, dma7, intdma6);
  119. } else {
  120. printk(KERN_CRIT "Invalid DMA channel for int6\n");
  121. goto bail;
  122. }
  123. break;
  124. case dma_int7:
  125. if (dmanr == MEM2MEM_TX_DMA_NBR) {
  126. SETS(gens, R_GEN_CONFIG, dma6, intdma7);
  127. } else {
  128. printk(KERN_CRIT "Invalid DMA channel for int7\n");
  129. goto bail;
  130. }
  131. break;
  132. case dma_usb:
  133. if (dmanr == USB_TX_DMA_NBR) {
  134. SETS(gens, R_GEN_CONFIG, dma8, usb);
  135. } else if (dmanr == USB_RX_DMA_NBR) {
  136. SETS(gens, R_GEN_CONFIG, dma9, usb);
  137. } else {
  138. printk(KERN_CRIT "Invalid DMA channel for usb\n");
  139. goto bail;
  140. }
  141. break;
  142. case dma_scsi0:
  143. if (dmanr == SCSI0_TX_DMA_NBR) {
  144. SETS(gens, R_GEN_CONFIG, dma2, scsi0);
  145. } else if (dmanr == SCSI0_RX_DMA_NBR) {
  146. SETS(gens, R_GEN_CONFIG, dma3, scsi0);
  147. } else {
  148. printk(KERN_CRIT "Invalid DMA channel for scsi0\n");
  149. goto bail;
  150. }
  151. break;
  152. case dma_scsi1:
  153. if (dmanr == SCSI1_TX_DMA_NBR) {
  154. SETS(gens, R_GEN_CONFIG, dma4, scsi1);
  155. } else if (dmanr == SCSI1_RX_DMA_NBR) {
  156. SETS(gens, R_GEN_CONFIG, dma5, scsi1);
  157. } else {
  158. printk(KERN_CRIT "Invalid DMA channel for scsi1\n");
  159. goto bail;
  160. }
  161. break;
  162. case dma_par0:
  163. if (dmanr == PAR0_TX_DMA_NBR) {
  164. SETS(gens, R_GEN_CONFIG, dma2, par0);
  165. } else if (dmanr == PAR0_RX_DMA_NBR) {
  166. SETS(gens, R_GEN_CONFIG, dma3, par0);
  167. } else {
  168. printk(KERN_CRIT "Invalid DMA channel for par0\n");
  169. goto bail;
  170. }
  171. break;
  172. case dma_par1:
  173. if (dmanr == PAR1_TX_DMA_NBR) {
  174. SETS(gens, R_GEN_CONFIG, dma4, par1);
  175. } else if (dmanr == PAR1_RX_DMA_NBR) {
  176. SETS(gens, R_GEN_CONFIG, dma5, par1);
  177. } else {
  178. printk(KERN_CRIT "Invalid DMA channel for par1\n");
  179. goto bail;
  180. }
  181. break;
  182. default:
  183. printk(KERN_CRIT "Invalid DMA owner.\n");
  184. goto bail;
  185. }
  186. used_dma_channels[dmanr] = 1;
  187. used_dma_channels_users[dmanr] = device_id;
  188. {
  189. volatile int i;
  190. genconfig_shadow = gens;
  191. *R_GEN_CONFIG = genconfig_shadow;
  192. /* Wait 12 cycles before doing any DMA command */
  193. for(i = 6; i > 0; i--)
  194. nop();
  195. }
  196. fail = 0;
  197. bail:
  198. local_irq_restore(flags);
  199. return fail;
  200. }
  201. void cris_free_dma(unsigned int dmanr, const char * device_id)
  202. {
  203. unsigned long flags;
  204. if ((dmanr < 0) || (dmanr >= MAX_DMA_CHANNELS)) {
  205. printk(KERN_CRIT "cris_free_dma: invalid DMA channel %u\n", dmanr);
  206. return;
  207. }
  208. local_irq_save(flags);
  209. if (!used_dma_channels[dmanr]) {
  210. printk(KERN_CRIT "cris_free_dma: DMA channel %u not allocated\n", dmanr);
  211. } else if (device_id != used_dma_channels_users[dmanr]) {
  212. printk(KERN_CRIT "cris_free_dma: DMA channel %u not allocated by device\n", dmanr);
  213. } else {
  214. switch(dmanr)
  215. {
  216. case 0:
  217. *R_DMA_CH0_CMD = IO_STATE(R_DMA_CH0_CMD, cmd, reset);
  218. while (IO_EXTRACT(R_DMA_CH0_CMD, cmd, *R_DMA_CH0_CMD) ==
  219. IO_STATE_VALUE(R_DMA_CH0_CMD, cmd, reset));
  220. break;
  221. case 1:
  222. *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, reset);
  223. while (IO_EXTRACT(R_DMA_CH1_CMD, cmd, *R_DMA_CH1_CMD) ==
  224. IO_STATE_VALUE(R_DMA_CH1_CMD, cmd, reset));
  225. break;
  226. case 2:
  227. *R_DMA_CH2_CMD = IO_STATE(R_DMA_CH2_CMD, cmd, reset);
  228. while (IO_EXTRACT(R_DMA_CH2_CMD, cmd, *R_DMA_CH2_CMD) ==
  229. IO_STATE_VALUE(R_DMA_CH2_CMD, cmd, reset));
  230. break;
  231. case 3:
  232. *R_DMA_CH3_CMD = IO_STATE(R_DMA_CH3_CMD, cmd, reset);
  233. while (IO_EXTRACT(R_DMA_CH3_CMD, cmd, *R_DMA_CH3_CMD) ==
  234. IO_STATE_VALUE(R_DMA_CH3_CMD, cmd, reset));
  235. break;
  236. case 4:
  237. *R_DMA_CH4_CMD = IO_STATE(R_DMA_CH4_CMD, cmd, reset);
  238. while (IO_EXTRACT(R_DMA_CH4_CMD, cmd, *R_DMA_CH4_CMD) ==
  239. IO_STATE_VALUE(R_DMA_CH4_CMD, cmd, reset));
  240. break;
  241. case 5:
  242. *R_DMA_CH5_CMD = IO_STATE(R_DMA_CH5_CMD, cmd, reset);
  243. while (IO_EXTRACT(R_DMA_CH5_CMD, cmd, *R_DMA_CH5_CMD) ==
  244. IO_STATE_VALUE(R_DMA_CH5_CMD, cmd, reset));
  245. break;
  246. case 6:
  247. *R_DMA_CH6_CMD = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
  248. while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *R_DMA_CH6_CMD) ==
  249. IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, reset));
  250. break;
  251. case 7:
  252. *R_DMA_CH7_CMD = IO_STATE(R_DMA_CH7_CMD, cmd, reset);
  253. while (IO_EXTRACT(R_DMA_CH7_CMD, cmd, *R_DMA_CH7_CMD) ==
  254. IO_STATE_VALUE(R_DMA_CH7_CMD, cmd, reset));
  255. break;
  256. case 8:
  257. *R_DMA_CH8_CMD = IO_STATE(R_DMA_CH8_CMD, cmd, reset);
  258. while (IO_EXTRACT(R_DMA_CH8_CMD, cmd, *R_DMA_CH8_CMD) ==
  259. IO_STATE_VALUE(R_DMA_CH8_CMD, cmd, reset));
  260. break;
  261. case 9:
  262. *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, reset);
  263. while (IO_EXTRACT(R_DMA_CH9_CMD, cmd, *R_DMA_CH9_CMD) ==
  264. IO_STATE_VALUE(R_DMA_CH9_CMD, cmd, reset));
  265. break;
  266. }
  267. used_dma_channels[dmanr] = 0;
  268. }
  269. local_irq_restore(flags);
  270. }
/* Exported so drivers built as modules can allocate/free DMA channels. */
EXPORT_SYMBOL(cris_request_dma);
EXPORT_SYMBOL(cris_free_dma);