  1. /* Wrapper for DMA channel allocator that starts clocks etc */
  2. #include <linux/kernel.h>
  3. #include <linux/spinlock.h>
  4. #include <mach/dma.h>
  5. #include <hwregs/reg_map.h>
  6. #include <hwregs/reg_rdwr.h>
  7. #include <hwregs/marb_defs.h>
  8. #include <hwregs/clkgen_defs.h>
  9. #include <hwregs/strmux_defs.h>
  10. #include <linux/errno.h>
  11. #include <asm/system.h>
  12. #include <arbiter.h>
/* Busy flag per DMA channel, indexed by channel number. */
static char used_dma_channels[MAX_DMA_CHANNELS];
/* device_id of each channel's current holder, for error reporting. */
static const char *used_dma_channels_users[MAX_DMA_CHANNELS];
/* Protects the two tables above and the clkgen/strmux read-modify-write. */
static DEFINE_SPINLOCK(dma_lock);
  16. int crisv32_request_dma(unsigned int dmanr, const char *device_id,
  17. unsigned options, unsigned int bandwidth, enum dma_owner owner)
  18. {
  19. unsigned long flags;
  20. reg_clkgen_rw_clk_ctrl clk_ctrl;
  21. reg_strmux_rw_cfg strmux_cfg;
  22. if (crisv32_arbiter_allocate_bandwidth(dmanr,
  23. options & DMA_INT_MEM ? INT_REGION : EXT_REGION,
  24. bandwidth))
  25. return -ENOMEM;
  26. spin_lock_irqsave(&dma_lock, flags);
  27. if (used_dma_channels[dmanr]) {
  28. spin_unlock_irqrestore(&dma_lock, flags);
  29. if (options & DMA_VERBOSE_ON_ERROR)
  30. printk(KERN_ERR "Failed to request DMA %i for %s, "
  31. "already allocated by %s\n",
  32. dmanr,
  33. device_id,
  34. used_dma_channels_users[dmanr]);
  35. if (options & DMA_PANIC_ON_ERROR)
  36. panic("request_dma error!");
  37. spin_unlock_irqrestore(&dma_lock, flags);
  38. return -EBUSY;
  39. }
  40. clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
  41. strmux_cfg = REG_RD(strmux, regi_strmux, rw_cfg);
  42. switch (dmanr) {
  43. case 0:
  44. case 1:
  45. clk_ctrl.dma0_1_eth = 1;
  46. break;
  47. case 2:
  48. case 3:
  49. clk_ctrl.dma2_3_strcop = 1;
  50. break;
  51. case 4:
  52. case 5:
  53. clk_ctrl.dma4_5_iop = 1;
  54. break;
  55. case 6:
  56. case 7:
  57. clk_ctrl.sser_ser_dma6_7 = 1;
  58. break;
  59. case 9:
  60. case 11:
  61. clk_ctrl.dma9_11 = 1;
  62. break;
  63. #if MAX_DMA_CHANNELS-1 != 11
  64. #error Check dma.c
  65. #endif
  66. default:
  67. spin_unlock_irqrestore(&dma_lock, flags);
  68. if (options & DMA_VERBOSE_ON_ERROR)
  69. printk(KERN_ERR "Failed to request DMA %i for %s, "
  70. "only 0-%i valid)\n",
  71. dmanr, device_id, MAX_DMA_CHANNELS-1);
  72. if (options & DMA_PANIC_ON_ERROR)
  73. panic("request_dma error!");
  74. return -EINVAL;
  75. }
  76. switch (owner) {
  77. case dma_eth:
  78. if (dmanr == 0)
  79. strmux_cfg.dma0 = regk_strmux_eth;
  80. else if (dmanr == 1)
  81. strmux_cfg.dma1 = regk_strmux_eth;
  82. else
  83. panic("Invalid DMA channel for eth\n");
  84. break;
  85. case dma_ser0:
  86. if (dmanr == 0)
  87. strmux_cfg.dma0 = regk_strmux_ser0;
  88. else if (dmanr == 1)
  89. strmux_cfg.dma1 = regk_strmux_ser0;
  90. else
  91. panic("Invalid DMA channel for ser0\n");
  92. break;
  93. case dma_ser3:
  94. if (dmanr == 2)
  95. strmux_cfg.dma2 = regk_strmux_ser3;
  96. else if (dmanr == 3)
  97. strmux_cfg.dma3 = regk_strmux_ser3;
  98. else
  99. panic("Invalid DMA channel for ser3\n");
  100. break;
  101. case dma_strp:
  102. if (dmanr == 2)
  103. strmux_cfg.dma2 = regk_strmux_strcop;
  104. else if (dmanr == 3)
  105. strmux_cfg.dma3 = regk_strmux_strcop;
  106. else
  107. panic("Invalid DMA channel for strp\n");
  108. break;
  109. case dma_ser1:
  110. if (dmanr == 4)
  111. strmux_cfg.dma4 = regk_strmux_ser1;
  112. else if (dmanr == 5)
  113. strmux_cfg.dma5 = regk_strmux_ser1;
  114. else
  115. panic("Invalid DMA channel for ser1\n");
  116. break;
  117. case dma_iop:
  118. if (dmanr == 4)
  119. strmux_cfg.dma4 = regk_strmux_iop;
  120. else if (dmanr == 5)
  121. strmux_cfg.dma5 = regk_strmux_iop;
  122. else
  123. panic("Invalid DMA channel for iop\n");
  124. break;
  125. case dma_ser2:
  126. if (dmanr == 6)
  127. strmux_cfg.dma6 = regk_strmux_ser2;
  128. else if (dmanr == 7)
  129. strmux_cfg.dma7 = regk_strmux_ser2;
  130. else
  131. panic("Invalid DMA channel for ser2\n");
  132. break;
  133. case dma_sser:
  134. if (dmanr == 6)
  135. strmux_cfg.dma6 = regk_strmux_sser;
  136. else if (dmanr == 7)
  137. strmux_cfg.dma7 = regk_strmux_sser;
  138. else
  139. panic("Invalid DMA channel for sser\n");
  140. break;
  141. case dma_ser4:
  142. if (dmanr == 9)
  143. strmux_cfg.dma9 = regk_strmux_ser4;
  144. else
  145. panic("Invalid DMA channel for ser4\n");
  146. break;
  147. case dma_jpeg:
  148. if (dmanr == 9)
  149. strmux_cfg.dma9 = regk_strmux_jpeg;
  150. else
  151. panic("Invalid DMA channel for JPEG\n");
  152. break;
  153. case dma_h264:
  154. if (dmanr == 11)
  155. strmux_cfg.dma11 = regk_strmux_h264;
  156. else
  157. panic("Invalid DMA channel for H264\n");
  158. break;
  159. }
  160. used_dma_channels[dmanr] = 1;
  161. used_dma_channels_users[dmanr] = device_id;
  162. REG_WR(clkgen, regi_clkgen, rw_clk_ctrl, clk_ctrl);
  163. REG_WR(strmux, regi_strmux, rw_cfg, strmux_cfg);
  164. spin_unlock_irqrestore(&dma_lock, flags);
  165. return 0;
  166. }
  167. void crisv32_free_dma(unsigned int dmanr)
  168. {
  169. spin_lock(&dma_lock);
  170. used_dma_channels[dmanr] = 0;
  171. spin_unlock(&dma_lock);
  172. }