/* dma.h — m68k/ColdFire DMA definitions */
  1. #ifndef _M68K_DMA_H
  2. #define _M68K_DMA_H 1
  3. #ifdef CONFIG_COLDFIRE
  4. /*
  5. * ColdFire DMA Model:
  6. * ColdFire DMA supports two forms of DMA: Single and Dual address. Single
  7. * address mode emits a source address, and expects that the device will either
  8. * pick up the data (DMA READ) or source data (DMA WRITE). This implies that
  9. * the device will place data on the correct byte(s) of the data bus, as the
  10. * memory transactions are always 32 bits. This implies that only 32 bit
  11. * devices will find single mode transfers useful. Dual address DMA mode
  12. * performs two cycles: source read and destination write. ColdFire will
  13. * align the data so that the device will always get the correct bytes, thus
  14. * is useful for 8 and 16 bit devices. This is the mode that is supported
  15. * below.
  16. *
  17. * AUG/22/2000 : added support for 32-bit Dual-Address-Mode (K) 2000
  18. * Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
  19. *
  20. * AUG/25/2000 : addad support for 8, 16 and 32-bit Single-Address-Mode (K)2000
  21. * Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
  22. *
  23. * APR/18/2002 : added proper support for MCF5272 DMA controller.
  24. * Arthur Shipkowski (art@videon-central.com)
  25. */
  26. #include <asm/coldfire.h>
  27. #include <asm/mcfsim.h>
  28. #include <asm/mcfdma.h>
  29. /*
  30. * Set number of channels of DMA on ColdFire for different implementations.
  31. */
  32. #if defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407) || \
  33. defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x)
  34. #define MAX_M68K_DMA_CHANNELS 4
  35. #elif defined(CONFIG_M5272)
  36. #define MAX_M68K_DMA_CHANNELS 1
  37. #elif defined(CONFIG_M532x)
  38. #define MAX_M68K_DMA_CHANNELS 0
  39. #else
  40. #define MAX_M68K_DMA_CHANNELS 2
  41. #endif
  42. extern unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS];
  43. extern unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];
  44. #if !defined(CONFIG_M5272)
  45. #define DMA_MODE_WRITE_BIT 0x01 /* Memory/IO to IO/Memory select */
  46. #define DMA_MODE_WORD_BIT 0x02 /* 8 or 16 bit transfers */
  47. #define DMA_MODE_LONG_BIT 0x04 /* or 32 bit transfers */
  48. #define DMA_MODE_SINGLE_BIT 0x08 /* single-address-mode */
  49. /* I/O to memory, 8 bits, mode */
  50. #define DMA_MODE_READ 0
  51. /* memory to I/O, 8 bits, mode */
  52. #define DMA_MODE_WRITE 1
  53. /* I/O to memory, 16 bits, mode */
  54. #define DMA_MODE_READ_WORD 2
  55. /* memory to I/O, 16 bits, mode */
  56. #define DMA_MODE_WRITE_WORD 3
  57. /* I/O to memory, 32 bits, mode */
  58. #define DMA_MODE_READ_LONG 4
  59. /* memory to I/O, 32 bits, mode */
  60. #define DMA_MODE_WRITE_LONG 5
  61. /* I/O to memory, 8 bits, single-address-mode */
  62. #define DMA_MODE_READ_SINGLE 8
  63. /* memory to I/O, 8 bits, single-address-mode */
  64. #define DMA_MODE_WRITE_SINGLE 9
  65. /* I/O to memory, 16 bits, single-address-mode */
  66. #define DMA_MODE_READ_WORD_SINGLE 10
  67. /* memory to I/O, 16 bits, single-address-mode */
  68. #define DMA_MODE_WRITE_WORD_SINGLE 11
  69. /* I/O to memory, 32 bits, single-address-mode */
  70. #define DMA_MODE_READ_LONG_SINGLE 12
  71. /* memory to I/O, 32 bits, single-address-mode */
  72. #define DMA_MODE_WRITE_LONG_SINGLE 13
  73. #else /* CONFIG_M5272 is defined */
  74. /* Source static-address mode */
  75. #define DMA_MODE_SRC_SA_BIT 0x01
  76. /* Two bits to select between all four modes */
  77. #define DMA_MODE_SSIZE_MASK 0x06
  78. /* Offset to shift bits in */
  79. #define DMA_MODE_SSIZE_OFF 0x01
  80. /* Destination static-address mode */
  81. #define DMA_MODE_DES_SA_BIT 0x10
  82. /* Two bits to select between all four modes */
  83. #define DMA_MODE_DSIZE_MASK 0x60
  84. /* Offset to shift bits in */
  85. #define DMA_MODE_DSIZE_OFF 0x05
  86. /* Size modifiers */
  87. #define DMA_MODE_SIZE_LONG 0x00
  88. #define DMA_MODE_SIZE_BYTE 0x01
  89. #define DMA_MODE_SIZE_WORD 0x02
  90. #define DMA_MODE_SIZE_LINE 0x03
  91. /*
  92. * Aliases to help speed quick ports; these may be suboptimal, however. They
  93. * do not include the SINGLE mode modifiers since the MCF5272 does not have a
  94. * mode where the device is in control of its addressing.
  95. */
  96. /* I/O to memory, 8 bits, mode */
  97. #define DMA_MODE_READ ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
  98. /* memory to I/O, 8 bits, mode */
  99. #define DMA_MODE_WRITE ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
  100. /* I/O to memory, 16 bits, mode */
  101. #define DMA_MODE_READ_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
  102. /* memory to I/O, 16 bits, mode */
  103. #define DMA_MODE_WRITE_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
  104. /* I/O to memory, 32 bits, mode */
  105. #define DMA_MODE_READ_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
  106. /* memory to I/O, 32 bits, mode */
  107. #define DMA_MODE_WRITE_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
  108. #endif /* !defined(CONFIG_M5272) */
  109. #if !defined(CONFIG_M5272)
  110. /* enable/disable a specific DMA channel */
  111. static __inline__ void enable_dma(unsigned int dmanr)
  112. {
  113. volatile unsigned short *dmawp;
  114. #ifdef DMA_DEBUG
  115. printk("enable_dma(dmanr=%d)\n", dmanr);
  116. #endif
  117. dmawp = (unsigned short *) dma_base_addr[dmanr];
  118. dmawp[MCFDMA_DCR] |= MCFDMA_DCR_EEXT;
  119. }
  120. static __inline__ void disable_dma(unsigned int dmanr)
  121. {
  122. volatile unsigned short *dmawp;
  123. volatile unsigned char *dmapb;
  124. #ifdef DMA_DEBUG
  125. printk("disable_dma(dmanr=%d)\n", dmanr);
  126. #endif
  127. dmawp = (unsigned short *) dma_base_addr[dmanr];
  128. dmapb = (unsigned char *) dma_base_addr[dmanr];
  129. /* Turn off external requests, and stop any DMA in progress */
  130. dmawp[MCFDMA_DCR] &= ~MCFDMA_DCR_EEXT;
  131. dmapb[MCFDMA_DSR] = MCFDMA_DSR_DONE;
  132. }
  133. /*
  134. * Clear the 'DMA Pointer Flip Flop'.
  135. * Write 0 for LSB/MSB, 1 for MSB/LSB access.
  136. * Use this once to initialize the FF to a known state.
  137. * After that, keep track of it. :-)
  138. * --- In order to do that, the DMA routines below should ---
  139. * --- only be used while interrupts are disabled! ---
  140. *
  141. * This is a NOP for ColdFire. Provide a stub for compatibility.
  142. */
  143. static __inline__ void clear_dma_ff(unsigned int dmanr)
  144. {
  145. }
  146. /* set mode (above) for a specific DMA channel */
  147. static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
  148. {
  149. volatile unsigned char *dmabp;
  150. volatile unsigned short *dmawp;
  151. #ifdef DMA_DEBUG
  152. printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
  153. #endif
  154. dmabp = (unsigned char *) dma_base_addr[dmanr];
  155. dmawp = (unsigned short *) dma_base_addr[dmanr];
  156. /* Clear config errors */
  157. dmabp[MCFDMA_DSR] = MCFDMA_DSR_DONE;
  158. /* Set command register */
  159. dmawp[MCFDMA_DCR] =
  160. MCFDMA_DCR_INT | /* Enable completion irq */
  161. MCFDMA_DCR_CS | /* Force one xfer per request */
  162. MCFDMA_DCR_AA | /* Enable auto alignment */
  163. /* single-address-mode */
  164. ((mode & DMA_MODE_SINGLE_BIT) ? MCFDMA_DCR_SAA : 0) |
  165. /* sets s_rw (-> r/w) high if Memory to I/0 */
  166. ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_S_RW : 0) |
  167. /* Memory to I/O or I/O to Memory */
  168. ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_SINC : MCFDMA_DCR_DINC) |
  169. /* 32 bit, 16 bit or 8 bit transfers */
  170. ((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_SSIZE_WORD :
  171. ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_SSIZE_LONG :
  172. MCFDMA_DCR_SSIZE_BYTE)) |
  173. ((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_DSIZE_WORD :
  174. ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_DSIZE_LONG :
  175. MCFDMA_DCR_DSIZE_BYTE));
  176. #ifdef DEBUG_DMA
  177. printk("%s(%d): dmanr=%d DSR[%x]=%x DCR[%x]=%x\n", __FILE__, __LINE__,
  178. dmanr, (int) &dmabp[MCFDMA_DSR], dmabp[MCFDMA_DSR],
  179. (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR]);
  180. #endif
  181. }
  182. /* Set transfer address for specific DMA channel */
  183. static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
  184. {
  185. volatile unsigned short *dmawp;
  186. volatile unsigned int *dmalp;
  187. #ifdef DMA_DEBUG
  188. printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
  189. #endif
  190. dmawp = (unsigned short *) dma_base_addr[dmanr];
  191. dmalp = (unsigned int *) dma_base_addr[dmanr];
  192. /* Determine which address registers are used for memory/device accesses */
  193. if (dmawp[MCFDMA_DCR] & MCFDMA_DCR_SINC) {
  194. /* Source incrementing, must be memory */
  195. dmalp[MCFDMA_SAR] = a;
  196. /* Set dest address, must be device */
  197. dmalp[MCFDMA_DAR] = dma_device_address[dmanr];
  198. } else {
  199. /* Destination incrementing, must be memory */
  200. dmalp[MCFDMA_DAR] = a;
  201. /* Set source address, must be device */
  202. dmalp[MCFDMA_SAR] = dma_device_address[dmanr];
  203. }
  204. #ifdef DEBUG_DMA
  205. printk("%s(%d): dmanr=%d DCR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
  206. __FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR],
  207. (int) &dmalp[MCFDMA_SAR], dmalp[MCFDMA_SAR],
  208. (int) &dmalp[MCFDMA_DAR], dmalp[MCFDMA_DAR]);
  209. #endif
  210. }
  211. /*
  212. * Specific for Coldfire - sets device address.
  213. * Should be called after the mode set call, and before set DMA address.
  214. */
  215. static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
  216. {
  217. #ifdef DMA_DEBUG
  218. printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
  219. #endif
  220. dma_device_address[dmanr] = a;
  221. }
  222. /*
  223. * NOTE 2: "count" represents _bytes_.
  224. */
  225. static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
  226. {
  227. volatile unsigned short *dmawp;
  228. #ifdef DMA_DEBUG
  229. printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
  230. #endif
  231. dmawp = (unsigned short *) dma_base_addr[dmanr];
  232. dmawp[MCFDMA_BCR] = (unsigned short)count;
  233. }
  234. /*
  235. * Get DMA residue count. After a DMA transfer, this
  236. * should return zero. Reading this while a DMA transfer is
  237. * still in progress will return unpredictable results.
  238. * Otherwise, it returns the number of _bytes_ left to transfer.
  239. */
  240. static __inline__ int get_dma_residue(unsigned int dmanr)
  241. {
  242. volatile unsigned short *dmawp;
  243. unsigned short count;
  244. #ifdef DMA_DEBUG
  245. printk("get_dma_residue(dmanr=%d)\n", dmanr);
  246. #endif
  247. dmawp = (unsigned short *) dma_base_addr[dmanr];
  248. count = dmawp[MCFDMA_BCR];
  249. return((int) count);
  250. }
  251. #else /* CONFIG_M5272 is defined */
  252. /*
  253. * The MCF5272 DMA controller is very different than the controller defined above
  254. * in terms of register mapping. For instance, with the exception of the 16-bit
  255. * interrupt register (IRQ#85, for reference), all of the registers are 32-bit.
  256. *
  257. * The big difference, however, is the lack of device-requested DMA. All modes
  258. * are dual address transfer, and there is no 'device' setup or direction bit.
  259. * You can DMA between a device and memory, between memory and memory, or even between
  260. * two devices directly, with any combination of incrementing and non-incrementing
  261. * addresses you choose. This puts a crimp in distinguishing between the 'device
  262. * address' set up by set_dma_device_addr.
  263. *
  264. * Therefore, there are two options. One is to use set_dma_addr and set_dma_device_addr,
  265. * which will act exactly as above in -- it will look to see if the source is set to
  266. * autoincrement, and if so it will make the source use the set_dma_addr value and the
  267. * destination the set_dma_device_addr value. Otherwise the source will be set to the
  268. * set_dma_device_addr value and the destination will get the set_dma_addr value.
  269. *
  270. * The other is to use the provided set_dma_src_addr and set_dma_dest_addr functions
  271. * and make it explicit. Depending on what you're doing, one of these two should work
  272. * for you, but don't mix them in the same transfer setup.
  273. */
  274. /* enable/disable a specific DMA channel */
  275. static __inline__ void enable_dma(unsigned int dmanr)
  276. {
  277. volatile unsigned int *dmalp;
  278. #ifdef DMA_DEBUG
  279. printk("enable_dma(dmanr=%d)\n", dmanr);
  280. #endif
  281. dmalp = (unsigned int *) dma_base_addr[dmanr];
  282. dmalp[MCFDMA_DMR] |= MCFDMA_DMR_EN;
  283. }
  284. static __inline__ void disable_dma(unsigned int dmanr)
  285. {
  286. volatile unsigned int *dmalp;
  287. #ifdef DMA_DEBUG
  288. printk("disable_dma(dmanr=%d)\n", dmanr);
  289. #endif
  290. dmalp = (unsigned int *) dma_base_addr[dmanr];
  291. /* Turn off external requests, and stop any DMA in progress */
  292. dmalp[MCFDMA_DMR] &= ~MCFDMA_DMR_EN;
  293. dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
  294. }
  295. /*
  296. * Clear the 'DMA Pointer Flip Flop'.
  297. * Write 0 for LSB/MSB, 1 for MSB/LSB access.
  298. * Use this once to initialize the FF to a known state.
  299. * After that, keep track of it. :-)
  300. * --- In order to do that, the DMA routines below should ---
  301. * --- only be used while interrupts are disabled! ---
  302. *
  303. * This is a NOP for ColdFire. Provide a stub for compatibility.
  304. */
  305. static __inline__ void clear_dma_ff(unsigned int dmanr)
  306. {
  307. }
  308. /* set mode (above) for a specific DMA channel */
  309. static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
  310. {
  311. volatile unsigned int *dmalp;
  312. volatile unsigned short *dmawp;
  313. #ifdef DMA_DEBUG
  314. printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
  315. #endif
  316. dmalp = (unsigned int *) dma_base_addr[dmanr];
  317. dmawp = (unsigned short *) dma_base_addr[dmanr];
  318. /* Clear config errors */
  319. dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
  320. /* Set command register */
  321. dmalp[MCFDMA_DMR] =
  322. MCFDMA_DMR_RQM_DUAL | /* Mandatory Request Mode setting */
  323. MCFDMA_DMR_DSTT_SD | /* Set up addressing types; set to supervisor-data. */
  324. MCFDMA_DMR_SRCT_SD | /* Set up addressing types; set to supervisor-data. */
  325. /* source static-address-mode */
  326. ((mode & DMA_MODE_SRC_SA_BIT) ? MCFDMA_DMR_SRCM_SA : MCFDMA_DMR_SRCM_IA) |
  327. /* dest static-address-mode */
  328. ((mode & DMA_MODE_DES_SA_BIT) ? MCFDMA_DMR_DSTM_SA : MCFDMA_DMR_DSTM_IA) |
  329. /* burst, 32 bit, 16 bit or 8 bit transfers are separately configurable on the MCF5272 */
  330. (((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_DSTS_OFF) |
  331. (((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_SRCS_OFF);
  332. dmawp[MCFDMA_DIR] |= MCFDMA_DIR_ASCEN; /* Enable completion interrupts */
  333. #ifdef DEBUG_DMA
  334. printk("%s(%d): dmanr=%d DMR[%x]=%x DIR[%x]=%x\n", __FILE__, __LINE__,
  335. dmanr, (int) &dmalp[MCFDMA_DMR], dmabp[MCFDMA_DMR],
  336. (int) &dmawp[MCFDMA_DIR], dmawp[MCFDMA_DIR]);
  337. #endif
  338. }
  339. /* Set transfer address for specific DMA channel */
  340. static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
  341. {
  342. volatile unsigned int *dmalp;
  343. #ifdef DMA_DEBUG
  344. printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
  345. #endif
  346. dmalp = (unsigned int *) dma_base_addr[dmanr];
  347. /* Determine which address registers are used for memory/device accesses */
  348. if (dmalp[MCFDMA_DMR] & MCFDMA_DMR_SRCM) {
  349. /* Source incrementing, must be memory */
  350. dmalp[MCFDMA_DSAR] = a;
  351. /* Set dest address, must be device */
  352. dmalp[MCFDMA_DDAR] = dma_device_address[dmanr];
  353. } else {
  354. /* Destination incrementing, must be memory */
  355. dmalp[MCFDMA_DDAR] = a;
  356. /* Set source address, must be device */
  357. dmalp[MCFDMA_DSAR] = dma_device_address[dmanr];
  358. }
  359. #ifdef DEBUG_DMA
  360. printk("%s(%d): dmanr=%d DMR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
  361. __FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DMR], dmawp[MCFDMA_DMR],
  362. (int) &dmalp[MCFDMA_DSAR], dmalp[MCFDMA_DSAR],
  363. (int) &dmalp[MCFDMA_DDAR], dmalp[MCFDMA_DDAR]);
  364. #endif
  365. }
  366. /*
  367. * Specific for Coldfire - sets device address.
  368. * Should be called after the mode set call, and before set DMA address.
  369. */
  370. static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
  371. {
  372. #ifdef DMA_DEBUG
  373. printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
  374. #endif
  375. dma_device_address[dmanr] = a;
  376. }
  377. /*
  378. * NOTE 2: "count" represents _bytes_.
  379. *
  380. * NOTE 3: While a 32-bit register, "count" is only a maximum 24-bit value.
  381. */
  382. static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
  383. {
  384. volatile unsigned int *dmalp;
  385. #ifdef DMA_DEBUG
  386. printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
  387. #endif
  388. dmalp = (unsigned int *) dma_base_addr[dmanr];
  389. dmalp[MCFDMA_DBCR] = count;
  390. }
  391. /*
  392. * Get DMA residue count. After a DMA transfer, this
  393. * should return zero. Reading this while a DMA transfer is
  394. * still in progress will return unpredictable results.
  395. * Otherwise, it returns the number of _bytes_ left to transfer.
  396. */
  397. static __inline__ int get_dma_residue(unsigned int dmanr)
  398. {
  399. volatile unsigned int *dmalp;
  400. unsigned int count;
  401. #ifdef DMA_DEBUG
  402. printk("get_dma_residue(dmanr=%d)\n", dmanr);
  403. #endif
  404. dmalp = (unsigned int *) dma_base_addr[dmanr];
  405. count = dmalp[MCFDMA_DBCR];
  406. return(count);
  407. }
  408. #endif /* !defined(CONFIG_M5272) */
  409. #endif /* CONFIG_COLDFIRE */
  410. /* it's useless on the m68k, but unfortunately needed by the new
  411. bootmem allocator (but this should do it for this) */
  412. #define MAX_DMA_ADDRESS PAGE_OFFSET
  413. #define MAX_DMA_CHANNELS 8
  414. extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
  415. extern void free_dma(unsigned int dmanr); /* release it again */
  416. #define isa_dma_bridge_buggy (0)
  417. #endif /* _M68K_DMA_H */