/* dma.h — m68k/ColdFire DMA interface definitions */
  1. #ifndef _M68K_DMA_H
  2. #define _M68K_DMA_H 1
  3. //#define DMA_DEBUG 1
  4. #ifdef CONFIG_COLDFIRE
  5. /*
  6. * ColdFire DMA Model:
  7. * ColdFire DMA supports two forms of DMA: Single and Dual address. Single
  8. * address mode emits a source address, and expects that the device will either
  9. * pick up the data (DMA READ) or source data (DMA WRITE). This implies that
  10. * the device will place data on the correct byte(s) of the data bus, as the
  11. * memory transactions are always 32 bits. This implies that only 32 bit
  12. * devices will find single mode transfers useful. Dual address DMA mode
  13. * performs two cycles: source read and destination write. ColdFire will
  14. * align the data so that the device will always get the correct bytes, thus
  15. * is useful for 8 and 16 bit devices. This is the mode that is supported
  16. * below.
  17. *
  18. * AUG/22/2000 : added support for 32-bit Dual-Address-Mode (K) 2000
  19. * Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
  20. *
  21. * AUG/25/2000 : addad support for 8, 16 and 32-bit Single-Address-Mode (K)2000
  22. * Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
  23. *
  24. * APR/18/2002 : added proper support for MCF5272 DMA controller.
  25. * Arthur Shipkowski (art@videon-central.com)
  26. */
  27. #include <asm/coldfire.h>
  28. #include <asm/mcfsim.h>
  29. #include <asm/mcfdma.h>
  30. /*
  31. * Set number of channels of DMA on ColdFire for different implementations.
  32. */
  33. #if defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407)
  34. #define MAX_M68K_DMA_CHANNELS 4
  35. #elif defined(CONFIG_M5272)
  36. #define MAX_M68K_DMA_CHANNELS 1
  37. #else
  38. #define MAX_M68K_DMA_CHANNELS 2
  39. #endif
  40. extern unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS];
  41. extern unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];
  42. #if !defined(CONFIG_M5272)
  43. #define DMA_MODE_WRITE_BIT 0x01 /* Memory/IO to IO/Memory select */
  44. #define DMA_MODE_WORD_BIT 0x02 /* 8 or 16 bit transfers */
  45. #define DMA_MODE_LONG_BIT 0x04 /* or 32 bit transfers */
  46. #define DMA_MODE_SINGLE_BIT 0x08 /* single-address-mode */
  47. /* I/O to memory, 8 bits, mode */
  48. #define DMA_MODE_READ 0
  49. /* memory to I/O, 8 bits, mode */
  50. #define DMA_MODE_WRITE 1
  51. /* I/O to memory, 16 bits, mode */
  52. #define DMA_MODE_READ_WORD 2
  53. /* memory to I/O, 16 bits, mode */
  54. #define DMA_MODE_WRITE_WORD 3
  55. /* I/O to memory, 32 bits, mode */
  56. #define DMA_MODE_READ_LONG 4
  57. /* memory to I/O, 32 bits, mode */
  58. #define DMA_MODE_WRITE_LONG 5
  59. /* I/O to memory, 8 bits, single-address-mode */
  60. #define DMA_MODE_READ_SINGLE 8
  61. /* memory to I/O, 8 bits, single-address-mode */
  62. #define DMA_MODE_WRITE_SINGLE 9
  63. /* I/O to memory, 16 bits, single-address-mode */
  64. #define DMA_MODE_READ_WORD_SINGLE 10
  65. /* memory to I/O, 16 bits, single-address-mode */
  66. #define DMA_MODE_WRITE_WORD_SINGLE 11
  67. /* I/O to memory, 32 bits, single-address-mode */
  68. #define DMA_MODE_READ_LONG_SINGLE 12
  69. /* memory to I/O, 32 bits, single-address-mode */
  70. #define DMA_MODE_WRITE_LONG_SINGLE 13
  71. #else /* CONFIG_M5272 is defined */
  72. /* Source static-address mode */
  73. #define DMA_MODE_SRC_SA_BIT 0x01
  74. /* Two bits to select between all four modes */
  75. #define DMA_MODE_SSIZE_MASK 0x06
  76. /* Offset to shift bits in */
  77. #define DMA_MODE_SSIZE_OFF 0x01
  78. /* Destination static-address mode */
  79. #define DMA_MODE_DES_SA_BIT 0x10
  80. /* Two bits to select between all four modes */
  81. #define DMA_MODE_DSIZE_MASK 0x60
  82. /* Offset to shift bits in */
  83. #define DMA_MODE_DSIZE_OFF 0x05
  84. /* Size modifiers */
  85. #define DMA_MODE_SIZE_LONG 0x00
  86. #define DMA_MODE_SIZE_BYTE 0x01
  87. #define DMA_MODE_SIZE_WORD 0x02
  88. #define DMA_MODE_SIZE_LINE 0x03
  89. /*
  90. * Aliases to help speed quick ports; these may be suboptimal, however. They
  91. * do not include the SINGLE mode modifiers since the MCF5272 does not have a
  92. * mode where the device is in control of its addressing.
  93. */
  94. /* I/O to memory, 8 bits, mode */
  95. #define DMA_MODE_READ ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
  96. /* memory to I/O, 8 bits, mode */
  97. #define DMA_MODE_WRITE ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
  98. /* I/O to memory, 16 bits, mode */
  99. #define DMA_MODE_READ_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
  100. /* memory to I/O, 16 bits, mode */
  101. #define DMA_MODE_WRITE_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
  102. /* I/O to memory, 32 bits, mode */
  103. #define DMA_MODE_READ_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_SRC_SA_BIT)
  104. /* memory to I/O, 32 bits, mode */
  105. #define DMA_MODE_WRITE_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_DES_SA_BIT)
  106. #endif /* !defined(CONFIG_M5272) */
  107. #if !defined(CONFIG_M5272)
  108. /* enable/disable a specific DMA channel */
  109. static __inline__ void enable_dma(unsigned int dmanr)
  110. {
  111. volatile unsigned short *dmawp;
  112. #ifdef DMA_DEBUG
  113. printk("enable_dma(dmanr=%d)\n", dmanr);
  114. #endif
  115. dmawp = (unsigned short *) dma_base_addr[dmanr];
  116. dmawp[MCFDMA_DCR] |= MCFDMA_DCR_EEXT;
  117. }
  118. static __inline__ void disable_dma(unsigned int dmanr)
  119. {
  120. volatile unsigned short *dmawp;
  121. volatile unsigned char *dmapb;
  122. #ifdef DMA_DEBUG
  123. printk("disable_dma(dmanr=%d)\n", dmanr);
  124. #endif
  125. dmawp = (unsigned short *) dma_base_addr[dmanr];
  126. dmapb = (unsigned char *) dma_base_addr[dmanr];
  127. /* Turn off external requests, and stop any DMA in progress */
  128. dmawp[MCFDMA_DCR] &= ~MCFDMA_DCR_EEXT;
  129. dmapb[MCFDMA_DSR] = MCFDMA_DSR_DONE;
  130. }
  131. /*
  132. * Clear the 'DMA Pointer Flip Flop'.
  133. * Write 0 for LSB/MSB, 1 for MSB/LSB access.
  134. * Use this once to initialize the FF to a known state.
  135. * After that, keep track of it. :-)
  136. * --- In order to do that, the DMA routines below should ---
  137. * --- only be used while interrupts are disabled! ---
  138. *
  139. * This is a NOP for ColdFire. Provide a stub for compatibility.
  140. */
  141. static __inline__ void clear_dma_ff(unsigned int dmanr)
  142. {
  143. }
  144. /* set mode (above) for a specific DMA channel */
  145. static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
  146. {
  147. volatile unsigned char *dmabp;
  148. volatile unsigned short *dmawp;
  149. #ifdef DMA_DEBUG
  150. printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
  151. #endif
  152. dmabp = (unsigned char *) dma_base_addr[dmanr];
  153. dmawp = (unsigned short *) dma_base_addr[dmanr];
  154. // Clear config errors
  155. dmabp[MCFDMA_DSR] = MCFDMA_DSR_DONE;
  156. // Set command register
  157. dmawp[MCFDMA_DCR] =
  158. MCFDMA_DCR_INT | // Enable completion irq
  159. MCFDMA_DCR_CS | // Force one xfer per request
  160. MCFDMA_DCR_AA | // Enable auto alignment
  161. // single-address-mode
  162. ((mode & DMA_MODE_SINGLE_BIT) ? MCFDMA_DCR_SAA : 0) |
  163. // sets s_rw (-> r/w) high if Memory to I/0
  164. ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_S_RW : 0) |
  165. // Memory to I/O or I/O to Memory
  166. ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_SINC : MCFDMA_DCR_DINC) |
  167. // 32 bit, 16 bit or 8 bit transfers
  168. ((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_SSIZE_WORD :
  169. ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_SSIZE_LONG :
  170. MCFDMA_DCR_SSIZE_BYTE)) |
  171. ((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_DSIZE_WORD :
  172. ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_DSIZE_LONG :
  173. MCFDMA_DCR_DSIZE_BYTE));
  174. #ifdef DEBUG_DMA
  175. printk("%s(%d): dmanr=%d DSR[%x]=%x DCR[%x]=%x\n", __FILE__, __LINE__,
  176. dmanr, (int) &dmabp[MCFDMA_DSR], dmabp[MCFDMA_DSR],
  177. (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR]);
  178. #endif
  179. }
  180. /* Set transfer address for specific DMA channel */
  181. static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
  182. {
  183. volatile unsigned short *dmawp;
  184. volatile unsigned int *dmalp;
  185. #ifdef DMA_DEBUG
  186. printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
  187. #endif
  188. dmawp = (unsigned short *) dma_base_addr[dmanr];
  189. dmalp = (unsigned int *) dma_base_addr[dmanr];
  190. // Determine which address registers are used for memory/device accesses
  191. if (dmawp[MCFDMA_DCR] & MCFDMA_DCR_SINC) {
  192. // Source incrementing, must be memory
  193. dmalp[MCFDMA_SAR] = a;
  194. // Set dest address, must be device
  195. dmalp[MCFDMA_DAR] = dma_device_address[dmanr];
  196. } else {
  197. // Destination incrementing, must be memory
  198. dmalp[MCFDMA_DAR] = a;
  199. // Set source address, must be device
  200. dmalp[MCFDMA_SAR] = dma_device_address[dmanr];
  201. }
  202. #ifdef DEBUG_DMA
  203. printk("%s(%d): dmanr=%d DCR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
  204. __FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR],
  205. (int) &dmalp[MCFDMA_SAR], dmalp[MCFDMA_SAR],
  206. (int) &dmalp[MCFDMA_DAR], dmalp[MCFDMA_DAR]);
  207. #endif
  208. }
  209. /*
  210. * Specific for Coldfire - sets device address.
  211. * Should be called after the mode set call, and before set DMA address.
  212. */
  213. static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
  214. {
  215. #ifdef DMA_DEBUG
  216. printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
  217. #endif
  218. dma_device_address[dmanr] = a;
  219. }
  220. /*
  221. * NOTE 2: "count" represents _bytes_.
  222. */
  223. static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
  224. {
  225. volatile unsigned short *dmawp;
  226. #ifdef DMA_DEBUG
  227. printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
  228. #endif
  229. dmawp = (unsigned short *) dma_base_addr[dmanr];
  230. dmawp[MCFDMA_BCR] = (unsigned short)count;
  231. }
  232. /*
  233. * Get DMA residue count. After a DMA transfer, this
  234. * should return zero. Reading this while a DMA transfer is
  235. * still in progress will return unpredictable results.
  236. * Otherwise, it returns the number of _bytes_ left to transfer.
  237. */
  238. static __inline__ int get_dma_residue(unsigned int dmanr)
  239. {
  240. volatile unsigned short *dmawp;
  241. unsigned short count;
  242. #ifdef DMA_DEBUG
  243. printk("get_dma_residue(dmanr=%d)\n", dmanr);
  244. #endif
  245. dmawp = (unsigned short *) dma_base_addr[dmanr];
  246. count = dmawp[MCFDMA_BCR];
  247. return((int) count);
  248. }
  249. #else /* CONFIG_M5272 is defined */
  250. /*
  251. * The MCF5272 DMA controller is very different than the controller defined above
  252. * in terms of register mapping. For instance, with the exception of the 16-bit
  253. * interrupt register (IRQ#85, for reference), all of the registers are 32-bit.
  254. *
  255. * The big difference, however, is the lack of device-requested DMA. All modes
  256. * are dual address transfer, and there is no 'device' setup or direction bit.
  257. * You can DMA between a device and memory, between memory and memory, or even between
  258. * two devices directly, with any combination of incrementing and non-incrementing
  259. * addresses you choose. This puts a crimp in distinguishing between the 'device
  260. * address' set up by set_dma_device_addr.
  261. *
  262. * Therefore, there are two options. One is to use set_dma_addr and set_dma_device_addr,
  263. * which will act exactly as above in -- it will look to see if the source is set to
  264. * autoincrement, and if so it will make the source use the set_dma_addr value and the
  265. * destination the set_dma_device_addr value. Otherwise the source will be set to the
  266. * set_dma_device_addr value and the destination will get the set_dma_addr value.
  267. *
  268. * The other is to use the provided set_dma_src_addr and set_dma_dest_addr functions
  269. * and make it explicit. Depending on what you're doing, one of these two should work
  270. * for you, but don't mix them in the same transfer setup.
  271. */
  272. /* enable/disable a specific DMA channel */
  273. static __inline__ void enable_dma(unsigned int dmanr)
  274. {
  275. volatile unsigned int *dmalp;
  276. #ifdef DMA_DEBUG
  277. printk("enable_dma(dmanr=%d)\n", dmanr);
  278. #endif
  279. dmalp = (unsigned int *) dma_base_addr[dmanr];
  280. dmalp[MCFDMA_DMR] |= MCFDMA_DMR_EN;
  281. }
  282. static __inline__ void disable_dma(unsigned int dmanr)
  283. {
  284. volatile unsigned int *dmalp;
  285. #ifdef DMA_DEBUG
  286. printk("disable_dma(dmanr=%d)\n", dmanr);
  287. #endif
  288. dmalp = (unsigned int *) dma_base_addr[dmanr];
  289. /* Turn off external requests, and stop any DMA in progress */
  290. dmalp[MCFDMA_DMR] &= ~MCFDMA_DMR_EN;
  291. dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
  292. }
  293. /*
  294. * Clear the 'DMA Pointer Flip Flop'.
  295. * Write 0 for LSB/MSB, 1 for MSB/LSB access.
  296. * Use this once to initialize the FF to a known state.
  297. * After that, keep track of it. :-)
  298. * --- In order to do that, the DMA routines below should ---
  299. * --- only be used while interrupts are disabled! ---
  300. *
  301. * This is a NOP for ColdFire. Provide a stub for compatibility.
  302. */
  303. static __inline__ void clear_dma_ff(unsigned int dmanr)
  304. {
  305. }
  306. /* set mode (above) for a specific DMA channel */
  307. static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
  308. {
  309. volatile unsigned int *dmalp;
  310. volatile unsigned short *dmawp;
  311. #ifdef DMA_DEBUG
  312. printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
  313. #endif
  314. dmalp = (unsigned int *) dma_base_addr[dmanr];
  315. dmawp = (unsigned short *) dma_base_addr[dmanr];
  316. // Clear config errors
  317. dmalp[MCFDMA_DMR] |= MCFDMA_DMR_RESET;
  318. // Set command register
  319. dmalp[MCFDMA_DMR] =
  320. MCFDMA_DMR_RQM_DUAL | // Mandatory Request Mode setting
  321. MCFDMA_DMR_DSTT_SD | // Set up addressing types; set to supervisor-data.
  322. MCFDMA_DMR_SRCT_SD | // Set up addressing types; set to supervisor-data.
  323. // source static-address-mode
  324. ((mode & DMA_MODE_SRC_SA_BIT) ? MCFDMA_DMR_SRCM_SA : MCFDMA_DMR_SRCM_IA) |
  325. // dest static-address-mode
  326. ((mode & DMA_MODE_DES_SA_BIT) ? MCFDMA_DMR_DSTM_SA : MCFDMA_DMR_DSTM_IA) |
  327. // burst, 32 bit, 16 bit or 8 bit transfers are separately configurable on the MCF5272
  328. (((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_DSTS_OFF) |
  329. (((mode & DMA_MODE_SSIZE_MASK) >> DMA_MODE_SSIZE_OFF) << MCFDMA_DMR_SRCS_OFF);
  330. dmawp[MCFDMA_DIR] |= MCFDMA_DIR_ASCEN; /* Enable completion interrupts */
  331. #ifdef DEBUG_DMA
  332. printk("%s(%d): dmanr=%d DMR[%x]=%x DIR[%x]=%x\n", __FILE__, __LINE__,
  333. dmanr, (int) &dmalp[MCFDMA_DMR], dmabp[MCFDMA_DMR],
  334. (int) &dmawp[MCFDMA_DIR], dmawp[MCFDMA_DIR]);
  335. #endif
  336. }
  337. /* Set transfer address for specific DMA channel */
  338. static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
  339. {
  340. volatile unsigned int *dmalp;
  341. #ifdef DMA_DEBUG
  342. printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
  343. #endif
  344. dmalp = (unsigned int *) dma_base_addr[dmanr];
  345. // Determine which address registers are used for memory/device accesses
  346. if (dmalp[MCFDMA_DMR] & MCFDMA_DMR_SRCM) {
  347. // Source incrementing, must be memory
  348. dmalp[MCFDMA_DSAR] = a;
  349. // Set dest address, must be device
  350. dmalp[MCFDMA_DDAR] = dma_device_address[dmanr];
  351. } else {
  352. // Destination incrementing, must be memory
  353. dmalp[MCFDMA_DDAR] = a;
  354. // Set source address, must be device
  355. dmalp[MCFDMA_DSAR] = dma_device_address[dmanr];
  356. }
  357. #ifdef DEBUG_DMA
  358. printk("%s(%d): dmanr=%d DMR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
  359. __FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DMR], dmawp[MCFDMA_DMR],
  360. (int) &dmalp[MCFDMA_DSAR], dmalp[MCFDMA_DSAR],
  361. (int) &dmalp[MCFDMA_DDAR], dmalp[MCFDMA_DDAR]);
  362. #endif
  363. }
  364. /*
  365. * Specific for Coldfire - sets device address.
  366. * Should be called after the mode set call, and before set DMA address.
  367. */
  368. static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
  369. {
  370. #ifdef DMA_DEBUG
  371. printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
  372. #endif
  373. dma_device_address[dmanr] = a;
  374. }
  375. /*
  376. * NOTE 2: "count" represents _bytes_.
  377. *
  378. * NOTE 3: While a 32-bit register, "count" is only a maximum 24-bit value.
  379. */
  380. static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
  381. {
  382. volatile unsigned int *dmalp;
  383. #ifdef DMA_DEBUG
  384. printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
  385. #endif
  386. dmalp = (unsigned int *) dma_base_addr[dmanr];
  387. dmalp[MCFDMA_DBCR] = count;
  388. }
  389. /*
  390. * Get DMA residue count. After a DMA transfer, this
  391. * should return zero. Reading this while a DMA transfer is
  392. * still in progress will return unpredictable results.
  393. * Otherwise, it returns the number of _bytes_ left to transfer.
  394. */
  395. static __inline__ int get_dma_residue(unsigned int dmanr)
  396. {
  397. volatile unsigned int *dmalp;
  398. unsigned int count;
  399. #ifdef DMA_DEBUG
  400. printk("get_dma_residue(dmanr=%d)\n", dmanr);
  401. #endif
  402. dmalp = (unsigned int *) dma_base_addr[dmanr];
  403. count = dmalp[MCFDMA_DBCR];
  404. return(count);
  405. }
  406. #endif /* !defined(CONFIG_M5272) */
  407. #endif /* CONFIG_COLDFIRE */
  408. #define MAX_DMA_CHANNELS 8
  409. /* Don't define MAX_DMA_ADDRESS; it's useless on the m68k/coldfire and any
  410. occurrence should be flagged as an error. */
  411. /* under 2.4 it is actually needed by the new bootmem allocator */
  412. #define MAX_DMA_ADDRESS PAGE_OFFSET
  413. /* These are in kernel/dma.c: */
  414. extern int request_dma(unsigned int dmanr, const char *device_id); /* reserve a DMA channel */
  415. extern void free_dma(unsigned int dmanr); /* release it again */
  416. #endif /* _M68K_DMA_H */