  1. /* $Id: dma.h,v 1.35 1999/12/27 06:37:09 anton Exp $
  2. * include/asm-sparc/dma.h
  3. *
  4. * Copyright 1995 (C) David S. Miller (davem@caip.rutgers.edu)
  5. */
  6. #ifndef _ASM_SPARC_DMA_H
  7. #define _ASM_SPARC_DMA_H
  8. #include <linux/kernel.h>
  9. #include <linux/types.h>
  10. #include <asm/vac-ops.h> /* for invalidate's, etc. */
  11. #include <asm/sbus.h>
  12. #include <asm/delay.h>
  13. #include <asm/oplib.h>
  14. #include <asm/system.h>
  15. #include <asm/io.h>
  16. #include <linux/spinlock.h>
  17. struct page;
  18. extern spinlock_t dma_spin_lock;
  19. static __inline__ unsigned long claim_dma_lock(void)
  20. {
  21. unsigned long flags;
  22. spin_lock_irqsave(&dma_spin_lock, flags);
  23. return flags;
  24. }
  25. static __inline__ void release_dma_lock(unsigned long flags)
  26. {
  27. spin_unlock_irqrestore(&dma_spin_lock, flags);
  28. }
/* These are irrelevant for Sparc DMA, but we leave it in so that
 * things can compile.  (They mirror the ISA-style DMA interface that
 * generic kernel code expects every architecture to provide.)
 */
#define MAX_DMA_CHANNELS 8
#define MAX_DMA_ADDRESS  (~0UL)
#define DMA_MODE_READ    1
#define DMA_MODE_WRITE   2

/* Useful constants */
#define SIZE_16MB (16*1024*1024)
#define SIZE_64K  (64*1024)

/* SBUS DMA controller reg offsets (byte offsets from the register base) */
#define DMA_CSR   0x00UL /* rw  DMA control/status register    0x00 */
#define DMA_ADDR  0x04UL /* rw  DMA transfer address register  0x04 */
#define DMA_COUNT 0x08UL /* rw  DMA transfer count register    0x08 */
#define DMA_TEST  0x0cUL /* rw  DMA test/debug register        0x0c */
/* DVMA chip revisions, as decoded from the device-id field of the
 * control/status register (see DMA_DEVICE_ID and friends below).
 */
enum dvma_rev {
	dvmarev0,
	dvmaesc1,
	dvmarev1,
	dvmarev2,
	dvmarev3,
	dvmarevplus,
	dvmahme
};

/* Only the ESC1 gate array has a usable byte counter. */
#define DMA_HASCOUNT(rev) ((rev)==dvmaesc1)
/* Linux DMA information structure, filled during probe. */
struct sbus_dma {
	struct sbus_dma *next;		/* next controller in dma_chain */
	struct sbus_dev *sdev;		/* SBUS device owning this engine */
	void __iomem *regs;		/* mapped controller registers */

	/* Status, misc info */
	int node;			/* Prom node for this DMA device */
	int running;			/* Are we doing DMA now? */
	int allocated;			/* Are we "owned" by anyone yet? */

	/* Transfer information. */
	unsigned long addr;		/* Start address of current transfer */
	int nbytes;			/* Size of current transfer */
	int realbytes;			/* For splitting up large transfers, etc. */

	/* DMA revision */
	enum dvma_rev revision;
};

/* Head of the chain of probed DMA controllers (walked by for_each_dvma). */
extern struct sbus_dma *dma_chain;
/* Broken hardware: revisions that need interrupts masked across the
 * ESP interrupt handler (see DMA_IRQ_ENTRY/DMA_IRQ_EXIT below).
 */
#ifdef CONFIG_SUN4
/* Have to sort this out. Does rev0 work fine on sun4[cmd] without isbroken?
 * Or is rev0 present only on sun4 boxes? -jj */
#define DMA_ISBROKEN(dma) ((dma)->revision == dvmarev0 || (dma)->revision == dvmarev1)
#else
#define DMA_ISBROKEN(dma) ((dma)->revision == dvmarev1)
#endif
#define DMA_ISESC1(dma)   ((dma)->revision == dvmaesc1)

/* Main routines in dma.c */
extern void dvma_init(struct sbus_bus *);
/* Fields in the cond_reg register */
/* First, the version identification bits */
#define DMA_DEVICE_ID    0xf0000000	/* Device identification bits */
#define DMA_VERS0        0x00000000	/* Sunray DMA version */
#define DMA_ESCV1        0x40000000	/* DMA ESC Version 1 */
#define DMA_VERS1        0x80000000	/* DMA rev 1 */
#define DMA_VERS2        0xa0000000	/* DMA rev 2 */
#define DMA_VERHME       0xb0000000	/* DMA hme gate array */
#define DMA_VERSPLUS     0x90000000	/* DMA rev 1 PLUS */

/* Interrupt/status and control bits.  Note some encodings are shared
 * between chip revisions (e.g. DMA_ACC_SZ_ERR vs DMA_FIFO_STDRAIN),
 * so which meaning applies depends on the detected revision.
 */
#define DMA_HNDL_INTR    0x00000001	/* An IRQ needs to be handled */
#define DMA_HNDL_ERROR   0x00000002	/* We need to take an error */
#define DMA_FIFO_ISDRAIN 0x0000000c	/* The DMA FIFO is draining */
#define DMA_INT_ENAB     0x00000010	/* Turn on interrupts */
#define DMA_FIFO_INV     0x00000020	/* Invalidate the FIFO */
#define DMA_ACC_SZ_ERR   0x00000040	/* The access size was bad */
#define DMA_FIFO_STDRAIN 0x00000040	/* DMA_VERS1 Drain the FIFO */
#define DMA_RST_SCSI     0x00000080	/* Reset the SCSI controller */
#define DMA_RST_ENET     DMA_RST_SCSI	/* Reset the ENET controller */
#define DMA_RST_BPP      DMA_RST_SCSI	/* Reset the BPP controller */
#define DMA_ST_WRITE     0x00000100	/* write from device to memory */
#define DMA_ENABLE       0x00000200	/* Fire up DMA, handle requests */
#define DMA_PEND_READ    0x00000400	/* DMA_VERS1/0/PLUS Pending Read */
#define DMA_ESC_BURST    0x00000800	/* 1=16byte 0=32byte */
#define DMA_READ_AHEAD   0x00001800	/* DMA read ahead partial longword */
#define DMA_DSBL_RD_DRN  0x00001000	/* No EC drain on slave reads */
#define DMA_BCNT_ENAB    0x00002000	/* If on, use the byte counter */
#define DMA_TERM_CNTR    0x00004000	/* Terminal counter */
#define DMA_SCSI_SBUS64  0x00008000	/* HME: Enable 64-bit SBUS mode. */
#define DMA_CSR_DISAB    0x00010000	/* No FIFO drains during csr */
#define DMA_SCSI_DISAB   0x00020000	/* No FIFO drains during reg */
#define DMA_DSBL_WR_INV  0x00020000	/* No EC inval. on slave writes */
#define DMA_ADD_ENABLE   0x00040000	/* Special ESC DVMA optimization */

/* SBUS burst-size control; the same bits mean different things to the
 * ENET, SCSI, and BPP flavors of the gate array.
 */
#define DMA_E_BURSTS     0x000c0000	/* ENET: SBUS r/w burst mask */
#define DMA_E_BURST32    0x00040000	/* ENET: SBUS 32 byte r/w burst */
#define DMA_E_BURST16    0x00000000	/* ENET: SBUS 16 byte r/w burst */
#define DMA_BRST_SZ      0x000c0000	/* SCSI: SBUS r/w burst size */
#define DMA_BRST64       0x00080000	/* SCSI: 64byte bursts (HME on UltraSparc only) */
#define DMA_BRST32       0x00040000	/* SCSI/BPP: 32byte bursts */
#define DMA_BRST16       0x00000000	/* SCSI/BPP: 16byte bursts */
#define DMA_BRST0        0x00080000	/* SCSI: no bursts (non-HME gate arrays) */
#define DMA_ADDR_DISAB   0x00100000	/* No FIFO drains during addr */
#define DMA_2CLKS        0x00200000	/* Each transfer = 2 clock ticks */
#define DMA_3CLKS        0x00400000	/* Each transfer = 3 clock ticks */
#define DMA_EN_ENETAUI   DMA_3CLKS	/* Put lance into AUI-cable mode */
#define DMA_CNTR_DISAB   0x00800000	/* No IRQ when DMA_TERM_CNTR set */
#define DMA_AUTO_NADDR   0x01000000	/* Use "auto nxt addr" feature */
#define DMA_SCSI_ON      0x02000000	/* Enable SCSI dma */
#define DMA_BPP_ON       DMA_SCSI_ON	/* Enable BPP dma */
#define DMA_PARITY_OFF   0x02000000	/* HME: disable parity checking */
#define DMA_LOADED_ADDR  0x04000000	/* Address has been loaded */
#define DMA_LOADED_NADDR 0x08000000	/* Next address has been loaded */
#define DMA_RESET_FAS366 0x08000000	/* HME: Assert RESET to FAS366 */

/* Values describing the burst-size property from the PROM */
#define DMA_BURST1       0x01
#define DMA_BURST2       0x02
#define DMA_BURST4       0x04
#define DMA_BURST8       0x08
#define DMA_BURST16      0x10
#define DMA_BURST32      0x20
#define DMA_BURST64      0x40
#define DMA_BURSTBITS    0x7f

/* Determine highest possible final transfer address given a base
 * (i.e. bytes remaining before the 16MB address-counter wrap).
 */
#define DMA_MAXEND(addr) (0x01000000UL-(((unsigned long)(addr))&0x00ffffffUL))
/* Yes, I hack a lot of elisp in my spare time... */
/* Predicate and mutator macros over the memory-mapped cond_reg. */
#define DMA_ERROR_P(regs)  ((((regs)->cond_reg) & DMA_HNDL_ERROR))
#define DMA_IRQ_P(regs)    ((((regs)->cond_reg) & (DMA_HNDL_INTR | DMA_HNDL_ERROR)))
#define DMA_WRITE_P(regs)  ((((regs)->cond_reg) & DMA_ST_WRITE))
#define DMA_OFF(regs)      ((((regs)->cond_reg) &= (~DMA_ENABLE)))
#define DMA_INTSOFF(regs)  ((((regs)->cond_reg) &= (~DMA_INT_ENAB)))
#define DMA_INTSON(regs)   ((((regs)->cond_reg) |= (DMA_INT_ENAB)))
#define DMA_PUNTFIFO(regs) ((((regs)->cond_reg) |= DMA_FIFO_INV))
#define DMA_SETSTART(regs, addr) ((((regs)->st_addr) = (char *) addr))
#define DMA_BEGINDMA_W(regs) \
	((((regs)->cond_reg |= (DMA_ST_WRITE|DMA_ENABLE|DMA_INT_ENAB))))
#define DMA_BEGINDMA_R(regs) \
	((((regs)->cond_reg |= ((DMA_ENABLE|DMA_INT_ENAB)&(~DMA_ST_WRITE)))))

/* For certain DMA chips, we need to disable ints upon irq entry
 * and turn them back on when we are done. So in any ESP interrupt
 * handler you *must* call DMA_IRQ_ENTRY upon entry and DMA_IRQ_EXIT
 * when leaving the handler. You have been warned...
 */
#define DMA_IRQ_ENTRY(dma, dregs) do { \
	if(DMA_ISBROKEN(dma)) DMA_INTSOFF(dregs); \
} while (0)
#define DMA_IRQ_EXIT(dma, dregs) do { \
	if(DMA_ISBROKEN(dma)) DMA_INTSON(dregs); \
} while(0)

#if 0 /* P3 this stuff is inline in ledma.c:init_restart_ledma() */
/* Pause until counter runs out or BIT isn't set in the DMA condition
 * register.
 */
  174. static inline void sparc_dma_pause(struct sparc_dma_registers *regs,
  175. unsigned long bit)
  176. {
  177. int ctr = 50000; /* Let's find some bugs ;) */
  178. /* Busy wait until the bit is not set any more */
  179. while((regs->cond_reg&bit) && (ctr>0)) {
  180. ctr--;
  181. __delay(5);
  182. }
  183. /* Check for bogus outcome. */
  184. if(!ctr)
  185. panic("DMA timeout");
  186. }
/* Reset the friggin' thing...
 * Drains the FIFO, pulses the SCSI-reset bit, re-enables interrupts,
 * and (on rev2+ gate arrays) turns on 3-clock "FAST" transfers.
 */
#define DMA_RESET(dma) do { \
	struct sparc_dma_registers *regs = dma->regs; \
	/* Let the current FIFO drain itself */ \
	sparc_dma_pause(regs, (DMA_FIFO_ISDRAIN)); \
	/* Reset the logic */ \
	regs->cond_reg |= (DMA_RST_SCSI);     /* assert */ \
	__delay(400);                         /* let the bits set ;) */ \
	regs->cond_reg &= ~(DMA_RST_SCSI);    /* de-assert */ \
	sparc_dma_enable_interrupts(regs);    /* Re-enable interrupts */ \
	/* Enable FAST transfers if available */ \
	if(dma->revision>dvmarev1) regs->cond_reg |= DMA_3CLKS; \
	dma->running = 0; \
} while(0)
#endif
/* Iterate over every probed DMA controller, starting at dma_chain. */
#define for_each_dvma(dma) \
	for((dma) = dma_chain; (dma); (dma) = (dma)->next)

/* Legacy ISA-style DMA channel bookkeeping (see dma.c). */
extern int get_dma_list(char *);
extern int request_dma(unsigned int, __const__ char *);
extern void free_dma(unsigned int);

/* From PCI */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy	(0)
#endif
/* Routines for data transfer buffers.  These are boot-time-fixed-up
 * (BTFIXUP) indirect calls, resolved to the MMU-specific implementation.
 */
BTFIXUPDEF_CALL(char *, mmu_lockarea, char *, unsigned long)
BTFIXUPDEF_CALL(void,   mmu_unlockarea, char *, unsigned long)

#define mmu_lockarea(vaddr,len)		BTFIXUP_CALL(mmu_lockarea)(vaddr,len)
#define mmu_unlockarea(vaddr,len)	BTFIXUP_CALL(mmu_unlockarea)(vaddr,len)

/* These are implementations for sbus_map_sg/sbus_unmap_sg... collapse later */
BTFIXUPDEF_CALL(__u32, mmu_get_scsi_one, char *, unsigned long, struct sbus_bus *sbus)
BTFIXUPDEF_CALL(void,  mmu_get_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)
BTFIXUPDEF_CALL(void,  mmu_release_scsi_one, __u32, unsigned long, struct sbus_bus *sbus)
BTFIXUPDEF_CALL(void,  mmu_release_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)

#define mmu_get_scsi_one(vaddr,len,sbus)	BTFIXUP_CALL(mmu_get_scsi_one)(vaddr,len,sbus)
#define mmu_get_scsi_sgl(sg,sz,sbus)		BTFIXUP_CALL(mmu_get_scsi_sgl)(sg,sz,sbus)
#define mmu_release_scsi_one(vaddr,len,sbus)	BTFIXUP_CALL(mmu_release_scsi_one)(vaddr,len,sbus)
#define mmu_release_scsi_sgl(sg,sz,sbus)	BTFIXUP_CALL(mmu_release_scsi_sgl)(sg,sz,sbus)

/*
 * mmu_map/unmap are provided by iommu/iounit; Invalid to call on IIep.
 *
 * The mmu_map_dma_area establishes two mappings in one go.
 * These mappings point to pages normally mapped at 'va' (linear address).
 * First mapping is for CPU visible address at 'a', uncached.
 * This is an alias, but it works because it is an uncached mapping.
 * Second mapping is for device visible address, or "bus" address.
 * The bus address is returned at '*pba'.
 *
 * These functions seem distinct, but are hard to split. On sun4c,
 * at least for now, 'a' is equal to bus address, and returned in *pba.
 * On sun4m, page attributes depend on the CPU type, so we have to
 * know if we are mapping RAM or I/O, so it has to be an additional argument
 * to a separate mapping function for CPU visible mappings.
 */
BTFIXUPDEF_CALL(int, mmu_map_dma_area, dma_addr_t *, unsigned long, unsigned long, int len)
BTFIXUPDEF_CALL(struct page *, mmu_translate_dvma, unsigned long busa)
BTFIXUPDEF_CALL(void, mmu_unmap_dma_area, unsigned long busa, int len)

#define mmu_map_dma_area(pba,va,a,len)	BTFIXUP_CALL(mmu_map_dma_area)(pba,va,a,len)
#define mmu_unmap_dma_area(ba,len)	BTFIXUP_CALL(mmu_unmap_dma_area)(ba,len)
#define mmu_translate_dvma(ba)		BTFIXUP_CALL(mmu_translate_dvma)(ba)

#endif /* !(_ASM_SPARC_DMA_H) */