  1. /* $Id: io-unit.c,v 1.24 2001/12/17 07:05:09 davem Exp $
  2. * io-unit.c: IO-UNIT specific routines for memory management.
  3. *
  4. * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  5. */
  6. #include <linux/kernel.h>
  7. #include <linux/init.h>
  8. #include <linux/slab.h>
  9. #include <linux/spinlock.h>
  10. #include <linux/mm.h>
  11. #include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
  12. #include <linux/bitops.h>
  13. #include <asm/scatterlist.h>
  14. #include <asm/pgalloc.h>
  15. #include <asm/pgtable.h>
  16. #include <asm/sbus.h>
  17. #include <asm/io.h>
  18. #include <asm/io-unit.h>
  19. #include <asm/mxcc.h>
  20. #include <asm/cacheflush.h>
  21. #include <asm/tlbflush.h>
  22. #include <asm/dma.h>
  23. #include <asm/oplib.h>
  24. /* #define IOUNIT_DEBUG */
  25. #ifdef IOUNIT_DEBUG
  26. #define IOD(x) printk(x)
  27. #else
  28. #define IOD(x) do { } while (0)
  29. #endif
  30. #define IOPERM (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
  31. #define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
/*
 * Boot-time initialisation of one SUN4D IO-UNIT: allocate the software
 * state, locate and map the External Page Table (XPT) via the PROM
 * "reg" property, and clear every iopte so no stale DVMA translations
 * remain.  Any failure here is fatal (prom_halt()/panic()).
 */
void __init
iounit_init(int sbi_node, int io_node, struct sbus_bus *sbus)
{
	iopte_t *xpt, *xptend;
	struct iounit_struct *iounit;
	struct linux_prom_registers iommu_promregs[PROMREG_MAX];
	struct resource r;

	iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
	if (!iounit) {
		prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
		prom_halt();	/* no useful way to continue without it */
	}

	/*
	 * Partition the DVMA bitmap into allocation regions bounded by
	 * limit[0..3]; rotor[1]/rotor[2] are next-fit cursors for the
	 * second and third regions (rotor[0] is left 0 by kzalloc()).
	 */
	iounit->limit[0] = IOUNIT_BMAP1_START;
	iounit->limit[1] = IOUNIT_BMAP2_START;
	iounit->limit[2] = IOUNIT_BMAPM_START;
	iounit->limit[3] = IOUNIT_BMAPM_END;
	iounit->rotor[1] = IOUNIT_BMAP2_START;
	iounit->rotor[2] = IOUNIT_BMAPM_START;

	xpt = NULL;
	if(prom_getproperty(sbi_node, "reg", (void *) iommu_promregs,
			    sizeof(iommu_promregs)) != -1) {
		prom_apply_generic_ranges(io_node, 0, iommu_promregs, 3);
		memset(&r, 0, sizeof(r));
		/* The third "reg" entry describes the XPT (16 pages). */
		r.flags = iommu_promregs[2].which_io;
		r.start = iommu_promregs[2].phys_addr;
		xpt = (iopte_t *) sbus_ioremap(&r, 0, PAGE_SIZE * 16, "XPT");
	}
	if(!xpt) panic("Cannot map External Page Table.");

	sbus->ofdev.dev.archdata.iommu = iounit;
	iounit->page_table = xpt;
	spin_lock_init(&iounit->lock);

	/* Invalidate all ioptes in the freshly mapped table. */
	for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
	     xpt < xptend;)
		iopte_val(*xpt++) = 0;
}
/*
 * Allocate a run of iopte slots mapping [vaddr, vaddr+size) into DVMA
 * space and return the DVMA address of the first byte.
 * One has to hold iounit->lock to call this.
 */
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	iopte_t iopte;

	/* Pages needed, counting the partial pages at either end. */
	npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	/*
	 * A tiny bit of magic ingredience :)  Each nonzero nibble of 'i'
	 * is a bitmap region number (indexing iounit->limit[]); regions
	 * are tried lowest nibble first, so the preferred region depends
	 * on the request size.
	 */
	switch (npages) {
	case 1: i = 0x0231; break;
	case 2: i = 0x0132; break;
	default: i = 0x0213; break;
	}

	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));

	/* Take the next candidate region from the low nibble of 'i'. */
next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];	/* next-fit cursor for region j */
	limit = iounit->limit[j];	/* one past the region's last bit */
	scan = rotor;

	/* Find npages consecutive clear bits starting at 'scan'. */
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			/* Wrap once: rescan from the region start up to the rotor. */
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		/* Region exhausted; fall back to the next nibble's region. */
		i >>= 4;
		if (!(i & 15))
			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
		goto next;
	}
	/* First bit is clear; verify the remaining npages-1 bits too. */
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;		/* back to the first slot of the run */

	/*
	 * Build the first iopte and the DVMA address.  MKIOPTE() stores
	 * phys >> 4, so the +0x100 per-iteration bump below advances the
	 * mapping by one page (0x100 == PAGE_SIZE >> 4 for 4K pages).
	 */
	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
		set_bit(scan, iounit->bmap);
		iounit->page_table[scan] = iopte;
	}
	IOD(("%08lx\n", vaddr));
	return vaddr;
}
  111. static __u32 iounit_get_scsi_one(char *vaddr, unsigned long len, struct sbus_bus *sbus)
  112. {
  113. unsigned long ret, flags;
  114. struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;
  115. spin_lock_irqsave(&iounit->lock, flags);
  116. ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
  117. spin_unlock_irqrestore(&iounit->lock, flags);
  118. return ret;
  119. }
  120. static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
  121. {
  122. unsigned long flags;
  123. struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;
  124. /* FIXME: Cache some resolved pages - often several sg entries are to the same page */
  125. spin_lock_irqsave(&iounit->lock, flags);
  126. while (sz != 0) {
  127. --sz;
  128. sg[sz].dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg[sz].page) + sg[sz].offset, sg[sz].length);
  129. sg[sz].dvma_length = sg[sz].length;
  130. }
  131. spin_unlock_irqrestore(&iounit->lock, flags);
  132. }
  133. static void iounit_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus)
  134. {
  135. unsigned long flags;
  136. struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;
  137. spin_lock_irqsave(&iounit->lock, flags);
  138. len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
  139. vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
  140. IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
  141. for (len += vaddr; vaddr < len; vaddr++)
  142. clear_bit(vaddr, iounit->bmap);
  143. spin_unlock_irqrestore(&iounit->lock, flags);
  144. }
  145. static void iounit_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
  146. {
  147. unsigned long flags;
  148. unsigned long vaddr, len;
  149. struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;
  150. spin_lock_irqsave(&iounit->lock, flags);
  151. while (sz != 0) {
  152. --sz;
  153. len = ((sg[sz].dvma_address & ~PAGE_MASK) + sg[sz].length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
  154. vaddr = (sg[sz].dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
  155. IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
  156. for (len += vaddr; vaddr < len; vaddr++)
  157. clear_bit(vaddr, iounit->bmap);
  158. }
  159. spin_unlock_irqrestore(&iounit->lock, flags);
  160. }
  161. #ifdef CONFIG_SBUS
/*
 * Establish a fixed DMA mapping: point the kernel page tables for the
 * DVMA range starting at 'addr' at the pages backing 'va', and install
 * matching ioptes in every IO-UNIT so all SBUSes see the same range.
 * The DVMA base is returned through *pba; always returns 0.
 */
static int iounit_map_dma_area(dma_addr_t *pba, unsigned long va, __u32 addr, int len)
{
	unsigned long page, end;
	pgprot_t dvma_prot;
	iopte_t *iopte;
	struct sbus_bus *sbus;

	*pba = addr;

	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
	end = PAGE_ALIGN((addr + len));
	while(addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;
			long i;

			/* Walk init_mm's page table down to the pte for 'addr'. */
			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			/* CPU view: make 'addr' an alias of the page at 'page'. */
			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));

			/* Device view: same translation in each IO-UNIT's XPT. */
			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);

			for_each_sbus(sbus) {
				struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;
				iopte = (iopte_t *)(iounit->page_table + i);
				*iopte = MKIOPTE(__pa(page));
			}
		}
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* New ptes are live; flush stale cache/TLB state everywhere. */
	flush_cache_all();
	flush_tlb_all();

	return 0;
}
/* Tear-down counterpart of iounit_map_dma_area() - not implemented. */
static void iounit_unmap_dma_area(unsigned long addr, int len)
{
	/* XXX Somebody please fill this in */
}
  200. /* XXX We do not pass sbus device here, bad. */
  201. static struct page *iounit_translate_dvma(unsigned long addr)
  202. {
  203. struct sbus_bus *sbus = sbus_root; /* They are all the same */
  204. struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;
  205. int i;
  206. iopte_t *iopte;
  207. i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
  208. iopte = (iopte_t *)(iounit->page_table + i);
  209. return pfn_to_page(iopte_val(*iopte) >> (PAGE_SHIFT-4)); /* XXX sun4d guru, help */
  210. }
  211. #endif
/* Stub: buffer pinning is not implemented; returns vaddr unchanged. */
static char *iounit_lockarea(char *vaddr, unsigned long len)
{
	/* FIXME: Write this */
	return vaddr;
}
/* Stub counterpart of iounit_lockarea(); intentionally a no-op. */
static void iounit_unlockarea(char *vaddr, unsigned long len)
{
	/* FIXME: Write this */
}
/*
 * Install the IO-UNIT flavour of the sparc32 DVMA operations.
 * BTFIXUPSET_CALL patches the generic mmu_* entry points at boot so
 * the rest of the kernel calls straight into the iounit_* routines.
 */
void __init ld_mmu_iounit(void)
{
	/* Stubs: lockarea is patched to "return arg0", unlockarea to a nop. */
	BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM);
#ifdef CONFIG_SBUS
	BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_translate_dvma, iounit_translate_dvma, BTFIXUPCALL_NORM);
#endif
}
/*
 * Reserve DVMA space for a buffer of 'size' bytes and return its DVMA
 * base address.  Only the allocation bitmap is touched here; the
 * ioptes are filled in per page by iounit_map_dma_page().  Same
 * rotor/next-fit search as iounit_get_area(), but with a fixed region
 * order and taking the lock itself.
 */
__u32 iounit_map_dma_init(struct sbus_bus *sbus, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	unsigned long flags;
	__u32 ret;
	struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;

	npages = (size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	/* Region nibbles tried low-to-high: region 3, then 1, then 2. */
	i = 0x0213;
	spin_lock_irqsave(&iounit->lock, flags);

	/* Take the next candidate region from the low nibble of 'i'. */
next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];	/* next-fit cursor for region j */
	limit = iounit->limit[j];	/* one past the region's last bit */
	scan = rotor;

	/* Find npages consecutive clear bits starting at 'scan'. */
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			/* Wrap once: rescan from the region start up to the rotor. */
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		/* Region exhausted; fall back to the next nibble's region. */
		i >>= 4;
		if (!(i & 15))
			panic("iounit_map_dma_init: Couldn't find free iopte slots for %d bytes\n", size);
		goto next;
	}
	/* First bit is clear; verify the remaining npages-1 bits too. */
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;		/* back to the first slot of the run */
	ret = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT);
	/* Mark the run allocated; ioptes are installed later, per page. */
	for (k = 0; k < npages; k++, scan++)
		set_bit(scan, iounit->bmap);
	spin_unlock_irqrestore(&iounit->lock, flags);
	return ret;
}
  272. __u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct sbus_bus *sbus)
  273. {
  274. int scan = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
  275. struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;
  276. iounit->page_table[scan] = MKIOPTE(__pa(((unsigned long)addr) & PAGE_MASK));
  277. return vaddr + (((unsigned long)addr) & ~PAGE_MASK);
  278. }