  1. /*
  2. * io-unit.c: IO-UNIT specific routines for memory management.
  3. *
  4. * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  5. */
  6. #include <linux/kernel.h>
  7. #include <linux/init.h>
  8. #include <linux/slab.h>
  9. #include <linux/spinlock.h>
  10. #include <linux/mm.h>
  11. #include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
  12. #include <linux/bitops.h>
  13. #include <linux/scatterlist.h>
  14. #include <asm/pgalloc.h>
  15. #include <asm/pgtable.h>
  16. #include <asm/sbus.h>
  17. #include <asm/io.h>
  18. #include <asm/io-unit.h>
  19. #include <asm/mxcc.h>
  20. #include <asm/cacheflush.h>
  21. #include <asm/tlbflush.h>
  22. #include <asm/dma.h>
  23. #include <asm/oplib.h>
/* #define IOUNIT_DEBUG */
#ifdef IOUNIT_DEBUG
#define IOD(x) printk(x)
#else
#define IOD(x) do { } while (0)
#endif

/* Permission bits applied to every IO-UNIT pte: cacheable, writable, valid. */
#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
/* Build an IO-UNIT pte from a physical address (the pte holds phys >> 4). */
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
  32. static void __init iounit_iommu_init(struct of_device *op)
  33. {
  34. struct iounit_struct *iounit;
  35. iopte_t *xpt, *xptend;
  36. iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
  37. if (!iounit) {
  38. prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
  39. prom_halt();
  40. }
  41. iounit->limit[0] = IOUNIT_BMAP1_START;
  42. iounit->limit[1] = IOUNIT_BMAP2_START;
  43. iounit->limit[2] = IOUNIT_BMAPM_START;
  44. iounit->limit[3] = IOUNIT_BMAPM_END;
  45. iounit->rotor[1] = IOUNIT_BMAP2_START;
  46. iounit->rotor[2] = IOUNIT_BMAPM_START;
  47. xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT");
  48. if (!xpt) {
  49. prom_printf("SUN4D: Cannot map External Page Table.");
  50. prom_halt();
  51. }
  52. op->dev.archdata.iommu = iounit;
  53. iounit->page_table = xpt;
  54. spin_lock_init(&iounit->lock);
  55. for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
  56. xpt < xptend;)
  57. iopte_val(*xpt++) = 0;
  58. }
/*
 * Boot-time initcall: attach an IO-UNIT instance to every "sbi" node
 * found in the device tree, then hook up the SBI interrupts.
 */
static int __init iounit_init(void)
{
	extern void sun4d_init_sbi_irq(void);
	struct device_node *dp;

	for_each_node_by_name(dp, "sbi") {
		struct of_device *op = of_find_device_by_node(dp);

		iounit_iommu_init(op);
		/* Push the freshly set archdata (iommu pointer) down to children. */
		of_propagate_archdata(op);
	}

	sun4d_init_sbi_irq();

	return 0;
}

subsys_initcall(iounit_init);
/* One has to hold iounit->lock to call this */
/*
 * Find a free run of XPT slots covering [vaddr, vaddr + size), install
 * the ptes and return the DVMA address the device should use.
 */
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	iopte_t iopte;

	/* Number of IOMMU pages needed, counting the sub-page offset. */
	npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	/* A tiny bit of magic ingredient :) */
	/*
	 * 'i' packs the order in which the bitmap regions are tried, one
	 * 1-based region index per nibble, low nibble first; the preferred
	 * region depends on the request size.
	 */
	switch (npages) {
	case 1: i = 0x0231; break;
	case 2: i = 0x0132; break;
	default: i = 0x0213; break;
	}

	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));

	/* Pick the next region from 'i' and search it from its rotor. */
next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			/* Wrap once: retry from the region start up to the rotor. */
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		/* Region exhausted: fall back to the next nibble of 'i'. */
		i >>= 4;
		if (!(i & 15))
			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
		goto next;
	}
	/* The run must be npages long; restart the scan on any used bit. */
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
	/* DVMA address returned to the caller keeps the sub-page offset. */
	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
	/* +0x100 presumably steps the pte one physical page (phys is stored
	 * shifted right by 4) -- depends on PAGE_SIZE == 4K; confirm. */
	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
		set_bit(scan, iounit->bmap);
		iounit->page_table[scan] = iopte;
	}
	IOD(("%08lx\n", vaddr));
	return vaddr;
}
  116. static __u32 iounit_get_scsi_one(struct device *dev, char *vaddr, unsigned long len)
  117. {
  118. struct iounit_struct *iounit = dev->archdata.iommu;
  119. unsigned long ret, flags;
  120. spin_lock_irqsave(&iounit->lock, flags);
  121. ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
  122. spin_unlock_irqrestore(&iounit->lock, flags);
  123. return ret;
  124. }
  125. static void iounit_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
  126. {
  127. struct iounit_struct *iounit = dev->archdata.iommu;
  128. unsigned long flags;
  129. /* FIXME: Cache some resolved pages - often several sg entries are to the same page */
  130. spin_lock_irqsave(&iounit->lock, flags);
  131. while (sz != 0) {
  132. --sz;
  133. sg->dvma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
  134. sg->dvma_length = sg->length;
  135. sg = sg_next(sg);
  136. }
  137. spin_unlock_irqrestore(&iounit->lock, flags);
  138. }
  139. static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
  140. {
  141. struct iounit_struct *iounit = dev->archdata.iommu;
  142. unsigned long flags;
  143. spin_lock_irqsave(&iounit->lock, flags);
  144. len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
  145. vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
  146. IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
  147. for (len += vaddr; vaddr < len; vaddr++)
  148. clear_bit(vaddr, iounit->bmap);
  149. spin_unlock_irqrestore(&iounit->lock, flags);
  150. }
  151. static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
  152. {
  153. struct iounit_struct *iounit = dev->archdata.iommu;
  154. unsigned long flags;
  155. unsigned long vaddr, len;
  156. spin_lock_irqsave(&iounit->lock, flags);
  157. while (sz != 0) {
  158. --sz;
  159. len = ((sg->dvma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
  160. vaddr = (sg->dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
  161. IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
  162. for (len += vaddr; vaddr < len; vaddr++)
  163. clear_bit(vaddr, iounit->bmap);
  164. sg = sg_next(sg);
  165. }
  166. spin_unlock_irqrestore(&iounit->lock, flags);
  167. }
  168. #ifdef CONFIG_SBUS
/*
 * Create a DVMA mapping for the kernel range starting at 'va' so that
 * SBUS devices see it at DVMA address 'addr'.  Both the kernel page
 * tables (CPU view through the DVMA alias) and the IO-UNIT's External
 * Page Table (device view) are populated, one page at a time.
 *
 * Always returns 0; the DVMA base is handed back through *pba.
 * NOTE(review): appears to assume 'addr' and 'va' are page-aligned --
 * confirm against callers.
 */
static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, __u32 addr, int len)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long page, end;
	pgprot_t dvma_prot;
	iopte_t *iopte;

	*pba = addr;

	/* Cacheable, privileged SRMMU pte bits for the kernel-side alias. */
	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
	end = PAGE_ALIGN((addr + len));
	while(addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;
			long i;

			/* Walk init_mm's page tables down to the pte for 'addr'. */
			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			/* CPU view: alias the DVMA address onto the backing page. */
			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));

			/* Device view: fill the matching XPT slot. */
			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);

			iopte = (iopte_t *)(iounit->page_table + i);
			*iopte = MKIOPTE(__pa(page));
		}
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* Make the new translations visible before anyone uses the alias. */
	flush_cache_all();
	flush_tlb_all();

	return 0;
}
/*
 * Tear down a mapping created by iounit_map_dma_area().
 * NOTE(review): left unimplemented upstream -- as written it leaks the
 * XPT entries and kernel ptes set up at map time; confirm whether any
 * caller relies on unmapping.
 */
static void iounit_unmap_dma_area(struct device *dev, unsigned long addr, int len)
{
	/* XXX Somebody please fill this in */
}
  204. #endif
  205. static char *iounit_lockarea(char *vaddr, unsigned long len)
  206. {
  207. /* FIXME: Write this */
  208. return vaddr;
  209. }
  210. static void iounit_unlockarea(char *vaddr, unsigned long len)
  211. {
  212. /* FIXME: Write this */
  213. }
/*
 * Install the IO-UNIT implementations of the sparc32 MMU/DVMA hooks
 * through the btfixup mechanism.  Called once during early boot.
 */
void __init ld_mmu_iounit(void)
{
	/* lock/unlock are no-ops; btfixup patches them to return-0 / nop. */
	BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
	BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM);
#endif
}
/*
 * Reserve 'size' bytes worth of DVMA slots for 'sbus' and return the
 * DVMA base of the reservation.  Only the allocation bitmap is touched
 * here; the XPT entries are installed later, page by page, via
 * iounit_map_dma_page().
 *
 * NOTE(review): this duplicates the rotor/bitmap search of
 * iounit_get_area() (with the fixed region order 0x0213) -- keep the
 * two in sync if either changes.
 */
__u32 iounit_map_dma_init(struct sbus_bus *sbus, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	unsigned long flags;
	__u32 ret;
	struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;

	npages = (size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	/* Region try-order, one 1-based region index per nibble, low first. */
	i = 0x0213;
	spin_lock_irqsave(&iounit->lock, flags);
	/* Pick the next region from 'i' and search it from its rotor. */
next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			/* Wrap once: retry from the region start up to the rotor. */
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		/* Region exhausted: fall back to the next nibble of 'i'. */
		i >>= 4;
		if (!(i & 15))
			panic("iounit_map_dma_init: Couldn't find free iopte slots for %d bytes\n", size);
		goto next;
	}
	/* The run must be npages long; restart the scan on any used bit. */
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	ret = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT);
	/* Mark the slots allocated; ptes are filled in later. */
	for (k = 0; k < npages; k++, scan++)
		set_bit(scan, iounit->bmap);
	spin_unlock_irqrestore(&iounit->lock, flags);
	return ret;
}
  264. __u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct sbus_bus *sbus)
  265. {
  266. int scan = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
  267. struct iounit_struct *iounit = sbus->ofdev.dev.archdata.iommu;
  268. iounit->page_table[scan] = MKIOPTE(__pa(((unsigned long)addr) & PAGE_MASK));
  269. return vaddr + (((unsigned long)addr) & ~PAGE_MASK);
  270. }