iommu.c

/*
 * iommu.c: IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>      /* pte_offset_map => kmap_atomic */

#include <asm/scatterlist.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

/*
 * This can be sized dynamically, but we will do this
 * only when we have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE      IOMMU_RNGE_256MB
#define IOMMU_START     0xF0000000
#define IOMMU_WINSIZE   (256*1024*1024U)
#define IOMMU_NPTES     (IOMMU_WINSIZE/PAGE_SIZE)       /* 64K PTEs, 256KB table */
#define IOMMU_ORDER     6                               /* 4096 * (1<<6) */

/* srmmu.c */
extern int viking_mxcc_present;
BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
extern int flush_page_for_dma_global;
static int viking_flush;

/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;         /* Consistent mapping iopte flags */
static pgprot_t dvma_prot;              /* Consistent mapping pte flags */

#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)

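/*
 * NB (editor's reading of the encoding): an iopte holds the physical
 * address shifted right by 4 bits, so MKIOPTE shifts the pfn left by
 * PAGE_SHIFT - 4 = 8 bits for the 4K pages used here;
 * iommu_translate_dvma() below undoes this with >> (PAGE_SHIFT - 4).
 */
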
void __init
iommu_init(int iommund, struct sbus_bus *sbus)
{
        unsigned int impl, vers;
        unsigned long tmp;
        struct iommu_struct *iommu;
        struct linux_prom_registers iommu_promregs[PROMREG_MAX];
        struct resource r;
        unsigned long *bitmap;

        iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
        if (!iommu) {
                prom_printf("Unable to allocate iommu structure\n");
                prom_halt();
        }
        iommu->regs = NULL;
        if (prom_getproperty(iommund, "reg", (void *) iommu_promregs,
                         sizeof(iommu_promregs)) != -1) {
                memset(&r, 0, sizeof(r));
                r.flags = iommu_promregs[0].which_io;
                r.start = iommu_promregs[0].phys_addr;
                iommu->regs = (struct iommu_regs *)
                        sbus_ioremap(&r, 0, PAGE_SIZE * 3, "iommu_regs");
        }
        if (!iommu->regs) {
                prom_printf("Cannot map IOMMU registers\n");
                prom_halt();
        }
        impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
        vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
        tmp = iommu->regs->control;
        tmp &= ~(IOMMU_CTRL_RNGE);
        tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
        iommu->regs->control = tmp;
        iommu_invalidate(iommu->regs);
        iommu->start = IOMMU_START;
        iommu->end = 0xffffffff;

        /* Allocate IOMMU page table */
        /* Stupid alignment constraints give me a headache.
           We need 256K or 512K or 1M or 2M area aligned to
           its size and current gfp will fortunately give
           it to us. */
        tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
        if (!tmp) {
                prom_printf("Unable to allocate iommu table [0x%08x]\n",
                            IOMMU_NPTES*sizeof(iopte_t));
                prom_halt();
        }
        iommu->page_table = (iopte_t *)tmp;

        /* Initialize new table. */
        memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
        flush_cache_all();
        flush_tlb_all();
        iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4;
        iommu_invalidate(iommu->regs);

        bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
        if (!bitmap) {
                prom_printf("Unable to allocate iommu bitmap [%d]\n",
                            (int)(IOMMU_NPTES>>3));
                prom_halt();
        }
        bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
        /* To be coherent on HyperSparc, the page color of DVMA
         * and physical addresses must match.
         */
        if (srmmu_modtype == HyperSparc)
                iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
        else
                iommu->usemap.num_colors = 1;

        printk("IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
               impl, vers, iommu->page_table,
               (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

        sbus->ofdev.dev.archdata.iommu = iommu;
}

/* This begs to be btfixup-ed by srmmu. */
/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
        unsigned long start;
        unsigned long end;

        start = (unsigned long)iopte;
        end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
        start &= PAGE_MASK;
        if (viking_mxcc_present) {
                while(start < end) {
                        viking_mxcc_flush_page(start);
                        start += PAGE_SIZE;
                }
        } else if (viking_flush) {
                while(start < end) {
                        viking_flush_page(start);
                        start += PAGE_SIZE;
                }
        } else {
                while(start < end) {
                        __flush_page_to_ram(start);
                        start += PAGE_SIZE;
                }
        }
}

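/*
 * Map @npages contiguous pages starting at @page into the DVMA window
 * and return the bus address of the first one.  The pfn doubles as the
 * allocation "color" so that DVMA and physical addresses share a cache
 * color (see the HyperSparc note in iommu_init above).
 */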
static u32 iommu_get_one(struct page *page, int npages, struct sbus_bus *sbus)
{
        struct iommu_struct *iommu = sbus->ofdev.dev.archdata.iommu;
        int ioptex;
        iopte_t *iopte, *iopte0;
        unsigned int busa, busa0;
        int i;

        /* page color = pfn of page */
        ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
        if (ioptex < 0)
                panic("iommu out");
        busa0 = iommu->start + (ioptex << PAGE_SHIFT);
        iopte0 = &iommu->page_table[ioptex];

        busa = busa0;
        iopte = iopte0;
        for (i = 0; i < npages; i++) {
                iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
                iommu_invalidate_page(iommu->regs, busa);
                busa += PAGE_SIZE;
                iopte++;
                page++;
        }

        iommu_flush_iotlb(iopte0, npages);

        return busa0;
}

static u32 iommu_get_scsi_one(char *vaddr, unsigned int len,
    struct sbus_bus *sbus)
{
        unsigned long off;
        int npages;
        struct page *page;
        u32 busa;

        off = (unsigned long)vaddr & ~PAGE_MASK;
        npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
        page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
        busa = iommu_get_one(page, npages, sbus);
        return busa + off;
}

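/*
 * The three wrappers below differ only in how they keep the CPU caches
 * coherent before setting up the mapping; ld_mmu_iommu() patches one of
 * them in at boot.  "noflush" is for I/O coherent chips, "gflush" for
 * CPUs whose flush_page_for_dma() flushes everything at once, and
 * "pflush" for those that must flush page by page.
 */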
static __u32 iommu_get_scsi_one_noflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
        return iommu_get_scsi_one(vaddr, len, sbus);
}

static __u32 iommu_get_scsi_one_gflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
        flush_page_for_dma(0);
        return iommu_get_scsi_one(vaddr, len, sbus);
}

static __u32 iommu_get_scsi_one_pflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
        unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;

        while(page < ((unsigned long)(vaddr + len))) {
                flush_page_for_dma(page);
                page += PAGE_SIZE;
        }
        return iommu_get_scsi_one(vaddr, len, sbus);
}

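/*
 * Scatter-gather counterparts of the routines above; the same flush
 * strategy selection applies.
 */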
static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
        int n;

        while (sz != 0) {
                --sz;
                n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
                sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
                sg->dvma_length = (__u32) sg->length;
                sg++;
        }
}

static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
        int n;

        flush_page_for_dma(0);
        while (sz != 0) {
                --sz;
                n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
                sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
                sg->dvma_length = (__u32) sg->length;
                sg++;
        }
}

static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
        unsigned long page, oldpage = 0;
        int n, i;

        while(sz != 0) {
                --sz;

                n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;

                /*
                 * We expect unmapped highmem pages to be not in the cache.
                 * XXX Is this a good assumption?
                 * XXX What if someone else unmaps it here and races us?
                 */
                if ((page = (unsigned long) page_address(sg->page)) != 0) {
                        for (i = 0; i < n; i++) {
                                if (page != oldpage) {  /* Already flushed? */
                                        flush_page_for_dma(page);
                                        oldpage = page;
                                }
                                page += PAGE_SIZE;
                        }
                }

                sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
                sg->dvma_length = (__u32) sg->length;
                sg++;
        }
}

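/*
 * Undo a mapping set up by iommu_get_one(): clear the ioptes, shoot
 * down the corresponding IOTLB entries, and return the range to the
 * bitmap allocator.
 */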
static void iommu_release_one(u32 busa, int npages, struct sbus_bus *sbus)
{
        struct iommu_struct *iommu = sbus->ofdev.dev.archdata.iommu;
        int ioptex;
        int i;

        BUG_ON(busa < iommu->start);
        ioptex = (busa - iommu->start) >> PAGE_SHIFT;
        for (i = 0; i < npages; i++) {
                iopte_val(iommu->page_table[ioptex + i]) = 0;
                iommu_invalidate_page(iommu->regs, busa);
                busa += PAGE_SIZE;
        }
        bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void iommu_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus)
{
        unsigned long off;
        int npages;

        off = vaddr & ~PAGE_MASK;
        npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
        iommu_release_one(vaddr & PAGE_MASK, npages, sbus);
}

static void iommu_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
        int n;

        while(sz != 0) {
                --sz;

                n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
                iommu_release_one(sg->dvma_address & PAGE_MASK, n, sbus);
                sg->dvma_address = 0x21212121;  /* poison value, catches use after release */
                sg++;
        }
}

#ifdef CONFIG_SBUS
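/*
 * Map a kernel-virtual range for consistent DVMA: each page is flushed,
 * re-entered into the kernel page tables with dvma_prot (cacheable only
 * on CPUs where that is coherent, see ld_mmu_iommu() below), and entered
 * into the IOMMU page table with the matching ioperm_noc flags.
 */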
static int iommu_map_dma_area(dma_addr_t *pba, unsigned long va,
    unsigned long addr, int len)
{
        unsigned long page, end;
        struct iommu_struct *iommu = sbus_root->ofdev.dev.archdata.iommu;
        iopte_t *iopte = iommu->page_table;
        iopte_t *first;
        int ioptex;

        BUG_ON((va & ~PAGE_MASK) != 0);
        BUG_ON((addr & ~PAGE_MASK) != 0);
        BUG_ON((len & ~PAGE_MASK) != 0);

        /* page color = physical address */
        ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
            addr >> PAGE_SHIFT);
        if (ioptex < 0)
                panic("iommu out");

        iopte += ioptex;
        first = iopte;
        end = addr + len;
        while(addr < end) {
                page = va;
                {
                        pgd_t *pgdp;
                        pmd_t *pmdp;
                        pte_t *ptep;

                        if (viking_mxcc_present)
                                viking_mxcc_flush_page(page);
                        else if (viking_flush)
                                viking_flush_page(page);
                        else
                                __flush_page_to_ram(page);

                        pgdp = pgd_offset(&init_mm, addr);
                        pmdp = pmd_offset(pgdp, addr);
                        ptep = pte_offset_map(pmdp, addr);

                        set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
                }
                iopte_val(*iopte++) =
                    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
                addr += PAGE_SIZE;
                va += PAGE_SIZE;
        }

        /* P3: why do we need this?
         *
         * DAVEM: Because there are several aspects, none of which
         * are handled by a single interface. Some cpus are
         * completely not I/O DMA coherent, and some have
         * virtually indexed caches. The driver DMA flushing
         * methods handle the former case, but here during
         * IOMMU page table modifications, and usage of non-cacheable
         * cpu mappings of pages potentially in the cpu caches, we have
         * to handle the latter case as well.
         */
        flush_cache_all();
        iommu_flush_iotlb(first, len >> PAGE_SHIFT);
        flush_tlb_all();
        iommu_invalidate(iommu->regs);

        *pba = iommu->start + (ioptex << PAGE_SHIFT);
        return 0;
}

static void iommu_unmap_dma_area(unsigned long busa, int len)
{
        struct iommu_struct *iommu = sbus_root->ofdev.dev.archdata.iommu;
        iopte_t *iopte = iommu->page_table;
        unsigned long end;
        int ioptex = (busa - iommu->start) >> PAGE_SHIFT;

        BUG_ON((busa & ~PAGE_MASK) != 0);
        BUG_ON((len & ~PAGE_MASK) != 0);

        iopte += ioptex;
        end = busa + len;
        while (busa < end) {
                iopte_val(*iopte++) = 0;
                busa += PAGE_SIZE;
        }
        flush_tlb_all();
        iommu_invalidate(iommu->regs);
        bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
}

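/*
 * Inverse of MKIOPTE: mask out the flag bits and shift right by
 * PAGE_SHIFT - 4 to recover the pfn of the mapped page.
 */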
static struct page *iommu_translate_dvma(unsigned long busa)
{
        struct iommu_struct *iommu = sbus_root->ofdev.dev.archdata.iommu;
        iopte_t *iopte = iommu->page_table;

        iopte += ((busa - iommu->start) >> PAGE_SHIFT);
        return pfn_to_page((iopte_val(*iopte) & IOPTE_PAGE) >> (PAGE_SHIFT-4));
}
#endif

static char *iommu_lockarea(char *vaddr, unsigned long len)
{
        return vaddr;
}

static void iommu_unlockarea(char *vaddr, unsigned long len)
{
}

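/*
 * Boot-time setup: patch the generic mmu_* entry points (btfixup) to
 * the IOMMU routines above, picking the flush variants according to
 * how the CPU keeps its caches coherent with DMA.
 */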
void __init ld_mmu_iommu(void)
{
        viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
        BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0);
        BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP);

        if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
                /* IO coherent chip */
                BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
                BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
        } else if (flush_page_for_dma_global) {
                /* flush_page_for_dma flushes everything, no matter what page it is */
                BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
                BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
        } else {
                BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
                BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
        }
        BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
        BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(mmu_translate_dvma, iommu_translate_dvma, BTFIXUPCALL_NORM);
#endif

        if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
                dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
                ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
        } else {
                dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
                ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
        }
}