iommu.c

/*
 * iommu.c: IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>      /* pte_offset_map => kmap_atomic */

#include <asm/scatterlist.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

/*
 * This can be sized dynamically, but we will do that
 * only once we have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE      IOMMU_RNGE_256MB
#define IOMMU_START     0xF0000000
#define IOMMU_WINSIZE   (256*1024*1024U)
#define IOMMU_NPTES     (IOMMU_WINSIZE/PAGE_SIZE)       /* 64K PTEs, 256 KB */
#define IOMMU_ORDER     6                               /* 4096 * (1<<6) */
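
/*
 * Editorial sanity sketch of the sizing math above (IOMMU_CHECK is
 * not part of this file): with 4 KB pages, a 256 MB DVMA window needs
 * 256 MB / 4 KB = 65536 PTEs.  At 4 bytes per iopte_t that is a
 * 256 KB page table, exactly one order-6 page allocation
 * (4096 << 6 == 262144 bytes).
 */
#if 0
#define IOMMU_CHECK(expr) extern char iommu_check[(expr) ? 1 : -1]
IOMMU_CHECK(IOMMU_NPTES == 65536);
IOMMU_CHECK(IOMMU_NPTES * sizeof(iopte_t) == (4096UL << IOMMU_ORDER));
#endif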

/* srmmu.c */
extern int viking_mxcc_present;
BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
extern int flush_page_for_dma_global;
static int viking_flush;

/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;         /* Consistent mapping iopte flags */
static pgprot_t dvma_prot;              /* Consistent mapping pte flags */

#define IOPERM  (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
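
/*
 * A minimal sketch of what MKIOPTE computes (editorial, not in the
 * original file; the exact mask values are taken on faith from
 * asm/iommu.h).  An iopte holds the page frame number shifted left
 * by 8: for a page at physical 0x12345000, pfn == 0x12345, and the
 * page field becomes 0x12345 << 8 == 0x01234500.
 */
#if 0
static unsigned long example_iopte(unsigned long pfn)
{
        /* Same packing as MKIOPTE(pfn, IOPERM), spelled out. */
        unsigned long v = (pfn << 8) & IOPTE_PAGE;      /* page frame bits */
        v |= IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;   /* permissions */
        v &= ~IOPTE_WAZ;                                /* write-as-zero */
        return v;
}
#endif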

void __init
iommu_init(int iommund, struct sbus_bus *sbus)
{
        unsigned int impl, vers;
        unsigned long tmp;
        struct iommu_struct *iommu;
        struct linux_prom_registers iommu_promregs[PROMREG_MAX];
        struct resource r;
        unsigned long *bitmap;

        iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
        if (!iommu) {
                prom_printf("Unable to allocate iommu structure\n");
                prom_halt();
        }
        iommu->regs = NULL;
        if (prom_getproperty(iommund, "reg", (void *) iommu_promregs,
                             sizeof(iommu_promregs)) != -1) {
                memset(&r, 0, sizeof(r));
                r.flags = iommu_promregs[0].which_io;
                r.start = iommu_promregs[0].phys_addr;
                iommu->regs = (struct iommu_regs *)
                        sbus_ioremap(&r, 0, PAGE_SIZE * 3, "iommu_regs");
        }
        if (!iommu->regs) {
                prom_printf("Cannot map IOMMU registers\n");
                prom_halt();
        }
        impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
        vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
        tmp = iommu->regs->control;
        tmp &= ~(IOMMU_CTRL_RNGE);
        tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
        iommu->regs->control = tmp;
        iommu_invalidate(iommu->regs);
        iommu->start = IOMMU_START;
        iommu->end = 0xffffffff;

        /* Allocate IOMMU page table */
        /*
         * Stupid alignment constraints give me a headache.
         * We need a 256K, 512K, 1M or 2M area aligned to its size,
         * and the current gfp allocator fortunately gives it to us.
         */
        tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
        if (!tmp) {
                prom_printf("Unable to allocate iommu table [0x%08x]\n",
                            IOMMU_NPTES*sizeof(iopte_t));
                prom_halt();
        }
        iommu->page_table = (iopte_t *)tmp;

        /* Initialize new table. */
        memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
        flush_cache_all();
        flush_tlb_all();
        iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4;
        iommu_invalidate(iommu->regs);

        bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
        if (!bitmap) {
                prom_printf("Unable to allocate iommu bitmap [%d]\n",
                            (int)(IOMMU_NPTES>>3));
                prom_halt();
        }
        bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
        /* To be coherent on HyperSparc, the page color of DVMA
         * and physical addresses must match; see the sketch after
         * this function.
         */
        if (srmmu_modtype == HyperSparc)
                iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
        else
                iommu->usemap.num_colors = 1;

        printk("IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
               impl, vers, iommu->page_table,
               (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

        sbus->iommu = iommu;
}
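
/*
 * Page coloring, illustrated (editorial sketch, not original code).
 * HyperSparc has a virtually indexed cache, so a DVMA alias is only
 * coherent with the kernel mapping when both land on the same cache
 * "color", i.e. the same page number modulo the number of page-sized
 * colors in the cache.  The bitmap allocator is told the color of the
 * physical page and hands back an index of the same color.
 */
#if 0
static int same_color(unsigned long pa, unsigned long dvma, int ncolors)
{
        /* Both addresses index the cache by page number modulo ncolors. */
        return ((pa >> PAGE_SHIFT) % ncolors) ==
               ((dvma >> PAGE_SHIFT) % ncolors);
}
#endif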

/* This begs to be btfixup-ed by srmmu. */
/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
        unsigned long start;
        unsigned long end;

        start = (unsigned long)iopte & PAGE_MASK;
        end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
        if (viking_mxcc_present) {
                while(start < end) {
                        viking_mxcc_flush_page(start);
                        start += PAGE_SIZE;
                }
        } else if (viking_flush) {
                while(start < end) {
                        viking_flush_page(start);
                        start += PAGE_SIZE;
                }
        } else {
                while(start < end) {
                        __flush_page_to_ram(start);
                        start += PAGE_SIZE;
                }
        }
}

static u32 iommu_get_one(struct page *page, int npages, struct sbus_bus *sbus)
{
        struct iommu_struct *iommu = sbus->iommu;
        int ioptex;
        iopte_t *iopte, *iopte0;
        unsigned int busa, busa0;
        int i;

        /* page color = pfn of page */
        ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
        if (ioptex < 0)
                panic("iommu out");
        busa0 = iommu->start + (ioptex << PAGE_SHIFT);
        iopte0 = &iommu->page_table[ioptex];

        busa = busa0;
        iopte = iopte0;
        for (i = 0; i < npages; i++) {
                iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
                iommu_invalidate_page(iommu->regs, busa);
                busa += PAGE_SIZE;
                iopte++;
                page++;
        }

        iommu_flush_iotlb(iopte0, npages);

        return busa0;
}
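
/*
 * The index-to-bus-address math above, worked through (editorial
 * note): PTE slot ioptex covers the DVMA page at
 * iommu->start + (ioptex << PAGE_SHIFT).  E.g. with IOMMU_START
 * 0xF0000000, slot 3 maps bus address 0xF0003000; conversely,
 * iommu_release_one() below recovers the slot as
 * (busa - iommu->start) >> PAGE_SHIFT.
 */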

static u32 iommu_get_scsi_one(char *vaddr, unsigned int len,
    struct sbus_bus *sbus)
{
        unsigned long off;
        int npages;
        struct page *page;
        u32 busa;

        off = (unsigned long)vaddr & ~PAGE_MASK;
        npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
        page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
        busa = iommu_get_one(page, npages, sbus);
        return busa + off;
}

static __u32 iommu_get_scsi_one_noflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
        return iommu_get_scsi_one(vaddr, len, sbus);
}

static __u32 iommu_get_scsi_one_gflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
        flush_page_for_dma(0);
        return iommu_get_scsi_one(vaddr, len, sbus);
}

static __u32 iommu_get_scsi_one_pflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
        unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;

        while(page < ((unsigned long)(vaddr + len))) {
                flush_page_for_dma(page);
                page += PAGE_SIZE;
        }
        return iommu_get_scsi_one(vaddr, len, sbus);
}

static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
        int n;

        while (sz != 0) {
                --sz;
                n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
                sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
                sg->dvma_length = (__u32) sg->length;
                sg++;
        }
}

static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
        int n;

        flush_page_for_dma(0);
        while (sz != 0) {
                --sz;
                n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
                sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
                sg->dvma_length = (__u32) sg->length;
                sg++;
        }
}

static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
        unsigned long page, oldpage = 0;
        int n, i;

        while(sz != 0) {
                --sz;

                n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;

                /*
                 * We expect unmapped highmem pages not to be in the cache.
                 * XXX Is this a good assumption?
                 * XXX What if someone else unmaps it here and races us?
                 */
                if ((page = (unsigned long) page_address(sg->page)) != 0) {
                        for (i = 0; i < n; i++) {
                                if (page != oldpage) {  /* Already flushed? */
                                        flush_page_for_dma(page);
                                        oldpage = page;
                                }
                                page += PAGE_SIZE;
                        }
                }

                sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
                sg->dvma_length = (__u32) sg->length;
                sg++;
        }
}

static void iommu_release_one(u32 busa, int npages, struct sbus_bus *sbus)
{
        struct iommu_struct *iommu = sbus->iommu;
        int ioptex;
        int i;

        if (busa < iommu->start)
                BUG();
        ioptex = (busa - iommu->start) >> PAGE_SHIFT;
        for (i = 0; i < npages; i++) {
                iopte_val(iommu->page_table[ioptex + i]) = 0;
                iommu_invalidate_page(iommu->regs, busa);
                busa += PAGE_SIZE;
        }
        bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void iommu_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus)
{
        unsigned long off;
        int npages;

        off = vaddr & ~PAGE_MASK;
        npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
        iommu_release_one(vaddr & PAGE_MASK, npages, sbus);
}

static void iommu_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
        int n;

        while(sz != 0) {
                --sz;

                n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
                iommu_release_one(sg->dvma_address & PAGE_MASK, n, sbus);
                sg->dvma_address = 0x21212121;  /* poison ("!!!!") to catch reuse */
                sg++;
        }
}
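
/*
 * How a SCSI driver would use the pair above, through the mmu_*
 * entry points patched in by ld_mmu_iommu() below (editorial sketch
 * only; the names and signatures are assumed from the BTFIXUP
 * declarations, verify against your tree).
 */
#if 0
static void example_dma_roundtrip(struct sbus_bus *sbus, char *buf,
                                  unsigned long len)
{
        __u32 busa;

        busa = mmu_get_scsi_one(buf, len, sbus);        /* map for DMA */
        /* ... program the device with busa, wait for completion ... */
        mmu_release_scsi_one(busa, len, sbus);          /* tear down */
}
#endif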

#ifdef CONFIG_SBUS
static int iommu_map_dma_area(dma_addr_t *pba, unsigned long va,
    unsigned long addr, int len)
{
        unsigned long page, end;
        struct iommu_struct *iommu = sbus_root->iommu;
        iopte_t *iopte = iommu->page_table;
        iopte_t *first;
        int ioptex;

        if ((va & ~PAGE_MASK) != 0) BUG();
        if ((addr & ~PAGE_MASK) != 0) BUG();
        if ((len & ~PAGE_MASK) != 0) BUG();

        /* page color = physical address */
        ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
            addr >> PAGE_SHIFT);
        if (ioptex < 0)
                panic("iommu out");

        iopte += ioptex;
        first = iopte;
        end = addr + len;
        while(addr < end) {
                page = va;
                {
                        pgd_t *pgdp;
                        pmd_t *pmdp;
                        pte_t *ptep;

                        if (viking_mxcc_present)
                                viking_mxcc_flush_page(page);
                        else if (viking_flush)
                                viking_flush_page(page);
                        else
                                __flush_page_to_ram(page);

                        pgdp = pgd_offset(&init_mm, addr);
                        pmdp = pmd_offset(pgdp, addr);
                        ptep = pte_offset_map(pmdp, addr);

                        set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
                }
                iopte_val(*iopte++) =
                    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
                addr += PAGE_SIZE;
                va += PAGE_SIZE;
        }

        /* P3: why do we need this?
         *
         * DAVEM: Because there are several aspects, none of which
         * are handled by a single interface. Some cpus are
         * completely not I/O DMA coherent, and some have
         * virtually indexed caches. The driver DMA flushing
         * methods handle the former case, but here during
         * IOMMU page table modifications, and usage of non-cacheable
         * cpu mappings of pages potentially in the cpu caches, we have
         * to handle the latter case as well.
         */
        flush_cache_all();
        iommu_flush_iotlb(first, len >> PAGE_SHIFT);
        flush_tlb_all();
        iommu_invalidate(iommu->regs);

        *pba = iommu->start + (ioptex << PAGE_SHIFT);
        return 0;
}
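
/*
 * The caller's view of iommu_map_dma_area(), sketched (editorial and
 * hedged: the real consumer is the consistent-allocation path of the
 * sparc ioport code, and the names below are illustrative only).
 * A driver asks for DMA-consistent memory; the allocator grabs pages,
 * picks an address for an uncacheable alias, and calls this routine
 * to install that alias and mirror the pages into the DVMA window.
 */
#if 0
static int example_make_consistent(unsigned long pages_va,
                                   unsigned long uncached_va, int len,
                                   dma_addr_t *dma_handle)
{
        /*
         * pages_va:    where the backing pages currently live;
         * uncached_va: page-aligned address that receives the
         *              non-cacheable alias via set_pte() above.
         */
        return iommu_map_dma_area(dma_handle, pages_va, uncached_va, len);
}
#endif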

static void iommu_unmap_dma_area(unsigned long busa, int len)
{
        struct iommu_struct *iommu = sbus_root->iommu;
        iopte_t *iopte = iommu->page_table;
        unsigned long end;
        int ioptex = (busa - iommu->start) >> PAGE_SHIFT;

        if ((busa & ~PAGE_MASK) != 0) BUG();
        if ((len & ~PAGE_MASK) != 0) BUG();

        iopte += ioptex;
        end = busa + len;
        while (busa < end) {
                iopte_val(*iopte++) = 0;
                busa += PAGE_SIZE;
        }
        flush_tlb_all();
        iommu_invalidate(iommu->regs);
        bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
}

static struct page *iommu_translate_dvma(unsigned long busa)
{
        struct iommu_struct *iommu = sbus_root->iommu;
        iopte_t *iopte = iommu->page_table;

        iopte += ((busa - iommu->start) >> PAGE_SHIFT);
        return pfn_to_page((iopte_val(*iopte) & IOPTE_PAGE) >> (PAGE_SHIFT-4));
}
#endif
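
/*
 * Why the shift by PAGE_SHIFT-4 recovers the pfn (editorial note):
 * MKIOPTE() stored pfn << 8, which is the physical address >> 4 with
 * the page-offset bits dropped.  With PAGE_SHIFT == 12, shifting the
 * page field right by 12 - 4 == 8 undoes that and yields the pfn.
 */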

/*
 * With an IOMMU, DVMA goes through the page table above, so there is
 * nothing to pin down; these are no-op stubs for the mmu_lockarea
 * interface.
 */
static char *iommu_lockarea(char *vaddr, unsigned long len)
{
        return vaddr;
}

static void iommu_unlockarea(char *vaddr, unsigned long len)
{
}

void __init ld_mmu_iommu(void)
{
        viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
        BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0);
        BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP);

        if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
                /* IO coherent chip */
                BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
                BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
        } else if (flush_page_for_dma_global) {
                /* flush_page_for_dma flushes everything, regardless of the page */
                BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
                BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
        } else {
                BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
                BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
        }
        BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
        BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(mmu_translate_dvma, iommu_translate_dvma, BTFIXUPCALL_NORM);
#endif

        if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
                dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
                ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
        } else {
                dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
                ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
        }
}
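
/*
 * What BTFIXUPSET_CALL accomplishes, in spirit (editorial sketch;
 * the real mechanism patches the call instructions once at boot, see
 * the btfixup header in include/asm-sparc).  Functionally it behaves
 * like selecting a function pointer once and then calling it with no
 * further indirection:
 */
#if 0
static __u32 (*mmu_get_scsi_one_ptr)(char *, unsigned long,
                                     struct sbus_bus *);

static void example_select(int io_coherent)
{
        /* Pick the flush strategy once; every later call is direct. */
        mmu_get_scsi_one_ptr = io_coherent ? iommu_get_scsi_one_noflush
                                           : iommu_get_scsi_one_pflush;
}
#endif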