sbus.c 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258
  1. /* $Id: sbus.c,v 1.19 2002/01/23 11:27:32 davem Exp $
  2. * sbus.c: UltraSparc SBUS controller support.
  3. *
  4. * Copyright (C) 1999 David S. Miller (davem@redhat.com)
  5. */
  6. #include <linux/kernel.h>
  7. #include <linux/types.h>
  8. #include <linux/mm.h>
  9. #include <linux/spinlock.h>
  10. #include <linux/slab.h>
  11. #include <linux/init.h>
  12. #include <linux/interrupt.h>
  13. #include <asm/page.h>
  14. #include <asm/sbus.h>
  15. #include <asm/io.h>
  16. #include <asm/upa.h>
  17. #include <asm/cache.h>
  18. #include <asm/dma.h>
  19. #include <asm/irq.h>
  20. #include <asm/starfire.h>
  21. #include "iommu_common.h"
/* These should be allocated on an SMP_CACHE_BYTES
 * aligned boundary for optimal performance.
 *
 * On SYSIO, using an 8K page size we have 1GB of SBUS
 * DMA space mapped.  We divide this space into equally
 * sized clusters.  We allocate a DMA mapping from the
 * cluster that matches the order of the allocation, or
 * if the order is greater than the number of clusters,
 * we try to allocate from the last cluster.
 */
#define NCLUSTERS	8UL		/* One cluster per allocation order 0..7 */
#define ONE_GIG		(1UL * 1024UL * 1024UL * 1024UL)
#define CLUSTER_SIZE	(ONE_GIG / NCLUSTERS)
#define CLUSTER_MASK	(CLUSTER_SIZE - 1)
#define CLUSTER_NPAGES	(CLUSTER_SIZE >> IO_PAGE_SHIFT)	/* IO pages per cluster */
#define MAP_BASE	((u32)0xc0000000)	/* DVMA base address of the mapped 1GB */
/* Per-SYSIO IOMMU software state.  The 0xNN comments record each
 * member's intended byte offset within the structure.
 */
struct sbus_iommu {
/*0x00*/spinlock_t		lock;		/* Protects page_table, alloc_info,
						 * and lowest_consistent_map. */

/*0x08*/iopte_t			*page_table;	/* IOPTEs backing the 1GB DVMA area */
/*0x10*/unsigned long		strbuf_regs;	/* Streaming buffer register base */
/*0x18*/unsigned long		iommu_regs;	/* IOMMU register base */
/*0x20*/unsigned long		sbus_control_reg;	/* Also read to force write completion */

	/* Set non-zero by hardware when a queued FSYNC flush completes. */
/*0x28*/volatile unsigned long	strbuf_flushflag;

	/* If NCLUSTERS is ever decreased to 4 or lower,
	 * you must increase the size of the type of
	 * these counters.  You have been duly warned. -DaveM
	 */
/*0x30*/struct {
		u16	next;	/* Next allocation slot within this cluster */
		u16	flush;	/* Slot at which a full IOMMU flush is due */
	} alloc_info[NCLUSTERS];

	/* The lowest used consistent mapping entry.  Since
	 * we allocate consistent maps out of cluster 0 this
	 * is relative to the beginning of cluster 0.
	 */
/*0x50*/u32		lowest_consistent_map;
};
/* Offsets from iommu_regs (register addresses minus the 0x2400 base). */
#define SYSIO_IOMMUREG_BASE	0x2400UL
#define IOMMU_CONTROL	(0x2400UL - 0x2400UL)	/* IOMMU control register */
#define IOMMU_TSBBASE	(0x2408UL - 0x2400UL)	/* TSB base address register */
#define IOMMU_FLUSH	(0x2410UL - 0x2400UL)	/* IOMMU flush register */
#define IOMMU_VADIAG	(0x4400UL - 0x2400UL)	/* SBUS virtual address diagnostic */
#define IOMMU_TAGCMP	(0x4408UL - 0x2400UL)	/* TLB tag compare diagnostics */
#define IOMMU_LRUDIAG	(0x4500UL - 0x2400UL)	/* IOMMU LRU queue diagnostics */
#define IOMMU_TAGDIAG	(0x4580UL - 0x2400UL)	/* TLB tag diagnostics */
#define IOMMU_DRAMDIAG	(0x4600UL - 0x2400UL)	/* TLB data RAM diagnostics */

#define IOMMU_DRAM_VALID	(1UL << 30UL)
  70. static void __iommu_flushall(struct sbus_iommu *iommu)
  71. {
  72. unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
  73. int entry;
  74. for (entry = 0; entry < 16; entry++) {
  75. upa_writeq(0, tag);
  76. tag += 8UL;
  77. }
  78. upa_readq(iommu->sbus_control_reg);
  79. for (entry = 0; entry < NCLUSTERS; entry++) {
  80. iommu->alloc_info[entry].flush =
  81. iommu->alloc_info[entry].next;
  82. }
  83. }
  84. static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
  85. {
  86. while (npages--)
  87. upa_writeq(base + (npages << IO_PAGE_SHIFT),
  88. iommu->iommu_regs + IOMMU_FLUSH);
  89. upa_readq(iommu->sbus_control_reg);
  90. }
/* Offsets from strbuf_regs (register addresses minus the 0x2800 base). */
#define SYSIO_STRBUFREG_BASE	0x2800UL
#define STRBUF_CONTROL	(0x2800UL - 0x2800UL)	/* Control */
#define STRBUF_PFLUSH	(0x2808UL - 0x2800UL)	/* Page flush/invalidate */
#define STRBUF_FSYNC	(0x2810UL - 0x2800UL)	/* Flush synchronization */
#define STRBUF_DRAMDIAG	(0x5000UL - 0x2800UL)	/* data RAM diagnostic */
#define STRBUF_ERRDIAG	(0x5400UL - 0x2800UL)	/* error status diagnostics */
#define STRBUF_PTAGDIAG	(0x5800UL - 0x2800UL)	/* Page tag diagnostics */
#define STRBUF_LTAGDIAG	(0x5900UL - 0x2800UL)	/* Line tag diagnostics */

#define STRBUF_TAG_VALID	0x02UL
/* Flush the streaming buffer for the DVMA range [base, base + npages pages).
 * One PFLUSH write is queued per IO page, then the physical address of
 * strbuf_flushflag is written to the FSYNC register; the hardware sets
 * that word non-zero once all queued flushes have completed.  We poll
 * for up to 10000 * 10us before giving up with a warning.
 * All callers hold iommu->lock.
 */
static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
	unsigned long n;
	int limit;

	iommu->strbuf_flushflag = 0UL;
	n = npages;
	while (n--)
		upa_writeq(base + (n << IO_PAGE_SHIFT),
			   iommu->strbuf_regs + STRBUF_PFLUSH);

	/* Whoopee cushion! */
	upa_writeq(__pa(&iommu->strbuf_flushflag),
		   iommu->strbuf_regs + STRBUF_FSYNC);
	/* Force the writes out before we start polling. */
	upa_readq(iommu->sbus_control_reg);

	limit = 10000;
	while (iommu->strbuf_flushflag == 0UL) {
		limit--;
		if (!limit)
			break;
		udelay(10);
		/* Re-read flushflag freshly on each iteration. */
		membar("#LoadLoad");
	}
	if (!limit)
		printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
		       "vaddr[%08x] npages[%ld]\n",
		       base, npages);
}
/* Allocate a run of IOPTEs for a streaming mapping of npages pages.
 * An order-N request is carved out of cluster N with a stride of 2^N
 * entries; orders at or beyond NCLUSTERS fall back to multiple
 * contiguous entries in the last cluster.  Returns the first IOPTE of
 * the run, or NULL if the search wraps all the way around.
 * All callers hold iommu->lock.
 */
static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte, *limit, *first, *cluster;
	unsigned long cnum, ent, nent, flush_point, found;

	/* cnum = allocation order (ceil(log2(npages))). */
	cnum = 0;
	nent = 1;
	while ((1UL << cnum) < npages)
		cnum++;
	if(cnum >= NCLUSTERS) {
		/* Too large for one entry: take several entries from
		 * the last cluster instead.
		 */
		nent = 1UL << (cnum - NCLUSTERS);
		cnum = NCLUSTERS - 1;
	}
	iopte = iommu->page_table + (cnum * CLUSTER_NPAGES);

	/* Consistent mappings grow down from the top of cluster 0, so
	 * streaming allocations there must stop at the lowest used
	 * consistent entry.
	 */
	if (cnum == 0)
		limit = (iommu->page_table +
			 iommu->lowest_consistent_map);
	else
		limit = (iopte + CLUSTER_NPAGES);

	/* Resume the search where the last allocation left off. */
	iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
	flush_point = iommu->alloc_info[cnum].flush;

	first = iopte;
	cluster = NULL;
	found = 0;
	for (;;) {
		if (iopte_val(*iopte) == 0UL) {
			found++;
			if (!cluster)
				cluster = iopte;
		} else {
			/* Used cluster in the way */
			cluster = NULL;
			found = 0;
		}

		if (found == nent)
			break;

		iopte += (1 << cnum);
		ent++;
		if (iopte >= limit) {
			iopte = (iommu->page_table + (cnum * CLUSTER_NPAGES));
			ent = 0;

			/* Multiple cluster allocations must not wrap */
			cluster = NULL;
			found = 0;
		}
		/* Reaching the flush point means entries from here on may
		 * still be cached in the IOMMU TLB; flush everything.
		 */
		if (ent == flush_point)
			__iommu_flushall(iommu);
		if (iopte == first)
			goto bad;
	}

	/* ent/iopte points to the last cluster entry we're going to use,
	 * so save our place for the next allocation.
	 */
	if ((iopte + (1 << cnum)) >= limit)
		ent = 0;
	else
		ent = ent + 1;
	iommu->alloc_info[cnum].next = ent;
	if (ent == flush_point)
		__iommu_flushall(iommu);

	/* I've got your streaming cluster right here buddy boy... */
	return cluster;

bad:
	printk(KERN_EMERG "sbus: alloc_streaming_cluster of npages(%ld) failed!\n",
	       npages);
	return NULL;
}
/* Release the streaming mapping at DVMA address base covering npages
 * pages.  Clears the IOPTEs, then pulls the cluster's flush point back
 * if the periodic flush may already have passed this slot, so it gets
 * flushed before reuse.  All callers hold iommu->lock.
 */
static void free_streaming_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
	unsigned long cnum, ent, nent;
	iopte_t *iopte;

	/* Recompute cluster/order exactly as the allocator did. */
	cnum = 0;
	nent = 1;
	while ((1UL << cnum) < npages)
		cnum++;
	if(cnum >= NCLUSTERS) {
		nent = 1UL << (cnum - NCLUSTERS);
		cnum = NCLUSTERS - 1;
	}
	/* ent = slot index within the cluster; iopte = first IOPTE. */
	ent = (base & CLUSTER_MASK) >> (IO_PAGE_SHIFT + cnum);
	iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
	do {
		iopte_val(*iopte) = 0UL;
		iopte += 1 << cnum;
	} while(--nent);

	/* If the global flush might not have caught this entry,
	 * adjust the flush point such that we will flush before
	 * ever trying to reuse it.
	 */
#define between(X,Y,Z)	(((Z) - (Y)) >= ((X) - (Y)))
	if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
		iommu->alloc_info[cnum].flush = ent;
#undef between
}
/* We allocate consistent mappings from the end of cluster zero. */

/* Search backwards from the top of cluster 0 for npages consecutive
 * free IOPTEs.  On success returns the first (lowest) IOPTE of the run
 * and lowers lowest_consistent_map if needed, which bounds streaming
 * allocations in cluster 0.  Returns NULL if no run exists.
 * All callers hold iommu->lock.
 */
static iopte_t *alloc_consistent_cluster(struct sbus_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte;

	iopte = iommu->page_table + (1 * CLUSTER_NPAGES);
	while (iopte > iommu->page_table) {
		iopte--;
		if (!(iopte_val(*iopte) & IOPTE_VALID)) {
			unsigned long tmp = npages;

			/* Check that the npages-1 entries below this
			 * one are free as well.
			 */
			while (--tmp) {
				iopte--;
				if (iopte_val(*iopte) & IOPTE_VALID)
					break;
			}
			if (tmp == 0) {
				u32 entry = (iopte - iommu->page_table);

				if (entry < iommu->lowest_consistent_map)
					iommu->lowest_consistent_map = entry;
				return iopte;
			}
		}
	}
	return NULL;
}
/* Undo alloc_consistent_cluster: clear npages IOPTEs starting at DVMA
 * address base.  If this was the lowest consistent mapping, scan
 * upward past any free entries to re-raise lowest_consistent_map.
 * All callers hold iommu->lock.
 */
static void free_consistent_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
	iopte_t *iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);

	if ((iopte - iommu->page_table) == iommu->lowest_consistent_map) {
		iopte_t *walk = iopte + npages;
		iopte_t *limit;

		limit = iommu->page_table + CLUSTER_NPAGES;
		while (walk < limit) {
			if (iopte_val(*walk) != 0UL)
				break;
			walk++;
		}
		iommu->lowest_consistent_map =
			(walk - iommu->page_table);
	}

	while (npages--)
		*iopte++ = __iopte(0UL);
}
/* Allocate size bytes of zeroed, DMA-consistent memory for sdev.
 * Returns the CPU virtual address and stores the DVMA address through
 * dvma_addr; returns NULL on bad arguments, oversized requests
 * (order >= 10), page allocation failure, or IOPTE exhaustion.
 */
void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
{
	unsigned long order, first_page, flags;
	struct sbus_iommu *iommu;
	iopte_t *iopte;
	void *ret;
	int npages;

	if (size <= 0 || sdev == NULL || dvma_addr == NULL)
		return NULL;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	first_page = __get_free_pages(GFP_KERNEL, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = sdev->bus->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
	if (iopte == NULL) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		free_pages(first_page, order);
		return NULL;
	}

	/* Ok, we're committed at this point. */
	*dvma_addr = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	/* Valid, cacheable, writable mappings for each backing page. */
	while (npages--) {
		*iopte++ = __iopte(IOPTE_VALID | IOPTE_CACHE | IOPTE_WRITE |
				   (__pa(first_page) & IOPTE_PAGE));
		first_page += IO_PAGE_SIZE;
	}
	iommu_flush(iommu, *dvma_addr, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}
  300. void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
  301. {
  302. unsigned long order, npages;
  303. struct sbus_iommu *iommu;
  304. if (size <= 0 || sdev == NULL || cpu == NULL)
  305. return;
  306. npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
  307. iommu = sdev->bus->iommu;
  308. spin_lock_irq(&iommu->lock);
  309. free_consistent_cluster(iommu, dvma, npages);
  310. iommu_flush(iommu, dvma, npages);
  311. spin_unlock_irq(&iommu->lock);
  312. order = get_order(size);
  313. if (order < 10)
  314. free_pages((unsigned long)cpu, order);
  315. }
  316. dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t size, int dir)
  317. {
  318. struct sbus_iommu *iommu = sdev->bus->iommu;
  319. unsigned long npages, pbase, flags;
  320. iopte_t *iopte;
  321. u32 dma_base, offset;
  322. unsigned long iopte_bits;
  323. if (dir == SBUS_DMA_NONE)
  324. BUG();
  325. pbase = (unsigned long) ptr;
  326. offset = (u32) (pbase & ~IO_PAGE_MASK);
  327. size = (IO_PAGE_ALIGN(pbase + size) - (pbase & IO_PAGE_MASK));
  328. pbase = (unsigned long) __pa(pbase & IO_PAGE_MASK);
  329. spin_lock_irqsave(&iommu->lock, flags);
  330. npages = size >> IO_PAGE_SHIFT;
  331. iopte = alloc_streaming_cluster(iommu, npages);
  332. if (iopte == NULL)
  333. goto bad;
  334. dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
  335. npages = size >> IO_PAGE_SHIFT;
  336. iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
  337. if (dir != SBUS_DMA_TODEVICE)
  338. iopte_bits |= IOPTE_WRITE;
  339. while (npages--) {
  340. *iopte++ = __iopte(iopte_bits | (pbase & IOPTE_PAGE));
  341. pbase += IO_PAGE_SIZE;
  342. }
  343. npages = size >> IO_PAGE_SHIFT;
  344. spin_unlock_irqrestore(&iommu->lock, flags);
  345. return (dma_base | offset);
  346. bad:
  347. spin_unlock_irqrestore(&iommu->lock, flags);
  348. BUG();
  349. return 0;
  350. }
  351. void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size, int direction)
  352. {
  353. struct sbus_iommu *iommu = sdev->bus->iommu;
  354. u32 dma_base = dma_addr & IO_PAGE_MASK;
  355. unsigned long flags;
  356. size = (IO_PAGE_ALIGN(dma_addr + size) - dma_base);
  357. spin_lock_irqsave(&iommu->lock, flags);
  358. free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
  359. sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
  360. spin_unlock_irqrestore(&iommu->lock, flags);
  361. }
#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

/* Populate IOPTEs for a coalesced scatterlist.  nused is the number of
 * DMA segments (entries with dma_length set), nelems the number of
 * original entries.  For each DMA segment we walk the underlying sg
 * entries, emitting one IOPTE per IO page and merging entries that are
 * physically contiguous within a page.  Assumes prepare_sg() already
 * computed dma_address/dma_length — TODO confirm against caller.
 */
static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, int nused, int nelems, unsigned long iopte_bits)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i;

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		/* IO pages this DMA segment spans. */
		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = (unsigned long) SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					/* Entry starts in a new page. */
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					/* Entry crosses into the next page. */
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = ((pteval & IOPTE_PAGE) | iopte_bits);
			while (len > 0) {
				*iopte++ = __iopte(pteval);
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}
}
/* Map a scatterlist of nents entries for streaming DVMA in direction
 * dir.  Returns the number of coalesced DMA segments.  Single-entry
 * lists take the sbus_map_single() fast path.  BUGs on SBUS_DMA_NONE
 * or on IOPTE exhaustion.
 */
int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int dir)
{
	struct sbus_iommu *iommu = sdev->bus->iommu;
	unsigned long flags, npages;
	iopte_t *iopte;
	u32 dma_base;
	struct scatterlist *sgtmp;
	int used;
	unsigned long iopte_bits;

	if (dir == SBUS_DMA_NONE)
		BUG();

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg->dma_address =
			sbus_map_single(sdev,
					(page_address(sg->page) + sg->offset),
					sg->length, dir);
		sg->dma_length = sg->length;
		return 1;
	}

	/* Coalesce entries and compute the total page count. */
	npages = prepare_sg(sg, nents);

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_streaming_cluster(iommu, npages);
	if (iopte == NULL)
		goto bad;
	dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);

	/* Normalize DVMA addresses: prepare_sg left relative offsets
	 * in dma_address; add the allocated base to each used segment.
	 */
	sgtmp = sg;
	used = nents;

	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nents - used;

	iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
	if (dir != SBUS_DMA_TODEVICE)
		iopte_bits |= IOPTE_WRITE;

	fill_sg(iopte, sg, used, nents, iopte_bits);
#ifdef VERIFY_SG
	verify_sglist(sg, nents, iopte, npages);
#endif
	spin_unlock_irqrestore(&iommu->lock, flags);

	return used;

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	BUG();
	return 0;
}
/* Tear down a scatterlist mapping made by sbus_map_sg.  Single-entry
 * lists take the sbus_unmap_single() fast path.
 */
void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
{
	unsigned long size, flags;
	struct sbus_iommu *iommu;
	u32 dvma_base;
	int i;

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sbus_unmap_single(sdev, sg->dma_address, sg->dma_length, direction);
		return;
	}

	dvma_base = sg[0].dma_address & IO_PAGE_MASK;
	/* Find the last used segment; a zero dma_length ends the list.
	 * NOTE(review): assumes sg[0].dma_length != 0 (true for a list
	 * produced by sbus_map_sg); otherwise i would become -1 here.
	 */
	for (i = 0; i < nents; i++) {
		if (sg[i].dma_length == 0)
			break;
	}
	i--;
	size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - dvma_base;

	iommu = sdev->bus->iommu;
	spin_lock_irqsave(&iommu->lock, flags);
	free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
	sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);
}
  501. void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
  502. {
  503. struct sbus_iommu *iommu = sdev->bus->iommu;
  504. unsigned long flags;
  505. size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK));
  506. spin_lock_irqsave(&iommu->lock, flags);
  507. sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
  508. spin_unlock_irqrestore(&iommu->lock, flags);
  509. }
/* Intentionally a no-op: only the CPU-direction sync (above) needs a
 * streaming buffer flush on this hardware.
 */
void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
{
}
  513. void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
  514. {
  515. struct sbus_iommu *iommu = sdev->bus->iommu;
  516. unsigned long flags, size;
  517. u32 base;
  518. int i;
  519. base = sg[0].dma_address & IO_PAGE_MASK;
  520. for (i = 0; i < nents; i++) {
  521. if (sg[i].dma_length == 0)
  522. break;
  523. }
  524. i--;
  525. size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;
  526. spin_lock_irqsave(&iommu->lock, flags);
  527. sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
  528. spin_unlock_irqrestore(&iommu->lock, flags);
  529. }
/* Intentionally a no-op, like sbus_dma_sync_single_for_device. */
void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
{
}
  533. /* Enable 64-bit DVMA mode for the given device. */
  534. void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
  535. {
  536. struct sbus_iommu *iommu = sdev->bus->iommu;
  537. int slot = sdev->slot;
  538. unsigned long cfg_reg;
  539. u64 val;
  540. cfg_reg = iommu->sbus_control_reg;
  541. switch (slot) {
  542. case 0:
  543. cfg_reg += 0x20UL;
  544. break;
  545. case 1:
  546. cfg_reg += 0x28UL;
  547. break;
  548. case 2:
  549. cfg_reg += 0x30UL;
  550. break;
  551. case 3:
  552. cfg_reg += 0x38UL;
  553. break;
  554. case 13:
  555. cfg_reg += 0x40UL;
  556. break;
  557. case 14:
  558. cfg_reg += 0x48UL;
  559. break;
  560. case 15:
  561. cfg_reg += 0x50UL;
  562. break;
  563. default:
  564. return;
  565. };
  566. val = upa_readq(cfg_reg);
  567. if (val & (1UL << 14UL)) {
  568. /* Extended transfer mode already enabled. */
  569. return;
  570. }
  571. val |= (1UL << 14UL);
  572. if (bursts & DMA_BURST8)
  573. val |= (1UL << 1UL);
  574. if (bursts & DMA_BURST16)
  575. val |= (1UL << 2UL);
  576. if (bursts & DMA_BURST32)
  577. val |= (1UL << 3UL);
  578. if (bursts & DMA_BURST64)
  579. val |= (1UL << 4UL);
  580. upa_writeq(val, cfg_reg);
  581. }
/* SBUS SYSIO INO number to Sparc PIL level.  Indexed by INO; a zero
 * entry marks an invalid ("bogon") interrupt number.
 */
static unsigned char sysio_ino_to_pil[] = {
	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 0 */
	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 1 */
	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 2 */
	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 3 */
	4,				/* Onboard SCSI */
	5,				/* Onboard Ethernet */
/*XXX*/	8,				/* Onboard BPP */
	0,				/* Bogon */
	13,				/* Audio */
/*XXX*/15,				/* PowerFail */
	0,				/* Bogon */
	0,				/* Bogon */
	12,				/* Zilog Serial Channels (incl. Keyboard/Mouse lines) */
	11,				/* Floppy */
	0,				/* Spare Hardware (bogon for now) */
	0,				/* Keyboard (bogon for now) */
	0,				/* Mouse (bogon for now) */
	0,				/* Serial (bogon for now) */
	0, 0,				/* Bogon, Bogon */
	10,				/* Timer 0 */
	11,				/* Timer 1 */
	0, 0,				/* Bogon, Bogon */
	15,				/* Uncorrectable SBUS Error */
	15,				/* Correctable SBUS Error */
	15,				/* SBUS Error */
/*XXX*/	0,				/* Power Management (bogon for now) */
};
  611. /* INO number to IMAP register offset for SYSIO external IRQ's.
  612. * This should conform to both Sunfire/Wildfire server and Fusion
  613. * desktop designs.
  614. */
  615. #define SYSIO_IMAP_SLOT0 0x2c04UL
  616. #define SYSIO_IMAP_SLOT1 0x2c0cUL
  617. #define SYSIO_IMAP_SLOT2 0x2c14UL
  618. #define SYSIO_IMAP_SLOT3 0x2c1cUL
  619. #define SYSIO_IMAP_SCSI 0x3004UL
  620. #define SYSIO_IMAP_ETH 0x300cUL
  621. #define SYSIO_IMAP_BPP 0x3014UL
  622. #define SYSIO_IMAP_AUDIO 0x301cUL
  623. #define SYSIO_IMAP_PFAIL 0x3024UL
  624. #define SYSIO_IMAP_KMS 0x302cUL
  625. #define SYSIO_IMAP_FLPY 0x3034UL
  626. #define SYSIO_IMAP_SHW 0x303cUL
  627. #define SYSIO_IMAP_KBD 0x3044UL
  628. #define SYSIO_IMAP_MS 0x304cUL
  629. #define SYSIO_IMAP_SER 0x3054UL
  630. #define SYSIO_IMAP_TIM0 0x3064UL
  631. #define SYSIO_IMAP_TIM1 0x306cUL
  632. #define SYSIO_IMAP_UE 0x3074UL
  633. #define SYSIO_IMAP_CE 0x307cUL
  634. #define SYSIO_IMAP_SBERR 0x3084UL
  635. #define SYSIO_IMAP_PMGMT 0x308cUL
  636. #define SYSIO_IMAP_GFX 0x3094UL
  637. #define SYSIO_IMAP_EUPA 0x309cUL
  638. #define bogon ((unsigned long) -1)
/* Indexed by INO; gives the IMAP register offset for that interrupt,
 * or bogon (-1) for invalid INOs.  Kept in step with sysio_ino_to_pil.
 */
static unsigned long sysio_irq_offsets[] = {
	/* SBUS Slot 0 --> 3, level 1 --> 7 */
	SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
	SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
	SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
	SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
	SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
	SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
	SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
	SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,

	/* Onboard devices (not relevant/used on SunFire). */
	SYSIO_IMAP_SCSI,
	SYSIO_IMAP_ETH,
	SYSIO_IMAP_BPP,
	bogon,
	SYSIO_IMAP_AUDIO,
	SYSIO_IMAP_PFAIL,
	bogon,
	bogon,
	SYSIO_IMAP_KMS,
	SYSIO_IMAP_FLPY,
	SYSIO_IMAP_SHW,
	SYSIO_IMAP_KBD,
	SYSIO_IMAP_MS,
	SYSIO_IMAP_SER,
	bogon,
	bogon,
	SYSIO_IMAP_TIM0,
	SYSIO_IMAP_TIM1,
	bogon,
	bogon,
	SYSIO_IMAP_UE,
	SYSIO_IMAP_CE,
	SYSIO_IMAP_SBERR,
	SYSIO_IMAP_PMGMT,
};
  675. #undef bogon
  676. #define NUM_SYSIO_OFFSETS (sizeof(sysio_irq_offsets) / sizeof(sysio_irq_offsets[0]))
  677. /* Convert Interrupt Mapping register pointer to associated
  678. * Interrupt Clear register pointer, SYSIO specific version.
  679. */
  680. #define SYSIO_ICLR_UNUSED0 0x3400UL
  681. #define SYSIO_ICLR_SLOT0 0x340cUL
  682. #define SYSIO_ICLR_SLOT1 0x344cUL
  683. #define SYSIO_ICLR_SLOT2 0x348cUL
  684. #define SYSIO_ICLR_SLOT3 0x34ccUL
  685. static unsigned long sysio_imap_to_iclr(unsigned long imap)
  686. {
  687. unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0;
  688. return imap + diff;
  689. }
/* Build a virtual IRQ for SYSIO interrupt number ino on the SBUS
 * identified by buscookie.  Looks up the PIL and IMAP offset for the
 * INO, derives the matching ICLR register, and hands everything to
 * build_irq().  Panics/halts on INOs with no valid translation.
 */
unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
{
	struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
	struct sbus_iommu *iommu = sbus->iommu;
	/* Register offsets in this file are relative to +0x2000. */
	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
	unsigned long imap, iclr;
	int pil, sbus_level = 0;

	pil = sysio_ino_to_pil[ino];
	if (!pil) {
		printk("sbus_irq_build: Bad SYSIO INO[%x]\n", ino);
		panic("Bad SYSIO IRQ translations...");
	}

	if (PIL_RESERVED(pil))
		BUG();

	imap = sysio_irq_offsets[ino];
	if (imap == ((unsigned long)-1)) {
		prom_printf("get_irq_translations: Bad SYSIO INO[%x] cpu[%d]\n",
			    ino, pil);
		prom_halt();
	}
	imap += reg_base;

	/* SYSIO inconsistency.  For external SLOTS, we have to select
	 * the right ICLR register based upon the lower SBUS irq level
	 * bits.
	 */
	if (ino >= 0x20) {
		iclr = sysio_imap_to_iclr(imap);
	} else {
		/* INO bits [4:3] = slot, bits [2:0] = level. */
		int sbus_slot = (ino & 0x18)>>3;

		sbus_level = ino & 0x7;

		switch(sbus_slot) {
		case 0:
			iclr = reg_base + SYSIO_ICLR_SLOT0;
			break;
		case 1:
			iclr = reg_base + SYSIO_ICLR_SLOT1;
			break;
		case 2:
			iclr = reg_base + SYSIO_ICLR_SLOT2;
			break;
		default:
		case 3:
			iclr = reg_base + SYSIO_ICLR_SLOT3;
			break;
		};

		/* One ICLR register per level within the slot. */
		iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
	}
	return build_irq(pil, sbus_level, iclr, imap);
}
  739. /* Error interrupt handling. */
  740. #define SYSIO_UE_AFSR 0x0030UL
  741. #define SYSIO_UE_AFAR 0x0038UL
  742. #define SYSIO_UEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */
  743. #define SYSIO_UEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */
  744. #define SYSIO_UEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */
  745. #define SYSIO_UEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */
  746. #define SYSIO_UEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */
  747. #define SYSIO_UEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/
  748. #define SYSIO_UEAFSR_RESV1 0x03ff000000000000UL /* Reserved */
  749. #define SYSIO_UEAFSR_DOFF 0x0000e00000000000UL /* Doubleword Offset */
  750. #define SYSIO_UEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */
  751. #define SYSIO_UEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */
  752. #define SYSIO_UEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */
/* Interrupt handler for uncorrectable ECC errors.  Latches AFSR/AFAR,
 * clears the primary and secondary error status bits by writing them
 * back, and logs a decoded report.  Always returns IRQ_HANDLED.
 */
static irqreturn_t sysio_ue_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	struct sbus_bus *sbus = dev_id;
	struct sbus_iommu *iommu = sbus->iommu;
	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
	unsigned long afsr_reg, afar_reg;
	unsigned long afsr, afar, error_bits;
	int reported;

	afsr_reg = reg_base + SYSIO_UE_AFSR;
	afar_reg = reg_base + SYSIO_UE_AFAR;

	/* Latch error status. */
	afsr = upa_readq(afsr_reg);
	afar = upa_readq(afar_reg);

	/* Clear primary/secondary error status bits (write-1-to-clear). */
	error_bits = afsr &
		(SYSIO_UEAFSR_PPIO | SYSIO_UEAFSR_PDRD | SYSIO_UEAFSR_PDWR |
		 SYSIO_UEAFSR_SPIO | SYSIO_UEAFSR_SDRD | SYSIO_UEAFSR_SDWR);
	upa_writeq(error_bits, afsr_reg);

	/* Log the error. */
	printk("SYSIO[%x]: Uncorrectable ECC Error, primary error type[%s]\n",
	       sbus->portid,
	       (((error_bits & SYSIO_UEAFSR_PPIO) ?
		 "PIO" :
		 ((error_bits & SYSIO_UEAFSR_PDRD) ?
		  "DVMA Read" :
		  ((error_bits & SYSIO_UEAFSR_PDWR) ?
		   "DVMA Write" : "???")))));
	printk("SYSIO[%x]: DOFF[%lx] SIZE[%lx] MID[%lx]\n",
	       sbus->portid,
	       (afsr & SYSIO_UEAFSR_DOFF) >> 45UL,
	       (afsr & SYSIO_UEAFSR_SIZE) >> 42UL,
	       (afsr & SYSIO_UEAFSR_MID) >> 37UL);
	printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
	printk("SYSIO[%x]: Secondary UE errors [", sbus->portid);
	reported = 0;
	if (afsr & SYSIO_UEAFSR_SPIO) {
		reported++;
		printk("(PIO)");
	}
	if (afsr & SYSIO_UEAFSR_SDRD) {
		reported++;
		printk("(DVMA Read)");
	}
	if (afsr & SYSIO_UEAFSR_SDWR) {
		reported++;
		printk("(DVMA Write)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	return IRQ_HANDLED;
}
  805. #define SYSIO_CE_AFSR 0x0040UL
  806. #define SYSIO_CE_AFAR 0x0048UL
  807. #define SYSIO_CEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */
  808. #define SYSIO_CEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */
  809. #define SYSIO_CEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */
  810. #define SYSIO_CEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO cause */
  811. #define SYSIO_CEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */
#define SYSIO_CEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause */
  813. #define SYSIO_CEAFSR_RESV1 0x0300000000000000UL /* Reserved */
  814. #define SYSIO_CEAFSR_ESYND 0x00ff000000000000UL /* Syndrome Bits */
  815. #define SYSIO_CEAFSR_DOFF 0x0000e00000000000UL /* Double Offset */
  816. #define SYSIO_CEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */
  817. #define SYSIO_CEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */
  818. #define SYSIO_CEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */
  819. static irqreturn_t sysio_ce_handler(int irq, void *dev_id, struct pt_regs *regs)
  820. {
  821. struct sbus_bus *sbus = dev_id;
  822. struct sbus_iommu *iommu = sbus->iommu;
  823. unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
  824. unsigned long afsr_reg, afar_reg;
  825. unsigned long afsr, afar, error_bits;
  826. int reported;
  827. afsr_reg = reg_base + SYSIO_CE_AFSR;
  828. afar_reg = reg_base + SYSIO_CE_AFAR;
  829. /* Latch error status. */
  830. afsr = upa_readq(afsr_reg);
  831. afar = upa_readq(afar_reg);
  832. /* Clear primary/secondary error status bits. */
  833. error_bits = afsr &
  834. (SYSIO_CEAFSR_PPIO | SYSIO_CEAFSR_PDRD | SYSIO_CEAFSR_PDWR |
  835. SYSIO_CEAFSR_SPIO | SYSIO_CEAFSR_SDRD | SYSIO_CEAFSR_SDWR);
  836. upa_writeq(error_bits, afsr_reg);
  837. printk("SYSIO[%x]: Correctable ECC Error, primary error type[%s]\n",
  838. sbus->portid,
  839. (((error_bits & SYSIO_CEAFSR_PPIO) ?
  840. "PIO" :
  841. ((error_bits & SYSIO_CEAFSR_PDRD) ?
  842. "DVMA Read" :
  843. ((error_bits & SYSIO_CEAFSR_PDWR) ?
  844. "DVMA Write" : "???")))));
  845. /* XXX Use syndrome and afar to print out module string just like
  846. * XXX UDB CE trap handler does... -DaveM
  847. */
  848. printk("SYSIO[%x]: DOFF[%lx] ECC Syndrome[%lx] Size[%lx] MID[%lx]\n",
  849. sbus->portid,
  850. (afsr & SYSIO_CEAFSR_DOFF) >> 45UL,
  851. (afsr & SYSIO_CEAFSR_ESYND) >> 48UL,
  852. (afsr & SYSIO_CEAFSR_SIZE) >> 42UL,
  853. (afsr & SYSIO_CEAFSR_MID) >> 37UL);
  854. printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
  855. printk("SYSIO[%x]: Secondary CE errors [", sbus->portid);
  856. reported = 0;
  857. if (afsr & SYSIO_CEAFSR_SPIO) {
  858. reported++;
  859. printk("(PIO)");
  860. }
  861. if (afsr & SYSIO_CEAFSR_SDRD) {
  862. reported++;
  863. printk("(DVMA Read)");
  864. }
  865. if (afsr & SYSIO_CEAFSR_SDWR) {
  866. reported++;
  867. printk("(DVMA Write)");
  868. }
  869. if (!reported)
  870. printk("(none)");
  871. printk("]\n");
  872. return IRQ_HANDLED;
  873. }
  874. #define SYSIO_SBUS_AFSR 0x2010UL
  875. #define SYSIO_SBUS_AFAR 0x2018UL
  876. #define SYSIO_SBAFSR_PLE 0x8000000000000000UL /* Primary Late PIO Error */
  877. #define SYSIO_SBAFSR_PTO 0x4000000000000000UL /* Primary SBUS Timeout */
  878. #define SYSIO_SBAFSR_PBERR 0x2000000000000000UL /* Primary SBUS Error ACK */
  879. #define SYSIO_SBAFSR_SLE 0x1000000000000000UL /* Secondary Late PIO Error */
  880. #define SYSIO_SBAFSR_STO 0x0800000000000000UL /* Secondary SBUS Timeout */
  881. #define SYSIO_SBAFSR_SBERR 0x0400000000000000UL /* Secondary SBUS Error ACK */
  882. #define SYSIO_SBAFSR_RESV1 0x03ff000000000000UL /* Reserved */
  883. #define SYSIO_SBAFSR_RD 0x0000800000000000UL /* Primary was late PIO read */
  884. #define SYSIO_SBAFSR_RESV2 0x0000600000000000UL /* Reserved */
  885. #define SYSIO_SBAFSR_SIZE 0x00001c0000000000UL /* Size of transfer */
  886. #define SYSIO_SBAFSR_MID 0x000003e000000000UL /* MID causing the error */
  887. #define SYSIO_SBAFSR_RESV3 0x0000001fffffffffUL /* Reserved */
  888. static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id, struct pt_regs *regs)
  889. {
  890. struct sbus_bus *sbus = dev_id;
  891. struct sbus_iommu *iommu = sbus->iommu;
  892. unsigned long afsr_reg, afar_reg, reg_base;
  893. unsigned long afsr, afar, error_bits;
  894. int reported;
  895. reg_base = iommu->sbus_control_reg - 0x2000UL;
  896. afsr_reg = reg_base + SYSIO_SBUS_AFSR;
  897. afar_reg = reg_base + SYSIO_SBUS_AFAR;
  898. afsr = upa_readq(afsr_reg);
  899. afar = upa_readq(afar_reg);
  900. /* Clear primary/secondary error status bits. */
  901. error_bits = afsr &
  902. (SYSIO_SBAFSR_PLE | SYSIO_SBAFSR_PTO | SYSIO_SBAFSR_PBERR |
  903. SYSIO_SBAFSR_SLE | SYSIO_SBAFSR_STO | SYSIO_SBAFSR_SBERR);
  904. upa_writeq(error_bits, afsr_reg);
  905. /* Log the error. */
  906. printk("SYSIO[%x]: SBUS Error, primary error type[%s] read(%d)\n",
  907. sbus->portid,
  908. (((error_bits & SYSIO_SBAFSR_PLE) ?
  909. "Late PIO Error" :
  910. ((error_bits & SYSIO_SBAFSR_PTO) ?
  911. "Time Out" :
  912. ((error_bits & SYSIO_SBAFSR_PBERR) ?
  913. "Error Ack" : "???")))),
  914. (afsr & SYSIO_SBAFSR_RD) ? 1 : 0);
  915. printk("SYSIO[%x]: size[%lx] MID[%lx]\n",
  916. sbus->portid,
  917. (afsr & SYSIO_SBAFSR_SIZE) >> 42UL,
  918. (afsr & SYSIO_SBAFSR_MID) >> 37UL);
  919. printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
  920. printk("SYSIO[%x]: Secondary SBUS errors [", sbus->portid);
  921. reported = 0;
  922. if (afsr & SYSIO_SBAFSR_SLE) {
  923. reported++;
  924. printk("(Late PIO Error)");
  925. }
  926. if (afsr & SYSIO_SBAFSR_STO) {
  927. reported++;
  928. printk("(Time Out)");
  929. }
  930. if (afsr & SYSIO_SBAFSR_SBERR) {
  931. reported++;
  932. printk("(Error Ack)");
  933. }
  934. if (!reported)
  935. printk("(none)");
  936. printk("]\n");
  937. /* XXX check iommu/strbuf for further error status XXX */
  938. return IRQ_HANDLED;
  939. }
  940. #define ECC_CONTROL 0x0020UL
  941. #define SYSIO_ECNTRL_ECCEN 0x8000000000000000UL /* Enable ECC Checking */
  942. #define SYSIO_ECNTRL_UEEN 0x4000000000000000UL /* Enable UE Interrupts */
  943. #define SYSIO_ECNTRL_CEEN 0x2000000000000000UL /* Enable CE Interrupts */
  944. #define SYSIO_UE_INO 0x34
  945. #define SYSIO_CE_INO 0x35
  946. #define SYSIO_SBUSERR_INO 0x36
/* Register interrupt handlers for the three SYSIO error sources
 * (uncorrectable ECC, correctable ECC, SBUS error), then enable ECC
 * checking and error interrupt generation in the hardware.
 *
 * Called once at boot from sbus_iommu_init().  Failure to register
 * any handler is fatal: we drop back into the PROM via prom_halt(),
 * since running with error reporting disabled is unsafe.
 */
static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
{
	struct sbus_iommu *iommu = sbus->iommu;
	/* The error register bank sits 0x2000 below the SBUS control reg. */
	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
	unsigned int irq;
	u64 control;

	irq = sbus_build_irq(sbus, SYSIO_UE_INO);
	if (request_irq(irq, sysio_ue_handler,
			SA_SHIRQ, "SYSIO UE", sbus) < 0) {
		prom_printf("SYSIO[%x]: Cannot register UE interrupt.\n",
			    sbus->portid);
		prom_halt();
	}

	irq = sbus_build_irq(sbus, SYSIO_CE_INO);
	if (request_irq(irq, sysio_ce_handler,
			SA_SHIRQ, "SYSIO CE", sbus) < 0) {
		prom_printf("SYSIO[%x]: Cannot register CE interrupt.\n",
			    sbus->portid);
		prom_halt();
	}

	irq = sbus_build_irq(sbus, SYSIO_SBUSERR_INO);
	if (request_irq(irq, sysio_sbus_error_handler,
			SA_SHIRQ, "SYSIO SBUS Error", sbus) < 0) {
		prom_printf("SYSIO[%x]: Cannot register SBUS Error interrupt.\n",
			    sbus->portid);
		prom_halt();
	}

	/* Now turn the error interrupts on and also enable ECC checking. */
	upa_writeq((SYSIO_ECNTRL_ECCEN |
		    SYSIO_ECNTRL_UEEN |
		    SYSIO_ECNTRL_CEEN),
		   reg_base + ECC_CONTROL);

	control = upa_readq(iommu->sbus_control_reg);
	control |= 0x100UL; /* SBUS Error Interrupt Enable */
	upa_writeq(control, iommu->sbus_control_reg);
}
/* Boot time initialization.
 *
 * Probes the SYSIO node's "reg" property, allocates and programs the
 * IOMMU (TSB_SIZE=7 -> 128K IOPTEs, 1MB page table) and the streaming
 * buffer for one SBUS, sweeps the diagnostic registers clean, enables
 * DVMA arbitration, and finally hooks up the error interrupt handlers.
 *
 * Any failure (missing "reg" property, allocation failure) is fatal
 * and drops back into the PROM via prom_halt().
 */
void __init sbus_iommu_init(int prom_node, struct sbus_bus *sbus)
{
	struct linux_prom64_registers rprop;
	struct sbus_iommu *iommu;
	unsigned long regs, tsb_base;
	u64 control;
	int err, i;

	sbus->portid = prom_getintdefault(sbus->prom_node,
					  "upa-portid", -1);

	err = prom_getproperty(prom_node, "reg",
			       (char *)&rprop, sizeof(rprop));
	if (err < 0) {
		prom_printf("sbus_iommu_init: Cannot map SYSIO control registers.\n");
		prom_halt();
	}
	regs = rprop.phys_addr;

	/* Over-allocate by one cache line so we can align below.
	 * NOTE(review): the pre-alignment pointer is discarded, so this
	 * allocation can never be kfree()'d -- acceptable only because
	 * the structure lives for the lifetime of the system.
	 */
	iommu = kmalloc(sizeof(*iommu) + SMP_CACHE_BYTES, GFP_ATOMIC);
	if (iommu == NULL) {
		prom_printf("sbus_iommu_init: Fatal error, kmalloc(iommu) failed\n");
		prom_halt();
	}

	/* Align on E$ line boundary. */
	iommu = (struct sbus_iommu *)
		(((unsigned long)iommu + (SMP_CACHE_BYTES - 1UL)) &
		 ~(SMP_CACHE_BYTES - 1UL));

	memset(iommu, 0, sizeof(*iommu));

	/* We start with no consistent mappings. */
	iommu->lowest_consistent_map = CLUSTER_NPAGES;

	/* Reset per-cluster allocation state. */
	for (i = 0; i < NCLUSTERS; i++) {
		iommu->alloc_info[i].flush = 0;
		iommu->alloc_info[i].next = 0;
	}

	/* Setup spinlock. */
	spin_lock_init(&iommu->lock);

	/* Init register offsets. */
	iommu->iommu_regs = regs + SYSIO_IOMMUREG_BASE;
	iommu->strbuf_regs = regs + SYSIO_STRBUFREG_BASE;

	/* The SYSIO SBUS control register is used for dummy reads
	 * in order to ensure write completion.
	 */
	iommu->sbus_control_reg = regs + 0x2000UL;

	/* Link into SYSIO software state. */
	sbus->iommu = iommu;

	printk("SYSIO: UPA portID %x, at %016lx\n",
	       sbus->portid, regs);

	/* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1.
	 * NOTE(review): the read result below is immediately overwritten;
	 * it appears to act only as a register access, not as input --
	 * confirm against SYSIO docs before removing.
	 */
	control = upa_readq(iommu->iommu_regs + IOMMU_CONTROL);
	control = ((7UL << 16UL) |
		   (0UL << 2UL) |
		   (1UL << 1UL) |
		   (1UL << 0UL));

	/* Using the above configuration we need 1MB iommu page
	 * table (128K ioptes * 8 bytes per iopte). This is
	 * page order 7 on UltraSparc.
	 */
	tsb_base = __get_free_pages(GFP_ATOMIC, get_order(IO_TSB_SIZE));
	if (tsb_base == 0UL) {
		prom_printf("sbus_iommu_init: Fatal error, cannot alloc TSB table.\n");
		prom_halt();
	}

	iommu->page_table = (iopte_t *) tsb_base;
	memset(iommu->page_table, 0, IO_TSB_SIZE);

	upa_writeq(control, iommu->iommu_regs + IOMMU_CONTROL);

	/* Clean out any cruft in the IOMMU using
	 * diagnostic accesses.
	 */
	for (i = 0; i < 16; i++) {
		unsigned long dram = iommu->iommu_regs + IOMMU_DRAMDIAG;
		unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;

		dram += (unsigned long)i * 8UL;
		tag += (unsigned long)i * 8UL;
		upa_writeq(0, dram);
		upa_writeq(0, tag);
	}
	/* Dummy read of the control reg to flush the diagnostic writes. */
	upa_readq(iommu->sbus_control_reg);

	/* Give the TSB to SYSIO. */
	upa_writeq(__pa(tsb_base), iommu->iommu_regs + IOMMU_TSBBASE);

	/* Setup streaming buffer, DE=1 SB_EN=1 */
	control = (1UL << 1UL) | (1UL << 0UL);
	upa_writeq(control, iommu->strbuf_regs + STRBUF_CONTROL);

	/* Clear out the tags using diagnostics. */
	for (i = 0; i < 16; i++) {
		unsigned long ptag, ltag;

		ptag = iommu->strbuf_regs + STRBUF_PTAGDIAG;
		ltag = iommu->strbuf_regs + STRBUF_LTAGDIAG;
		ptag += (unsigned long)i * 8UL;
		ltag += (unsigned long)i * 8UL;

		upa_writeq(0UL, ptag);
		upa_writeq(0UL, ltag);
	}

	/* Enable DVMA arbitration for all devices/slots. */
	control = upa_readq(iommu->sbus_control_reg);
	control |= 0x3fUL;
	upa_writeq(control, iommu->sbus_control_reg);

	/* Now some Xfire specific grot... */
	if (this_is_starfire)
		sbus->starfire_cookie = starfire_hookup(sbus->portid);
	else
		sbus->starfire_cookie = NULL;

	sysio_register_error_handlers(sbus);
}