/* $Id: sbus.c,v 1.19 2002/01/23 11:27:32 davem Exp $
 * sbus.c: UltraSparc SBUS controller support.
 *
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>

#include <asm/page.h>
#include <asm/sbus.h>
#include <asm/io.h>
#include <asm/upa.h>
#include <asm/cache.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/starfire.h>

#include "iommu_common.h"

/* These should be allocated on an SMP_CACHE_BYTES
 * aligned boundary for optimal performance.
 *
 * On SYSIO, using an 8K page size we have 1GB of SBUS
 * DMA space mapped.  We divide this space into equally
 * sized clusters.  We allocate a DMA mapping from the
 * cluster that matches the order of the allocation, or
 * if the order is greater than the number of clusters,
 * we try to allocate from the last cluster.
 */
#define NCLUSTERS	8UL
#define ONE_GIG		(1UL * 1024UL * 1024UL * 1024UL)
#define CLUSTER_SIZE	(ONE_GIG / NCLUSTERS)
#define CLUSTER_MASK	(CLUSTER_SIZE - 1)
#define CLUSTER_NPAGES	(CLUSTER_SIZE >> IO_PAGE_SHIFT)
#define MAP_BASE	((u32)0xc0000000)
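
/* For reference (assuming the 8K IO page size noted above, i.e.
 * IO_PAGE_SHIFT == 13): CLUSTER_SIZE works out to 1GB / 8 = 128MB
 * per cluster, so CLUSTER_NPAGES is 128MB >> 13 = 16384 IOPTEs per
 * cluster, and the whole 1GB DVMA window is backed by 128K IOPTEs
 * starting at DVMA address MAP_BASE.
 */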

struct sbus_iommu {
/*0x00*/	spinlock_t		lock;

/*0x08*/	iopte_t			*page_table;
/*0x10*/	unsigned long		strbuf_regs;
/*0x18*/	unsigned long		iommu_regs;
/*0x20*/	unsigned long		sbus_control_reg;

/*0x28*/	volatile unsigned long	strbuf_flushflag;

	/* If NCLUSTERS is ever decreased to 4 or lower,
	 * you must increase the size of the type of
	 * these counters.  You have been duly warned. -DaveM
	 */
/*0x30*/	struct {
		u16	next;
		u16	flush;
	} alloc_info[NCLUSTERS];

	/* The lowest used consistent mapping entry.  Since
	 * we allocate consistent maps out of cluster 0 this
	 * is relative to the beginning of cluster 0.
	 */
/*0x50*/	u32		lowest_consistent_map;
};

/* Offsets from iommu_regs */
#define SYSIO_IOMMUREG_BASE	0x2400UL
#define IOMMU_CONTROL	(0x2400UL - 0x2400UL)	/* IOMMU control register */
#define IOMMU_TSBBASE	(0x2408UL - 0x2400UL)	/* TSB base address register */
#define IOMMU_FLUSH	(0x2410UL - 0x2400UL)	/* IOMMU flush register */
#define IOMMU_VADIAG	(0x4400UL - 0x2400UL)	/* SBUS virtual address diagnostic */
#define IOMMU_TAGCMP	(0x4408UL - 0x2400UL)	/* TLB tag compare diagnostics */
#define IOMMU_LRUDIAG	(0x4500UL - 0x2400UL)	/* IOMMU LRU queue diagnostics */
#define IOMMU_TAGDIAG	(0x4580UL - 0x2400UL)	/* TLB tag diagnostics */
#define IOMMU_DRAMDIAG	(0x4600UL - 0x2400UL)	/* TLB data RAM diagnostics */

#define IOMMU_DRAM_VALID	(1UL << 30UL)

static void __iommu_flushall(struct sbus_iommu *iommu)
{
        unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
        int entry;

        for (entry = 0; entry < 16; entry++) {
                upa_writeq(0, tag);
                tag += 8UL;
        }
        upa_readq(iommu->sbus_control_reg);

        for (entry = 0; entry < NCLUSTERS; entry++) {
                iommu->alloc_info[entry].flush =
                        iommu->alloc_info[entry].next;
        }
}
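
/* Note on the above: zeroing all 16 TLB tag diagnostic entries
 * invalidates every cached translation in the IOMMU TLB, and the
 * dummy read of the SBUS control register ensures those writes have
 * completed before we record the new per-cluster flush points.
 */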

static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
        while (npages--)
                upa_writeq(base + (npages << IO_PAGE_SHIFT),
                           iommu->iommu_regs + IOMMU_FLUSH);
        upa_readq(iommu->sbus_control_reg);
}

/* Offsets from strbuf_regs */
#define SYSIO_STRBUFREG_BASE	0x2800UL
#define STRBUF_CONTROL	(0x2800UL - 0x2800UL)	/* Control */
#define STRBUF_PFLUSH	(0x2808UL - 0x2800UL)	/* Page flush/invalidate */
#define STRBUF_FSYNC	(0x2810UL - 0x2800UL)	/* Flush synchronization */
#define STRBUF_DRAMDIAG	(0x5000UL - 0x2800UL)	/* data RAM diagnostic */
#define STRBUF_ERRDIAG	(0x5400UL - 0x2800UL)	/* error status diagnostics */
#define STRBUF_PTAGDIAG	(0x5800UL - 0x2800UL)	/* Page tag diagnostics */
#define STRBUF_LTAGDIAG	(0x5900UL - 0x2800UL)	/* Line tag diagnostics */

#define STRBUF_TAG_VALID	0x02UL

static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages, int direction)
{
        unsigned long n;
        int limit;

        n = npages;
        while (n--)
                upa_writeq(base + (n << IO_PAGE_SHIFT),
                           iommu->strbuf_regs + STRBUF_PFLUSH);

        /* If the device could not have possibly put dirty data into
         * the streaming cache, no flush-flag synchronization needs
         * to be performed.
         */
        if (direction == SBUS_DMA_TODEVICE)
                return;

        iommu->strbuf_flushflag = 0UL;

        /* Whoopee cushion! */
        upa_writeq(__pa(&iommu->strbuf_flushflag),
                   iommu->strbuf_regs + STRBUF_FSYNC);
        upa_readq(iommu->sbus_control_reg);

        limit = 100000;
        while (iommu->strbuf_flushflag == 0UL) {
                limit--;
                if (!limit)
                        break;
                udelay(1);
                rmb();
        }
        if (!limit)
                printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
                       "vaddr[%08x] npages[%ld]\n",
                       base, npages);
}
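
/* The flush-flag handshake above works like this: after the page
 * flush/invalidate writes are posted, the physical address of
 * strbuf_flushflag is written to the STRBUF_FSYNC register; the
 * streaming buffer is expected to store a nonzero value to that word
 * once all outstanding flushes have drained to memory.  We then poll
 * with udelay(1) per iteration, capped at 100,000 iterations (about
 * 100ms), before warning about a timeout.
 */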

static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
{
        iopte_t *iopte, *limit, *first, *cluster;
        unsigned long cnum, ent, nent, flush_point, found;

        cnum = 0;
        nent = 1;
        while ((1UL << cnum) < npages)
                cnum++;
        if (cnum >= NCLUSTERS) {
                nent = 1UL << (cnum - NCLUSTERS);
                cnum = NCLUSTERS - 1;
        }
        iopte = iommu->page_table + (cnum * CLUSTER_NPAGES);

        if (cnum == 0)
                limit = (iommu->page_table +
                         iommu->lowest_consistent_map);
        else
                limit = (iopte + CLUSTER_NPAGES);

        iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
        flush_point = iommu->alloc_info[cnum].flush;

        first = iopte;
        cluster = NULL;
        found = 0;
        for (;;) {
                if (iopte_val(*iopte) == 0UL) {
                        found++;
                        if (!cluster)
                                cluster = iopte;
                } else {
                        /* Used cluster in the way */
                        cluster = NULL;
                        found = 0;
                }

                if (found == nent)
                        break;

                iopte += (1 << cnum);
                ent++;
                if (iopte >= limit) {
                        iopte = (iommu->page_table + (cnum * CLUSTER_NPAGES));
                        ent = 0;

                        /* Multiple cluster allocations must not wrap */
                        cluster = NULL;
                        found = 0;
                }
                if (ent == flush_point)
                        __iommu_flushall(iommu);
                if (iopte == first)
                        goto bad;
        }

        /* ent/iopte points to the last cluster entry we're going to use,
         * so save our place for the next allocation.
         */
        if ((iopte + (1 << cnum)) >= limit)
                ent = 0;
        else
                ent = ent + 1;
        iommu->alloc_info[cnum].next = ent;
        if (ent == flush_point)
                __iommu_flushall(iommu);

        /* I've got your streaming cluster right here buddy boy... */
        return cluster;

bad:
        printk(KERN_EMERG "sbus: alloc_streaming_cluster of npages(%ld) failed!\n",
               npages);
        return NULL;
}
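
/* A worked example of the sizing logic above: for npages == 3 the
 * while loop leaves cnum == 2, so the allocation is served from
 * cluster 2 and the scan advances in strides of (1 << 2) == 4
 * IOPTEs, i.e. each slot in cluster 2 represents a 4-page mapping.
 * Requests bigger than the largest cluster order fall back to taking
 * nent consecutive slots from the last cluster.
 */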

static void free_streaming_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
        unsigned long cnum, ent, nent;
        iopte_t *iopte;

        cnum = 0;
        nent = 1;
        while ((1UL << cnum) < npages)
                cnum++;
        if (cnum >= NCLUSTERS) {
                nent = 1UL << (cnum - NCLUSTERS);
                cnum = NCLUSTERS - 1;
        }
        ent = (base & CLUSTER_MASK) >> (IO_PAGE_SHIFT + cnum);
        iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
        do {
                iopte_val(*iopte) = 0UL;
                iopte += 1 << cnum;
        } while (--nent);

        /* If the global flush might not have caught this entry,
         * adjust the flush point such that we will flush before
         * ever trying to reuse it.
         */
#define between(X,Y,Z)	(((Z) - (Y)) >= ((X) - (Y)))
        if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
                iommu->alloc_info[cnum].flush = ent;
#undef between
}
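
/* The between() macro relies on unsigned wraparound: it is true when
 * ent lies on the cyclic path from the allocator's next pointer up to
 * its recorded flush point.  Pulling the flush point back to ent in
 * that case guarantees __iommu_flushall() runs before the allocator
 * can hand this just-freed slot out again.
 */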

/* We allocate consistent mappings from the end of cluster zero. */
static iopte_t *alloc_consistent_cluster(struct sbus_iommu *iommu, unsigned long npages)
{
        iopte_t *iopte;

        iopte = iommu->page_table + (1 * CLUSTER_NPAGES);
        while (iopte > iommu->page_table) {
                iopte--;
                if (!(iopte_val(*iopte) & IOPTE_VALID)) {
                        unsigned long tmp = npages;

                        while (--tmp) {
                                iopte--;
                                if (iopte_val(*iopte) & IOPTE_VALID)
                                        break;
                        }
                        if (tmp == 0) {
                                u32 entry = (iopte - iommu->page_table);

                                if (entry < iommu->lowest_consistent_map)
                                        iommu->lowest_consistent_map = entry;
                                return iopte;
                        }
                }
        }
        return NULL;
}

static void free_consistent_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
        iopte_t *iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);

        if ((iopte - iommu->page_table) == iommu->lowest_consistent_map) {
                iopte_t *walk = iopte + npages;
                iopte_t *limit;

                limit = iommu->page_table + CLUSTER_NPAGES;
                while (walk < limit) {
                        if (iopte_val(*walk) != 0UL)
                                break;
                        walk++;
                }
                iommu->lowest_consistent_map =
                        (walk - iommu->page_table);
        }

        while (npages--)
                *iopte++ = __iopte(0UL);
}
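
/* lowest_consistent_map is what keeps the two allocators apart:
 * consistent mappings grow downward from the top of cluster 0, and
 * alloc_streaming_cluster() uses this watermark as the upper limit
 * for its cluster-0 scans, so the two regions never collide.
 */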

void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
{
        unsigned long order, first_page, flags;
        struct sbus_iommu *iommu;
        iopte_t *iopte;
        void *ret;
        int npages;

        if (size <= 0 || sdev == NULL || dvma_addr == NULL)
                return NULL;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (order >= 10)
                return NULL;
        first_page = __get_free_pages(GFP_KERNEL|__GFP_COMP, order);
        if (first_page == 0UL)
                return NULL;
        memset((char *)first_page, 0, PAGE_SIZE << order);

        iommu = sdev->bus->iommu;

        spin_lock_irqsave(&iommu->lock, flags);
        iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
        if (iopte == NULL) {
                spin_unlock_irqrestore(&iommu->lock, flags);
                free_pages(first_page, order);
                return NULL;
        }

        /* Ok, we're committed at this point. */
        *dvma_addr = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
        ret = (void *) first_page;
        npages = size >> IO_PAGE_SHIFT;
        while (npages--) {
                *iopte++ = __iopte(IOPTE_VALID | IOPTE_CACHE | IOPTE_WRITE |
                                   (__pa(first_page) & IOPTE_PAGE));
                first_page += IO_PAGE_SIZE;
        }
        iommu_flush(iommu, *dvma_addr, size >> IO_PAGE_SHIFT);
        spin_unlock_irqrestore(&iommu->lock, flags);

        return ret;
}
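
/* A minimal usage sketch from a driver's point of view (the buffer
 * size RING_BYTES and the usage are hypothetical):
 *
 *	dma_addr_t dvma;
 *	void *ring = sbus_alloc_consistent(sdev, RING_BYTES, &dvma);
 *
 *	if (ring != NULL) {
 *		... hand dvma to the device, use ring from the CPU ...
 *		sbus_free_consistent(sdev, RING_BYTES, ring, dvma);
 *	}
 */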

void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
{
        unsigned long order, npages;
        struct sbus_iommu *iommu;

        if (size <= 0 || sdev == NULL || cpu == NULL)
                return;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        iommu = sdev->bus->iommu;

        spin_lock_irq(&iommu->lock);
        free_consistent_cluster(iommu, dvma, npages);
        iommu_flush(iommu, dvma, npages);
        spin_unlock_irq(&iommu->lock);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}

dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t size, int dir)
{
        struct sbus_iommu *iommu = sdev->bus->iommu;
        unsigned long npages, pbase, flags;
        iopte_t *iopte;
        u32 dma_base, offset;
        unsigned long iopte_bits;

        if (dir == SBUS_DMA_NONE)
                BUG();

        pbase = (unsigned long) ptr;
        offset = (u32) (pbase & ~IO_PAGE_MASK);
        size = (IO_PAGE_ALIGN(pbase + size) - (pbase & IO_PAGE_MASK));
        pbase = (unsigned long) __pa(pbase & IO_PAGE_MASK);

        spin_lock_irqsave(&iommu->lock, flags);
        npages = size >> IO_PAGE_SHIFT;
        iopte = alloc_streaming_cluster(iommu, npages);
        if (iopte == NULL)
                goto bad;
        dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
        npages = size >> IO_PAGE_SHIFT;
        iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
        if (dir != SBUS_DMA_TODEVICE)
                iopte_bits |= IOPTE_WRITE;
        while (npages--) {
                *iopte++ = __iopte(iopte_bits | (pbase & IOPTE_PAGE));
                pbase += IO_PAGE_SIZE;
        }
        npages = size >> IO_PAGE_SHIFT;
        spin_unlock_irqrestore(&iommu->lock, flags);

        return (dma_base | offset);

bad:
        spin_unlock_irqrestore(&iommu->lock, flags);
        BUG();
        return 0;
}
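
/* Note the IOPTE_WRITE convention above: "write" is from the
 * device's perspective, so SBUS_DMA_TODEVICE mappings (the device
 * only reads) are left read-only, while FROMDEVICE and BIDIRECTIONAL
 * mappings get write permission.
 */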

void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size, int direction)
{
        struct sbus_iommu *iommu = sdev->bus->iommu;
        u32 dma_base = dma_addr & IO_PAGE_MASK;
        unsigned long flags;

        size = (IO_PAGE_ALIGN(dma_addr + size) - dma_base);

        spin_lock_irqsave(&iommu->lock, flags);
        free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
        sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT, direction);
        spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG)	\
        (__pa(page_address((SG)->page)) + (SG)->offset)
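
/* fill_sg() below writes the IOPTEs for an already-prepared
 * scatterlist: nused is the number of coalesced DVMA segments
 * (dma_address/dma_length pairs) and nelems the number of original
 * entries.  For each DVMA segment it walks the physical pages of the
 * underlying sg entries, merging entries that happen to be
 * physically contiguous into one run of page table entries.
 */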

static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, int nused, int nelems, unsigned long iopte_bits)
{
        struct scatterlist *dma_sg = sg;
        struct scatterlist *sg_end = sg + nelems;
        int i;

        for (i = 0; i < nused; i++) {
                unsigned long pteval = ~0UL;
                u32 dma_npages;

                dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
                              dma_sg->dma_length +
                              ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
                do {
                        unsigned long offset;
                        signed int len;

                        /* If we are here, we know we have at least one
                         * more page to map.  So walk forward until we
                         * hit a page crossing, and begin creating new
                         * mappings from that spot.
                         */
                        for (;;) {
                                unsigned long tmp;

                                tmp = (unsigned long) SG_ENT_PHYS_ADDRESS(sg);
                                len = sg->length;
                                if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = tmp & IO_PAGE_MASK;
                                        offset = tmp & (IO_PAGE_SIZE - 1UL);
                                        break;
                                }
                                if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
                                        offset = 0UL;
                                        len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
                                        break;
                                }
                                sg++;
                        }

                        pteval = ((pteval & IOPTE_PAGE) | iopte_bits);
                        while (len > 0) {
                                *iopte++ = __iopte(pteval);
                                pteval += IO_PAGE_SIZE;
                                len -= (IO_PAGE_SIZE - offset);
                                offset = 0;
                                dma_npages--;
                        }

                        pteval = (pteval & IOPTE_PAGE) + len;
                        sg++;

                        /* Skip over any tail mappings we've fully mapped,
                         * adjusting pteval along the way.  Stop when we
                         * detect a page crossing event.
                         */
                        while (sg < sg_end &&
                               (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
                               (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
                               ((pteval ^
                                 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
                                pteval += sg->length;
                                sg++;
                        }
                        if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
                                pteval = ~0UL;
                } while (dma_npages != 0);
                dma_sg++;
        }
}

int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int dir)
{
        struct sbus_iommu *iommu = sdev->bus->iommu;
        unsigned long flags, npages;
        iopte_t *iopte;
        u32 dma_base;
        struct scatterlist *sgtmp;
        int used;
        unsigned long iopte_bits;

        if (dir == SBUS_DMA_NONE)
                BUG();

        /* Fast path single entry scatterlists. */
        if (nents == 1) {
                sg->dma_address =
                        sbus_map_single(sdev,
                                        (page_address(sg->page) + sg->offset),
                                        sg->length, dir);
                sg->dma_length = sg->length;
                return 1;
        }

        npages = prepare_sg(sg, nents);

        spin_lock_irqsave(&iommu->lock, flags);
        iopte = alloc_streaming_cluster(iommu, npages);
        if (iopte == NULL)
                goto bad;
        dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);

        /* Normalize DVMA addresses. */
        sgtmp = sg;
        used = nents;

        while (used && sgtmp->dma_length) {
                sgtmp->dma_address += dma_base;
                sgtmp++;
                used--;
        }
        used = nents - used;

        iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
        if (dir != SBUS_DMA_TODEVICE)
                iopte_bits |= IOPTE_WRITE;

        fill_sg(iopte, sg, used, nents, iopte_bits);
#ifdef VERIFY_SG
        verify_sglist(sg, nents, iopte, npages);
#endif
        spin_unlock_irqrestore(&iommu->lock, flags);

        return used;

bad:
        spin_unlock_irqrestore(&iommu->lock, flags);
        BUG();
        return 0;
}
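
/* prepare_sg() (from iommu_common) has already coalesced adjacent
 * entries into DVMA segments, storing DVMA-relative dma_address
 * values; the normalization loop above only has to add dma_base and
 * count the segments, and it assumes entries past the last coalesced
 * segment carry dma_length == 0 (the unmap and sync paths below rely
 * on the same sentinel).  The return value "used" is the number of
 * DVMA segments, which may be fewer than nents.
 */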

void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
{
        unsigned long size, flags;
        struct sbus_iommu *iommu;
        u32 dvma_base;
        int i;

        /* Fast path single entry scatterlists. */
        if (nents == 1) {
                sbus_unmap_single(sdev, sg->dma_address, sg->dma_length, direction);
                return;
        }

        dvma_base = sg[0].dma_address & IO_PAGE_MASK;
        for (i = 0; i < nents; i++) {
                if (sg[i].dma_length == 0)
                        break;
        }
        i--;
        size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - dvma_base;

        iommu = sdev->bus->iommu;
        spin_lock_irqsave(&iommu->lock, flags);
        free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
        sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT, direction);
        spin_unlock_irqrestore(&iommu->lock, flags);
}

void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
{
        struct sbus_iommu *iommu = sdev->bus->iommu;
        unsigned long flags;

        size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK));

        spin_lock_irqsave(&iommu->lock, flags);
        sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT, direction);
        spin_unlock_irqrestore(&iommu->lock, flags);
}

void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
{
}

void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
{
        struct sbus_iommu *iommu = sdev->bus->iommu;
        unsigned long flags, size;
        u32 base;
        int i;

        base = sg[0].dma_address & IO_PAGE_MASK;
        for (i = 0; i < nents; i++) {
                if (sg[i].dma_length == 0)
                        break;
        }
        i--;
        size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;

        spin_lock_irqsave(&iommu->lock, flags);
        sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT, direction);
        spin_unlock_irqrestore(&iommu->lock, flags);
}

void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
{
}
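
/* The for_device sync hooks are empty, presumably because on SYSIO
 * the streaming buffer only has to be flushed before the CPU looks
 * at data the device wrote; there is no CPU-side state to push back
 * before handing a buffer to the device (compare the TODEVICE early
 * return in sbus_strbuf_flush()).
 */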

/* Enable 64-bit DVMA mode for the given device. */
void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
{
        struct sbus_iommu *iommu = sdev->bus->iommu;
        int slot = sdev->slot;
        unsigned long cfg_reg;
        u64 val;

        cfg_reg = iommu->sbus_control_reg;
        switch (slot) {
        case 0:
                cfg_reg += 0x20UL;
                break;
        case 1:
                cfg_reg += 0x28UL;
                break;
        case 2:
                cfg_reg += 0x30UL;
                break;
        case 3:
                cfg_reg += 0x38UL;
                break;
        case 13:
                cfg_reg += 0x40UL;
                break;
        case 14:
                cfg_reg += 0x48UL;
                break;
        case 15:
                cfg_reg += 0x50UL;
                break;

        default:
                return;
        }

        val = upa_readq(cfg_reg);
        if (val & (1UL << 14UL)) {
                /* Extended transfer mode already enabled. */
                return;
        }

        val |= (1UL << 14UL);

        if (bursts & DMA_BURST8)
                val |= (1UL << 1UL);
        if (bursts & DMA_BURST16)
                val |= (1UL << 2UL);
        if (bursts & DMA_BURST32)
                val |= (1UL << 3UL);
        if (bursts & DMA_BURST64)
                val |= (1UL << 4UL);
        upa_writeq(val, cfg_reg);
}
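
/* Per-slot SBUS config register layout as used above: bit 14 is the
 * extended (64-bit) transfer enable, and bits 1 through 4 enable
 * 8/16/32/64-byte bursts respectively.  Slots 0-3 are the external
 * SBUS slots and 13-15 cover the onboard devices; any other slot has
 * no config register to program, hence the early return.
 */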

/* INO number to IMAP register offset for SYSIO external IRQ's.
 * This should conform to both Sunfire/Wildfire server and Fusion
 * desktop designs.
 */
#define SYSIO_IMAP_SLOT0	0x2c04UL
#define SYSIO_IMAP_SLOT1	0x2c0cUL
#define SYSIO_IMAP_SLOT2	0x2c14UL
#define SYSIO_IMAP_SLOT3	0x2c1cUL
#define SYSIO_IMAP_SCSI		0x3004UL
#define SYSIO_IMAP_ETH		0x300cUL
#define SYSIO_IMAP_BPP		0x3014UL
#define SYSIO_IMAP_AUDIO	0x301cUL
#define SYSIO_IMAP_PFAIL	0x3024UL
#define SYSIO_IMAP_KMS		0x302cUL
#define SYSIO_IMAP_FLPY		0x3034UL
#define SYSIO_IMAP_SHW		0x303cUL
#define SYSIO_IMAP_KBD		0x3044UL
#define SYSIO_IMAP_MS		0x304cUL
#define SYSIO_IMAP_SER		0x3054UL
#define SYSIO_IMAP_TIM0		0x3064UL
#define SYSIO_IMAP_TIM1		0x306cUL
#define SYSIO_IMAP_UE		0x3074UL
#define SYSIO_IMAP_CE		0x307cUL
#define SYSIO_IMAP_SBERR	0x3084UL
#define SYSIO_IMAP_PMGMT	0x308cUL
#define SYSIO_IMAP_GFX		0x3094UL
#define SYSIO_IMAP_EUPA		0x309cUL

#define bogon     ((unsigned long) -1)
static unsigned long sysio_irq_offsets[] = {
        /* SBUS Slot 0 --> 3, level 1 --> 7 */
        SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
        SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
        SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
        SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
        SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
        SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
        SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
        SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,

        /* Onboard devices (not relevant/used on SunFire). */
        SYSIO_IMAP_SCSI,
        SYSIO_IMAP_ETH,
        SYSIO_IMAP_BPP,
        bogon,
        SYSIO_IMAP_AUDIO,
        SYSIO_IMAP_PFAIL,
        bogon,
        bogon,
        SYSIO_IMAP_KMS,
        SYSIO_IMAP_FLPY,
        SYSIO_IMAP_SHW,
        SYSIO_IMAP_KBD,
        SYSIO_IMAP_MS,
        SYSIO_IMAP_SER,
        bogon,
        bogon,
        SYSIO_IMAP_TIM0,
        SYSIO_IMAP_TIM1,
        bogon,
        bogon,
        SYSIO_IMAP_UE,
        SYSIO_IMAP_CE,
        SYSIO_IMAP_SBERR,
        SYSIO_IMAP_PMGMT,
};
#undef bogon

#define NUM_SYSIO_OFFSETS ARRAY_SIZE(sysio_irq_offsets)
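
/* INO encoding, as decoded in sbus_build_irq() below: for the
 * external slots ino = (slot << 3) | sbus_level, so the first 32
 * table entries are slot/level pairs; INOs of 0x20 and up index the
 * onboard device entries that follow.
 */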

/* Convert Interrupt Mapping register pointer to associated
 * Interrupt Clear register pointer, SYSIO specific version.
 */
#define SYSIO_ICLR_UNUSED0	0x3400UL
#define SYSIO_ICLR_SLOT0	0x340cUL
#define SYSIO_ICLR_SLOT1	0x344cUL
#define SYSIO_ICLR_SLOT2	0x348cUL
#define SYSIO_ICLR_SLOT3	0x34ccUL
static unsigned long sysio_imap_to_iclr(unsigned long imap)
{
        unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0;
        return imap + diff;
}

unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
{
        struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
        struct sbus_iommu *iommu = sbus->iommu;
        unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
        unsigned long imap, iclr;
        int sbus_level = 0;

        imap = sysio_irq_offsets[ino];
        if (imap == ((unsigned long)-1)) {
                prom_printf("sbus_build_irq: Bad SYSIO INO[%x]\n",
                            ino);
                prom_halt();
        }
        imap += reg_base;

        /* SYSIO inconsistency.  For external SLOTS, we have to select
         * the right ICLR register based upon the lower SBUS irq level
         * bits.
         */
        if (ino >= 0x20) {
                iclr = sysio_imap_to_iclr(imap);
        } else {
                int sbus_slot = (ino & 0x18) >> 3;

                sbus_level = ino & 0x7;

                switch (sbus_slot) {
                case 0:
                        iclr = reg_base + SYSIO_ICLR_SLOT0;
                        break;
                case 1:
                        iclr = reg_base + SYSIO_ICLR_SLOT1;
                        break;
                case 2:
                        iclr = reg_base + SYSIO_ICLR_SLOT2;
                        break;
                default:
                case 3:
                        iclr = reg_base + SYSIO_ICLR_SLOT3;
                        break;
                }

                iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
        }
        return build_irq(sbus_level, iclr, imap);
}

/* Error interrupt handling. */
#define SYSIO_UE_AFSR	0x0030UL
#define SYSIO_UE_AFAR	0x0038UL
#define SYSIO_UEAFSR_PPIO	0x8000000000000000UL /* Primary PIO cause         */
#define SYSIO_UEAFSR_PDRD	0x4000000000000000UL /* Primary DVMA read cause   */
#define SYSIO_UEAFSR_PDWR	0x2000000000000000UL /* Primary DVMA write cause  */
#define SYSIO_UEAFSR_SPIO	0x1000000000000000UL /* Secondary PIO is cause    */
#define SYSIO_UEAFSR_SDRD	0x0800000000000000UL /* Secondary DVMA read cause */
#define SYSIO_UEAFSR_SDWR	0x0400000000000000UL /* Secondary DVMA write cause*/
#define SYSIO_UEAFSR_RESV1	0x03ff000000000000UL /* Reserved                  */
#define SYSIO_UEAFSR_DOFF	0x0000e00000000000UL /* Doubleword Offset         */
#define SYSIO_UEAFSR_SIZE	0x00001c0000000000UL /* Bad transfer size 2^SIZE  */
#define SYSIO_UEAFSR_MID	0x000003e000000000UL /* UPA MID causing the fault */
#define SYSIO_UEAFSR_RESV2	0x0000001fffffffffUL /* Reserved                  */
static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
{
        struct sbus_bus *sbus = dev_id;
        struct sbus_iommu *iommu = sbus->iommu;
        unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
        unsigned long afsr_reg, afar_reg;
        unsigned long afsr, afar, error_bits;
        int reported;

        afsr_reg = reg_base + SYSIO_UE_AFSR;
        afar_reg = reg_base + SYSIO_UE_AFAR;

        /* Latch error status. */
        afsr = upa_readq(afsr_reg);
        afar = upa_readq(afar_reg);

        /* Clear primary/secondary error status bits. */
        error_bits = afsr &
                (SYSIO_UEAFSR_PPIO | SYSIO_UEAFSR_PDRD | SYSIO_UEAFSR_PDWR |
                 SYSIO_UEAFSR_SPIO | SYSIO_UEAFSR_SDRD | SYSIO_UEAFSR_SDWR);
        upa_writeq(error_bits, afsr_reg);

        /* Log the error. */
        printk("SYSIO[%x]: Uncorrectable ECC Error, primary error type[%s]\n",
               sbus->portid,
               (((error_bits & SYSIO_UEAFSR_PPIO) ?
                 "PIO" :
                 ((error_bits & SYSIO_UEAFSR_PDRD) ?
                  "DVMA Read" :
                  ((error_bits & SYSIO_UEAFSR_PDWR) ?
                   "DVMA Write" : "???")))));
        printk("SYSIO[%x]: DOFF[%lx] SIZE[%lx] MID[%lx]\n",
               sbus->portid,
               (afsr & SYSIO_UEAFSR_DOFF) >> 45UL,
               (afsr & SYSIO_UEAFSR_SIZE) >> 42UL,
               (afsr & SYSIO_UEAFSR_MID) >> 37UL);
        printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
        printk("SYSIO[%x]: Secondary UE errors [", sbus->portid);
        reported = 0;
        if (afsr & SYSIO_UEAFSR_SPIO) {
                reported++;
                printk("(PIO)");
        }
        if (afsr & SYSIO_UEAFSR_SDRD) {
                reported++;
                printk("(DVMA Read)");
        }
        if (afsr & SYSIO_UEAFSR_SDWR) {
                reported++;
                printk("(DVMA Write)");
        }
        if (!reported)
                printk("(none)");
        printk("]\n");

        return IRQ_HANDLED;
}
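
/* The pattern above (read the AFSR, then write the latched
 * primary/secondary cause bits back into it) clears the logged
 * status and re-arms the error registers for the next fault; the
 * same write-bits-back-to-clear idiom repeats in the CE and SBUS
 * error handlers below.
 */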

#define SYSIO_CE_AFSR	0x0040UL
#define SYSIO_CE_AFAR	0x0048UL
#define SYSIO_CEAFSR_PPIO	0x8000000000000000UL /* Primary PIO cause         */
#define SYSIO_CEAFSR_PDRD	0x4000000000000000UL /* Primary DVMA read cause   */
#define SYSIO_CEAFSR_PDWR	0x2000000000000000UL /* Primary DVMA write cause  */
#define SYSIO_CEAFSR_SPIO	0x1000000000000000UL /* Secondary PIO cause       */
#define SYSIO_CEAFSR_SDRD	0x0800000000000000UL /* Secondary DVMA read cause */
#define SYSIO_CEAFSR_SDWR	0x0400000000000000UL /* Secondary DVMA write cause*/
#define SYSIO_CEAFSR_RESV1	0x0300000000000000UL /* Reserved                  */
#define SYSIO_CEAFSR_ESYND	0x00ff000000000000UL /* Syndrome Bits             */
#define SYSIO_CEAFSR_DOFF	0x0000e00000000000UL /* Double Offset             */
#define SYSIO_CEAFSR_SIZE	0x00001c0000000000UL /* Bad transfer size 2^SIZE  */
#define SYSIO_CEAFSR_MID	0x000003e000000000UL /* UPA MID causing the fault */
#define SYSIO_CEAFSR_RESV2	0x0000001fffffffffUL /* Reserved                  */
static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
{
        struct sbus_bus *sbus = dev_id;
        struct sbus_iommu *iommu = sbus->iommu;
        unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
        unsigned long afsr_reg, afar_reg;
        unsigned long afsr, afar, error_bits;
        int reported;

        afsr_reg = reg_base + SYSIO_CE_AFSR;
        afar_reg = reg_base + SYSIO_CE_AFAR;

        /* Latch error status. */
        afsr = upa_readq(afsr_reg);
        afar = upa_readq(afar_reg);

        /* Clear primary/secondary error status bits. */
        error_bits = afsr &
                (SYSIO_CEAFSR_PPIO | SYSIO_CEAFSR_PDRD | SYSIO_CEAFSR_PDWR |
                 SYSIO_CEAFSR_SPIO | SYSIO_CEAFSR_SDRD | SYSIO_CEAFSR_SDWR);
        upa_writeq(error_bits, afsr_reg);

        printk("SYSIO[%x]: Correctable ECC Error, primary error type[%s]\n",
               sbus->portid,
               (((error_bits & SYSIO_CEAFSR_PPIO) ?
                 "PIO" :
                 ((error_bits & SYSIO_CEAFSR_PDRD) ?
                  "DVMA Read" :
                  ((error_bits & SYSIO_CEAFSR_PDWR) ?
                   "DVMA Write" : "???")))));

        /* XXX Use syndrome and afar to print out module string just like
         * XXX UDB CE trap handler does... -DaveM
         */
        printk("SYSIO[%x]: DOFF[%lx] ECC Syndrome[%lx] Size[%lx] MID[%lx]\n",
               sbus->portid,
               (afsr & SYSIO_CEAFSR_DOFF) >> 45UL,
               (afsr & SYSIO_CEAFSR_ESYND) >> 48UL,
               (afsr & SYSIO_CEAFSR_SIZE) >> 42UL,
               (afsr & SYSIO_CEAFSR_MID) >> 37UL);
        printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);

        printk("SYSIO[%x]: Secondary CE errors [", sbus->portid);
        reported = 0;
        if (afsr & SYSIO_CEAFSR_SPIO) {
                reported++;
                printk("(PIO)");
        }
        if (afsr & SYSIO_CEAFSR_SDRD) {
                reported++;
                printk("(DVMA Read)");
        }
        if (afsr & SYSIO_CEAFSR_SDWR) {
                reported++;
                printk("(DVMA Write)");
        }
        if (!reported)
                printk("(none)");
        printk("]\n");

        return IRQ_HANDLED;
}

#define SYSIO_SBUS_AFSR		0x2010UL
#define SYSIO_SBUS_AFAR		0x2018UL
#define SYSIO_SBAFSR_PLE	0x8000000000000000UL /* Primary Late PIO Error    */
#define SYSIO_SBAFSR_PTO	0x4000000000000000UL /* Primary SBUS Timeout      */
#define SYSIO_SBAFSR_PBERR	0x2000000000000000UL /* Primary SBUS Error ACK    */
#define SYSIO_SBAFSR_SLE	0x1000000000000000UL /* Secondary Late PIO Error  */
#define SYSIO_SBAFSR_STO	0x0800000000000000UL /* Secondary SBUS Timeout    */
#define SYSIO_SBAFSR_SBERR	0x0400000000000000UL /* Secondary SBUS Error ACK  */
#define SYSIO_SBAFSR_RESV1	0x03ff000000000000UL /* Reserved                  */
#define SYSIO_SBAFSR_RD		0x0000800000000000UL /* Primary was late PIO read */
#define SYSIO_SBAFSR_RESV2	0x0000600000000000UL /* Reserved                  */
#define SYSIO_SBAFSR_SIZE	0x00001c0000000000UL /* Size of transfer          */
#define SYSIO_SBAFSR_MID	0x000003e000000000UL /* MID causing the error     */
#define SYSIO_SBAFSR_RESV3	0x0000001fffffffffUL /* Reserved                  */
static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
{
        struct sbus_bus *sbus = dev_id;
        struct sbus_iommu *iommu = sbus->iommu;
        unsigned long afsr_reg, afar_reg, reg_base;
        unsigned long afsr, afar, error_bits;
        int reported;

        reg_base = iommu->sbus_control_reg - 0x2000UL;
        afsr_reg = reg_base + SYSIO_SBUS_AFSR;
        afar_reg = reg_base + SYSIO_SBUS_AFAR;

        afsr = upa_readq(afsr_reg);
        afar = upa_readq(afar_reg);

        /* Clear primary/secondary error status bits. */
        error_bits = afsr &
                (SYSIO_SBAFSR_PLE | SYSIO_SBAFSR_PTO | SYSIO_SBAFSR_PBERR |
                 SYSIO_SBAFSR_SLE | SYSIO_SBAFSR_STO | SYSIO_SBAFSR_SBERR);
        upa_writeq(error_bits, afsr_reg);

        /* Log the error. */
        printk("SYSIO[%x]: SBUS Error, primary error type[%s] read(%d)\n",
               sbus->portid,
               (((error_bits & SYSIO_SBAFSR_PLE) ?
                 "Late PIO Error" :
                 ((error_bits & SYSIO_SBAFSR_PTO) ?
                  "Time Out" :
                  ((error_bits & SYSIO_SBAFSR_PBERR) ?
                   "Error Ack" : "???")))),
               (afsr & SYSIO_SBAFSR_RD) ? 1 : 0);
        printk("SYSIO[%x]: size[%lx] MID[%lx]\n",
               sbus->portid,
               (afsr & SYSIO_SBAFSR_SIZE) >> 42UL,
               (afsr & SYSIO_SBAFSR_MID) >> 37UL);
        printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);

        printk("SYSIO[%x]: Secondary SBUS errors [", sbus->portid);
        reported = 0;
        if (afsr & SYSIO_SBAFSR_SLE) {
                reported++;
                printk("(Late PIO Error)");
        }
        if (afsr & SYSIO_SBAFSR_STO) {
                reported++;
                printk("(Time Out)");
        }
        if (afsr & SYSIO_SBAFSR_SBERR) {
                reported++;
                printk("(Error Ack)");
        }
        if (!reported)
                printk("(none)");
        printk("]\n");

        /* XXX check iommu/strbuf for further error status XXX */

        return IRQ_HANDLED;
}

#define ECC_CONTROL	0x0020UL
#define SYSIO_ECNTRL_ECCEN	0x8000000000000000UL /* Enable ECC Checking  */
#define SYSIO_ECNTRL_UEEN	0x4000000000000000UL /* Enable UE Interrupts */
#define SYSIO_ECNTRL_CEEN	0x2000000000000000UL /* Enable CE Interrupts */

#define SYSIO_UE_INO		0x34
#define SYSIO_CE_INO		0x35
#define SYSIO_SBUSERR_INO	0x36

static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
{
        struct sbus_iommu *iommu = sbus->iommu;
        unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
        unsigned int irq;
        u64 control;

        irq = sbus_build_irq(sbus, SYSIO_UE_INO);
        if (request_irq(irq, sysio_ue_handler,
                        IRQF_SHARED, "SYSIO UE", sbus) < 0) {
                prom_printf("SYSIO[%x]: Cannot register UE interrupt.\n",
                            sbus->portid);
                prom_halt();
        }

        irq = sbus_build_irq(sbus, SYSIO_CE_INO);
        if (request_irq(irq, sysio_ce_handler,
                        IRQF_SHARED, "SYSIO CE", sbus) < 0) {
                prom_printf("SYSIO[%x]: Cannot register CE interrupt.\n",
                            sbus->portid);
                prom_halt();
        }

        irq = sbus_build_irq(sbus, SYSIO_SBUSERR_INO);
        if (request_irq(irq, sysio_sbus_error_handler,
                        IRQF_SHARED, "SYSIO SBUS Error", sbus) < 0) {
                prom_printf("SYSIO[%x]: Cannot register SBUS Error interrupt.\n",
                            sbus->portid);
                prom_halt();
        }

        /* Now turn the error interrupts on and also enable ECC checking. */
        upa_writeq((SYSIO_ECNTRL_ECCEN |
                    SYSIO_ECNTRL_UEEN  |
                    SYSIO_ECNTRL_CEEN),
                   reg_base + ECC_CONTROL);

        control = upa_readq(iommu->sbus_control_reg);
        control |= 0x100UL; /* SBUS Error Interrupt Enable */
        upa_writeq(control, iommu->sbus_control_reg);
}

/* Boot time initialization. */
static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
{
        struct linux_prom64_registers *pr;
        struct device_node *dp;
        struct sbus_iommu *iommu;
        unsigned long regs, tsb_base;
        u64 control;
        int i;

        dp = of_find_node_by_phandle(__node);

        sbus->portid = of_getintprop_default(dp, "upa-portid", -1);

        pr = of_get_property(dp, "reg", NULL);
        if (!pr) {
                prom_printf("sbus_iommu_init: Cannot map SYSIO control registers.\n");
                prom_halt();
        }
        regs = pr->phys_addr;

        iommu = kmalloc(sizeof(*iommu) + SMP_CACHE_BYTES, GFP_ATOMIC);
        if (iommu == NULL) {
                prom_printf("sbus_iommu_init: Fatal error, kmalloc(iommu) failed\n");
                prom_halt();
        }

        /* Align on E$ line boundary. */
        iommu = (struct sbus_iommu *)
                (((unsigned long)iommu + (SMP_CACHE_BYTES - 1UL)) &
                 ~(SMP_CACHE_BYTES - 1UL));

        memset(iommu, 0, sizeof(*iommu));

        /* We start with no consistent mappings. */
        iommu->lowest_consistent_map = CLUSTER_NPAGES;

        for (i = 0; i < NCLUSTERS; i++) {
                iommu->alloc_info[i].flush = 0;
                iommu->alloc_info[i].next = 0;
        }

        /* Setup spinlock. */
        spin_lock_init(&iommu->lock);

        /* Init register offsets. */
        iommu->iommu_regs = regs + SYSIO_IOMMUREG_BASE;
        iommu->strbuf_regs = regs + SYSIO_STRBUFREG_BASE;

        /* The SYSIO SBUS control register is used for dummy reads
         * in order to ensure write completion.
         */
        iommu->sbus_control_reg = regs + 0x2000UL;

        /* Link into SYSIO software state. */
        sbus->iommu = iommu;

        printk("SYSIO: UPA portID %x, at %016lx\n",
               sbus->portid, regs);

        /* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
        control = upa_readq(iommu->iommu_regs + IOMMU_CONTROL);
        control = ((7UL << 16UL)	|
                   (0UL << 2UL)		|
                   (1UL << 1UL)		|
                   (1UL << 0UL));

        /* Using the above configuration we need 1MB iommu page
         * table (128K ioptes * 8 bytes per iopte).  This is
         * page order 7 on UltraSparc.
         */
        tsb_base = __get_free_pages(GFP_ATOMIC, get_order(IO_TSB_SIZE));
        if (tsb_base == 0UL) {
                prom_printf("sbus_iommu_init: Fatal error, cannot alloc TSB table.\n");
                prom_halt();
        }

        iommu->page_table = (iopte_t *) tsb_base;
        memset(iommu->page_table, 0, IO_TSB_SIZE);

        upa_writeq(control, iommu->iommu_regs + IOMMU_CONTROL);

        /* Clean out any cruft in the IOMMU using
         * diagnostic accesses.
         */
        for (i = 0; i < 16; i++) {
                unsigned long dram = iommu->iommu_regs + IOMMU_DRAMDIAG;
                unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;

                dram += (unsigned long)i * 8UL;
                tag += (unsigned long)i * 8UL;

                upa_writeq(0, dram);
                upa_writeq(0, tag);
        }
        upa_readq(iommu->sbus_control_reg);

        /* Give the TSB to SYSIO. */
        upa_writeq(__pa(tsb_base), iommu->iommu_regs + IOMMU_TSBBASE);

        /* Setup streaming buffer, DE=1 SB_EN=1 */
        control = (1UL << 1UL) | (1UL << 0UL);
        upa_writeq(control, iommu->strbuf_regs + STRBUF_CONTROL);

        /* Clear out the tags using diagnostics. */
        for (i = 0; i < 16; i++) {
                unsigned long ptag, ltag;

                ptag = iommu->strbuf_regs + STRBUF_PTAGDIAG;
                ltag = iommu->strbuf_regs + STRBUF_LTAGDIAG;
                ptag += (unsigned long)i * 8UL;
                ltag += (unsigned long)i * 8UL;

                upa_writeq(0UL, ptag);
                upa_writeq(0UL, ltag);
        }

        /* Enable DVMA arbitration for all devices/slots. */
        control = upa_readq(iommu->sbus_control_reg);
        control |= 0x3fUL;
        upa_writeq(control, iommu->sbus_control_reg);

        /* Now some Xfire specific grot... */
        if (this_is_starfire)
                starfire_hookup(sbus->portid);

        sysio_register_error_handlers(sbus);
}
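
/* Worth noting about the sequence above: the TSB is allocated and
 * zeroed before it is handed to SYSIO via IOMMU_TSBBASE, and the
 * DVMA arbitration bits (0x3f, covering all devices/slots) are only
 * set at the very end, so no device can perform DVMA through a
 * half-initialized page table.
 */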

void sbus_fill_device_irq(struct sbus_dev *sdev)
{
        struct device_node *dp = of_find_node_by_phandle(sdev->prom_node);
        struct linux_prom_irqs *irqs;

        irqs = of_get_property(dp, "interrupts", NULL);
        if (!irqs) {
                sdev->irqs[0] = 0;
                sdev->num_irqs = 0;
        } else {
                unsigned int pri = irqs[0].pri;

                sdev->num_irqs = 1;
                if (pri < 0x20)
                        pri += sdev->slot * 8;

                sdev->irqs[0] = sbus_build_irq(sdev->bus, pri);
        }
}
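
/* The "interrupts" property of an SBUS child holds a slot-relative
 * priority: values below 0x20 are a level in 1-7, and the
 * pri += slot * 8 adjustment rebuilds the full INO
 * ((slot << 3) | level) that sbus_build_irq() expects.
 */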

void __init sbus_arch_bus_ranges_init(struct device_node *pn, struct sbus_bus *sbus)
{
}

void __init sbus_setup_iommu(struct sbus_bus *sbus, struct device_node *dp)
{
        sbus_iommu_init(dp->node, sbus);
}

void __init sbus_setup_arch_props(struct sbus_bus *sbus, struct device_node *dp)
{
}

int __init sbus_arch_preinit(void)
{
        return 0;
}

void __init sbus_arch_postinit(void)
{
        extern void firetruck_init(void);

        firetruck_init();
}