/* $Id: sbus.c,v 1.19 2002/01/23 11:27:32 davem Exp $
 * sbus.c: UltraSparc SBUS controller support.
 *
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>

#include <asm/page.h>
#include <asm/sbus.h>
#include <asm/io.h>
#include <asm/upa.h>
#include <asm/cache.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/starfire.h>

#include "iommu_common.h"

/* These should be allocated on an SMP_CACHE_BYTES
 * aligned boundary for optimal performance.
 *
 * On SYSIO, using an 8K page size we have 1GB of SBUS
 * DMA space mapped.  We divide this space into equally
 * sized clusters.  We allocate a DMA mapping from the
 * cluster that matches the order of the allocation, or
 * if the order is greater than the number of clusters,
 * we try to allocate from the last cluster.
 */
#define NCLUSTERS	8UL
#define ONE_GIG		(1UL * 1024UL * 1024UL * 1024UL)
#define CLUSTER_SIZE	(ONE_GIG / NCLUSTERS)
#define CLUSTER_MASK	(CLUSTER_SIZE - 1)
#define CLUSTER_NPAGES	(CLUSTER_SIZE >> IO_PAGE_SHIFT)
#define MAP_BASE	((u32)0xc0000000)
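
/* Worked out: with NCLUSTERS == 8 each cluster spans
 * ONE_GIG / 8 == 128MB, so CLUSTER_NPAGES is
 * (128MB >> 13) == 16384 ioptes per cluster with the 8K
 * IO page size.  DVMA addresses handed to devices start
 * at MAP_BASE (0xc0000000) and cover exactly 1GB.
 */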

struct sbus_iommu {
/*0x00*/spinlock_t		lock;

/*0x08*/iopte_t			*page_table;
/*0x10*/unsigned long		strbuf_regs;
/*0x18*/unsigned long		iommu_regs;
/*0x20*/unsigned long		sbus_control_reg;

/*0x28*/volatile unsigned long	strbuf_flushflag;

	/* If NCLUSTERS is ever decreased to 4 or lower,
	 * you must increase the size of the type of
	 * these counters.  You have been duly warned. -DaveM
	 */
/*0x30*/struct {
		u16	next;
		u16	flush;
	} alloc_info[NCLUSTERS];

	/* The lowest used consistent mapping entry.  Since
	 * we allocate consistent maps out of cluster 0 this
	 * is relative to the beginning of cluster 0.
	 */
/*0x50*/u32		lowest_consistent_map;
};

/* Offsets from iommu_regs */
#define SYSIO_IOMMUREG_BASE	0x2400UL
#define IOMMU_CONTROL	(0x2400UL - 0x2400UL)	/* IOMMU control register */
#define IOMMU_TSBBASE	(0x2408UL - 0x2400UL)	/* TSB base address register */
#define IOMMU_FLUSH	(0x2410UL - 0x2400UL)	/* IOMMU flush register */
#define IOMMU_VADIAG	(0x4400UL - 0x2400UL)	/* SBUS virtual address diagnostic */
#define IOMMU_TAGCMP	(0x4408UL - 0x2400UL)	/* TLB tag compare diagnostics */
#define IOMMU_LRUDIAG	(0x4500UL - 0x2400UL)	/* IOMMU LRU queue diagnostics */
#define IOMMU_TAGDIAG	(0x4580UL - 0x2400UL)	/* TLB tag diagnostics */
#define IOMMU_DRAMDIAG	(0x4600UL - 0x2400UL)	/* TLB data RAM diagnostics */

#define IOMMU_DRAM_VALID	(1UL << 30UL)

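/* Flush the entire IOMMU TLB: zeroing each of the sixteen tag
 * diagnostic entries invalidates the whole TLB, and the dummy
 * read of the SBUS control register forces the writes to
 * complete.  Each cluster's flush point is then reset to its
 * next-allocation index, meaning no outstanding entries remain
 * to be flushed before reuse.
 */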
static void __iommu_flushall(struct sbus_iommu *iommu)
{
	unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
	int entry;

	for (entry = 0; entry < 16; entry++) {
		upa_writeq(0, tag);
		tag += 8UL;
	}
	upa_readq(iommu->sbus_control_reg);

	for (entry = 0; entry < NCLUSTERS; entry++) {
		iommu->alloc_info[entry].flush =
			iommu->alloc_info[entry].next;
	}
}

static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
	while (npages--)
		upa_writeq(base + (npages << IO_PAGE_SHIFT),
			   iommu->iommu_regs + IOMMU_FLUSH);
	upa_readq(iommu->sbus_control_reg);
}

/* Offsets from strbuf_regs */
#define SYSIO_STRBUFREG_BASE	0x2800UL
#define STRBUF_CONTROL	(0x2800UL - 0x2800UL)	/* Control */
#define STRBUF_PFLUSH	(0x2808UL - 0x2800UL)	/* Page flush/invalidate */
#define STRBUF_FSYNC	(0x2810UL - 0x2800UL)	/* Flush synchronization */
#define STRBUF_DRAMDIAG	(0x5000UL - 0x2800UL)	/* data RAM diagnostic */
#define STRBUF_ERRDIAG	(0x5400UL - 0x2800UL)	/* error status diagnostics */
#define STRBUF_PTAGDIAG	(0x5800UL - 0x2800UL)	/* Page tag diagnostics */
#define STRBUF_LTAGDIAG	(0x5900UL - 0x2800UL)	/* Line tag diagnostics */

#define STRBUF_TAG_VALID	0x02UL

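/* Flush/invalidate the streaming buffer for every IO page in
 * [base, base + npages).  For DMA to the device nothing can be
 * dirty in the streaming cache, so we are done.  Otherwise we
 * hand the hardware the physical address of strbuf_flushflag
 * via STRBUF_FSYNC; the streaming buffer writes a nonzero
 * value there once all flushes have reached memory, and we
 * poll the flag for up to ~100ms before warning of a timeout.
 */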
static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages, int direction)
{
	unsigned long n;
	int limit;

	n = npages;
	while (n--)
		upa_writeq(base + (n << IO_PAGE_SHIFT),
			   iommu->strbuf_regs + STRBUF_PFLUSH);

	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == SBUS_DMA_TODEVICE)
		return;

	iommu->strbuf_flushflag = 0UL;

	/* Whoopee cushion! */
	upa_writeq(__pa(&iommu->strbuf_flushflag),
		   iommu->strbuf_regs + STRBUF_FSYNC);
	upa_readq(iommu->sbus_control_reg);

	limit = 100000;
	while (iommu->strbuf_flushflag == 0UL) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
		       "vaddr[%08x] npages[%ld]\n",
		       base, npages);
}

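/* Allocate a run of ioptes for a streaming mapping of npages
 * pages.  Cluster cnum serves allocations of 2^cnum pages;
 * requests larger than the biggest cluster order are satisfied
 * by taking nent consecutive chunks from the last cluster.
 * The search is a circular next-fit scan from the cluster's
 * saved 'next' index; crossing the 'flush' point forces a full
 * IOMMU flush so stale TLB entries can never alias a reused
 * iopte.
 */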
static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte, *limit, *first, *cluster;
	unsigned long cnum, ent, nent, flush_point, found;

	cnum = 0;
	nent = 1;
	while ((1UL << cnum) < npages)
		cnum++;
	if (cnum >= NCLUSTERS) {
		nent = 1UL << (cnum - NCLUSTERS);
		cnum = NCLUSTERS - 1;
	}
	iopte = iommu->page_table + (cnum * CLUSTER_NPAGES);

	if (cnum == 0)
		limit = (iommu->page_table +
			 iommu->lowest_consistent_map);
	else
		limit = (iopte + CLUSTER_NPAGES);

	iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
	flush_point = iommu->alloc_info[cnum].flush;

	first = iopte;
	cluster = NULL;
	found = 0;
	for (;;) {
		if (iopte_val(*iopte) == 0UL) {
			found++;
			if (!cluster)
				cluster = iopte;
		} else {
			/* Used cluster in the way */
			cluster = NULL;
			found = 0;
		}

		if (found == nent)
			break;

		iopte += (1 << cnum);
		ent++;
		if (iopte >= limit) {
			iopte = (iommu->page_table + (cnum * CLUSTER_NPAGES));
			ent = 0;

			/* Multiple cluster allocations must not wrap */
			cluster = NULL;
			found = 0;
		}
		if (ent == flush_point)
			__iommu_flushall(iommu);
		if (iopte == first)
			goto bad;
	}

	/* ent/iopte points to the last cluster entry we're going to use,
	 * so save our place for the next allocation.
	 */
	if ((iopte + (1 << cnum)) >= limit)
		ent = 0;
	else
		ent = ent + 1;
	iommu->alloc_info[cnum].next = ent;
	if (ent == flush_point)
		__iommu_flushall(iommu);

	/* I've got your streaming cluster right here buddy boy... */
	return cluster;

bad:
	printk(KERN_EMERG "sbus: alloc_streaming_cluster of npages(%ld) failed!\n",
	       npages);
	return NULL;
}

static void free_streaming_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
	unsigned long cnum, ent, nent;
	iopte_t *iopte;

	cnum = 0;
	nent = 1;
	while ((1UL << cnum) < npages)
		cnum++;
	if (cnum >= NCLUSTERS) {
		nent = 1UL << (cnum - NCLUSTERS);
		cnum = NCLUSTERS - 1;
	}
	ent = (base & CLUSTER_MASK) >> (IO_PAGE_SHIFT + cnum);
	iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
	do {
		iopte_val(*iopte) = 0UL;
		iopte += 1 << cnum;
	} while (--nent);

	/* If the global flush might not have caught this entry,
	 * adjust the flush point such that we will flush before
	 * ever trying to reuse it.
	 */
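	/* between(X, Y, Z) is true when X lies in the circular
	 * interval [Y, Z]: with unsigned wraparound arithmetic,
	 * (Z - Y) >= (X - Y) says X is no further ahead of Y
	 * (walking forward, modulo the counter width) than Z is.
	 */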
#define between(X,Y,Z)	(((Z) - (Y)) >= ((X) - (Y)))
	if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
		iommu->alloc_info[cnum].flush = ent;
#undef between
}

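/* Consistent mappings are carved from the top of cluster zero,
 * scanning backward for npages contiguous free ioptes.  The
 * lowest entry handed out is remembered in lowest_consistent_map
 * so that streaming allocations in cluster 0 stop short of the
 * consistent region.
 */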
/* We allocate consistent mappings from the end of cluster zero. */
static iopte_t *alloc_consistent_cluster(struct sbus_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte;

	iopte = iommu->page_table + (1 * CLUSTER_NPAGES);
	while (iopte > iommu->page_table) {
		iopte--;
		if (!(iopte_val(*iopte) & IOPTE_VALID)) {
			unsigned long tmp = npages;

			while (--tmp) {
				iopte--;
				if (iopte_val(*iopte) & IOPTE_VALID)
					break;
			}
			if (tmp == 0) {
				u32 entry = (iopte - iommu->page_table);

				if (entry < iommu->lowest_consistent_map)
					iommu->lowest_consistent_map = entry;
				return iopte;
			}
		}
	}
	return NULL;
}

static void free_consistent_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
	iopte_t *iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);

	if ((iopte - iommu->page_table) == iommu->lowest_consistent_map) {
		iopte_t *walk = iopte + npages;
		iopte_t *limit;

		limit = iommu->page_table + CLUSTER_NPAGES;
		while (walk < limit) {
			if (iopte_val(*walk) != 0UL)
				break;
			walk++;
		}
		iommu->lowest_consistent_map =
			(walk - iommu->page_table);
	}

	while (npages--)
		*iopte++ = __iopte(0UL);
}

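/* Typical driver usage of the consistent DMA interface below,
 * as a minimal sketch (the device structure, size, and field
 * names here are hypothetical, not from this file):
 *
 *	dma_addr_t dvma;
 *	struct mydev_regs *regs;
 *
 *	regs = sbus_alloc_consistent(sdev, sizeof(*regs), &dvma);
 *	if (regs == NULL)
 *		return -ENOMEM;
 *	// The CPU accesses the buffer through 'regs', while the
 *	// device DMAs to/from the bus address 'dvma'.
 *	sbus_free_consistent(sdev, sizeof(*regs), regs, dvma);
 */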
void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
{
	unsigned long order, first_page, flags;
	struct sbus_iommu *iommu;
	iopte_t *iopte;
	void *ret;
	int npages;

	if (size <= 0 || sdev == NULL || dvma_addr == NULL)
		return NULL;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	first_page = __get_free_pages(GFP_KERNEL, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = sdev->bus->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
	if (iopte == NULL) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		free_pages(first_page, order);
		return NULL;
	}

	/* Ok, we're committed at this point. */
	*dvma_addr = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	while (npages--) {
		*iopte++ = __iopte(IOPTE_VALID | IOPTE_CACHE | IOPTE_WRITE |
				   (__pa(first_page) & IOPTE_PAGE));
		first_page += IO_PAGE_SIZE;
	}
	iommu_flush(iommu, *dvma_addr, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
{
	unsigned long order, npages;
	struct sbus_iommu *iommu;

	if (size <= 0 || sdev == NULL || cpu == NULL)
		return;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = sdev->bus->iommu;

	spin_lock_irq(&iommu->lock);
	free_consistent_cluster(iommu, dvma, npages);
	iommu_flush(iommu, dvma, npages);
	spin_unlock_irq(&iommu->lock);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

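/* Map a single CPU buffer for streaming DVMA.  The returned bus
 * address preserves the buffer's offset within its IO page; the
 * iopte run covers every IO page the [ptr, ptr + size) range
 * touches.  IOPTE_STBUF routes transfers through the streaming
 * buffer, and IOPTE_WRITE is set unless the transfer is
 * strictly to the device.
 */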
dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t size, int dir)
{
	struct sbus_iommu *iommu = sdev->bus->iommu;
	unsigned long npages, pbase, flags;
	iopte_t *iopte;
	u32 dma_base, offset;
	unsigned long iopte_bits;

	if (dir == SBUS_DMA_NONE)
		BUG();

	pbase = (unsigned long) ptr;
	offset = (u32) (pbase & ~IO_PAGE_MASK);
	size = (IO_PAGE_ALIGN(pbase + size) - (pbase & IO_PAGE_MASK));
	pbase = (unsigned long) __pa(pbase & IO_PAGE_MASK);

	spin_lock_irqsave(&iommu->lock, flags);
	npages = size >> IO_PAGE_SHIFT;
	iopte = alloc_streaming_cluster(iommu, npages);
	if (iopte == NULL)
		goto bad;

	dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
	npages = size >> IO_PAGE_SHIFT;
	iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
	if (dir != SBUS_DMA_TODEVICE)
		iopte_bits |= IOPTE_WRITE;
	while (npages--) {
		*iopte++ = __iopte(iopte_bits | (pbase & IOPTE_PAGE));
		pbase += IO_PAGE_SIZE;
	}
	npages = size >> IO_PAGE_SHIFT;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return (dma_base | offset);

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	BUG();
	return 0;
}

void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size, int direction)
{
	struct sbus_iommu *iommu = sdev->bus->iommu;
	u32 dma_base = dma_addr & IO_PAGE_MASK;
	unsigned long flags;

	size = (IO_PAGE_ALIGN(dma_addr + size) - dma_base);

	spin_lock_irqsave(&iommu->lock, flags);
	free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
	sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT, direction);
	spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

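/* Populate the iopte run for a scatterlist.  'nused' is the
 * number of coalesced DVMA segments (entries whose dma_address
 * and dma_length were set up beforehand); 'nelems' is the raw
 * scatterlist length.  For each DVMA segment we walk the
 * physical elements that back it, starting a fresh pteval at
 * every IO page crossing, so physically contiguous elements
 * collapse into a single run of ioptes.
 */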
static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, int nused, int nelems, unsigned long iopte_bits)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i;

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = (unsigned long) SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = ((pteval & IOPTE_PAGE) | iopte_bits);
			while (len > 0) {
				*iopte++ = __iopte(pteval);
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}
}

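/* Map a scatterlist for streaming DVMA.  prepare_sg() (declared
 * in iommu_common.h, not shown here) is relied upon to coalesce
 * the list into DVMA segments: it sets each used entry's
 * dma_address to an offset relative to zero and fills in
 * dma_length, returning the total number of IO pages needed.
 * Adding dma_base afterwards turns those offsets into real DVMA
 * addresses.
 */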
int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int dir)
{
	struct sbus_iommu *iommu = sdev->bus->iommu;
	unsigned long flags, npages;
	iopte_t *iopte;
	u32 dma_base;
	struct scatterlist *sgtmp;
	int used;
	unsigned long iopte_bits;

	if (dir == SBUS_DMA_NONE)
		BUG();

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg->dma_address =
			sbus_map_single(sdev,
					(page_address(sg->page) + sg->offset),
					sg->length, dir);
		sg->dma_length = sg->length;
		return 1;
	}

	npages = prepare_sg(sg, nents);

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_streaming_cluster(iommu, npages);
	if (iopte == NULL)
		goto bad;
	dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);

	/* Normalize DVMA addresses. */
	sgtmp = sg;
	used = nents;

	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nents - used;

	iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
	if (dir != SBUS_DMA_TODEVICE)
		iopte_bits |= IOPTE_WRITE;

	fill_sg(iopte, sg, used, nents, iopte_bits);
#ifdef VERIFY_SG
	verify_sglist(sg, nents, iopte, npages);
#endif
	spin_unlock_irqrestore(&iommu->lock, flags);

	return used;

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	BUG();
	return 0;
}

void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
{
	unsigned long size, flags;
	struct sbus_iommu *iommu;
	u32 dvma_base;
	int i;

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sbus_unmap_single(sdev, sg->dma_address, sg->dma_length, direction);
		return;
	}

	dvma_base = sg[0].dma_address & IO_PAGE_MASK;
	for (i = 0; i < nents; i++) {
		if (sg[i].dma_length == 0)
			break;
	}
	i--;
	size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - dvma_base;

	iommu = sdev->bus->iommu;
	spin_lock_irqsave(&iommu->lock, flags);
	free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
	sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT, direction);
	spin_unlock_irqrestore(&iommu->lock, flags);
}

void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
{
	struct sbus_iommu *iommu = sdev->bus->iommu;
	unsigned long flags;

	size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK));

	spin_lock_irqsave(&iommu->lock, flags);
	sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT, direction);
	spin_unlock_irqrestore(&iommu->lock, flags);
}

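/* The *_for_device variants are no-ops: the streaming buffer
 * only needs flushing before the CPU looks at data the device
 * has written, which the *_for_cpu variants handle.
 */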
void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
{
}

void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
{
	struct sbus_iommu *iommu = sdev->bus->iommu;
	unsigned long flags, size;
	u32 base;
	int i;

	base = sg[0].dma_address & IO_PAGE_MASK;
	for (i = 0; i < nents; i++) {
		if (sg[i].dma_length == 0)
			break;
	}
	i--;
	size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;

	spin_lock_irqsave(&iommu->lock, flags);
	sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT, direction);
	spin_unlock_irqrestore(&iommu->lock, flags);
}

void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
{
}

/* Enable 64-bit DVMA mode for the given device. */
void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
{
	struct sbus_iommu *iommu = sdev->bus->iommu;
	int slot = sdev->slot;
	unsigned long cfg_reg;
	u64 val;

	cfg_reg = iommu->sbus_control_reg;
	switch (slot) {
	case 0:
		cfg_reg += 0x20UL;
		break;
	case 1:
		cfg_reg += 0x28UL;
		break;
	case 2:
		cfg_reg += 0x30UL;
		break;
	case 3:
		cfg_reg += 0x38UL;
		break;
	case 13:
		cfg_reg += 0x40UL;
		break;
	case 14:
		cfg_reg += 0x48UL;
		break;
	case 15:
		cfg_reg += 0x50UL;
		break;

	default:
		return;
	}

	val = upa_readq(cfg_reg);
	if (val & (1UL << 14UL)) {
		/* Extended transfer mode already enabled. */
		return;
	}

	val |= (1UL << 14UL);

	if (bursts & DMA_BURST8)
		val |= (1UL << 1UL);
	if (bursts & DMA_BURST16)
		val |= (1UL << 2UL);
	if (bursts & DMA_BURST32)
		val |= (1UL << 3UL);
	if (bursts & DMA_BURST64)
		val |= (1UL << 4UL);
	upa_writeq(val, cfg_reg);
}

/* SBUS SYSIO INO number to Sparc PIL level. */
static unsigned char sysio_ino_to_pil[] = {
	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 0 */
	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 1 */
	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 2 */
	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 3 */
	4, /* Onboard SCSI */
	5, /* Onboard Ethernet */
/*XXX*/	8, /* Onboard BPP */
	0, /* Bogon */
	13, /* Audio */
/*XXX*/15, /* PowerFail */
	0, /* Bogon */
	0, /* Bogon */
	12, /* Zilog Serial Channels (incl. Keyboard/Mouse lines) */
	11, /* Floppy */
	0, /* Spare Hardware (bogon for now) */
	0, /* Keyboard (bogon for now) */
	0, /* Mouse (bogon for now) */
	0, /* Serial (bogon for now) */
	0, 0, /* Bogon, Bogon */
	10, /* Timer 0 */
	11, /* Timer 1 */
	0, 0, /* Bogon, Bogon */
	15, /* Uncorrectable SBUS Error */
	15, /* Correctable SBUS Error */
	15, /* SBUS Error */
/*XXX*/	0, /* Power Management (bogon for now) */
};

/* INO number to IMAP register offset for SYSIO external IRQ's.
 * This should conform to both Sunfire/Wildfire server and Fusion
 * desktop designs.
 */
#define SYSIO_IMAP_SLOT0	0x2c04UL
#define SYSIO_IMAP_SLOT1	0x2c0cUL
#define SYSIO_IMAP_SLOT2	0x2c14UL
#define SYSIO_IMAP_SLOT3	0x2c1cUL
#define SYSIO_IMAP_SCSI		0x3004UL
#define SYSIO_IMAP_ETH		0x300cUL
#define SYSIO_IMAP_BPP		0x3014UL
#define SYSIO_IMAP_AUDIO	0x301cUL
#define SYSIO_IMAP_PFAIL	0x3024UL
#define SYSIO_IMAP_KMS		0x302cUL
#define SYSIO_IMAP_FLPY		0x3034UL
#define SYSIO_IMAP_SHW		0x303cUL
#define SYSIO_IMAP_KBD		0x3044UL
#define SYSIO_IMAP_MS		0x304cUL
#define SYSIO_IMAP_SER		0x3054UL
#define SYSIO_IMAP_TIM0		0x3064UL
#define SYSIO_IMAP_TIM1		0x306cUL
#define SYSIO_IMAP_UE		0x3074UL
#define SYSIO_IMAP_CE		0x307cUL
#define SYSIO_IMAP_SBERR	0x3084UL
#define SYSIO_IMAP_PMGMT	0x308cUL
#define SYSIO_IMAP_GFX		0x3094UL
#define SYSIO_IMAP_EUPA		0x309cUL

#define bogon	((unsigned long) -1)
static unsigned long sysio_irq_offsets[] = {
	/* SBUS Slot 0 --> 3, level 1 --> 7 */
	SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
	SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
	SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
	SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
	SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
	SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
	SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
	SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,

	/* Onboard devices (not relevant/used on SunFire). */
	SYSIO_IMAP_SCSI,
	SYSIO_IMAP_ETH,
	SYSIO_IMAP_BPP,
	bogon,
	SYSIO_IMAP_AUDIO,
	SYSIO_IMAP_PFAIL,
	bogon,
	bogon,
	SYSIO_IMAP_KMS,
	SYSIO_IMAP_FLPY,
	SYSIO_IMAP_SHW,
	SYSIO_IMAP_KBD,
	SYSIO_IMAP_MS,
	SYSIO_IMAP_SER,
	bogon,
	bogon,
	SYSIO_IMAP_TIM0,
	SYSIO_IMAP_TIM1,
	bogon,
	bogon,
	SYSIO_IMAP_UE,
	SYSIO_IMAP_CE,
	SYSIO_IMAP_SBERR,
	SYSIO_IMAP_PMGMT,
};

#undef bogon

#define NUM_SYSIO_OFFSETS (sizeof(sysio_irq_offsets) / sizeof(sysio_irq_offsets[0]))

/* Convert Interrupt Mapping register pointer to associated
 * Interrupt Clear register pointer, SYSIO specific version.
 */
#define SYSIO_ICLR_UNUSED0	0x3400UL
#define SYSIO_ICLR_SLOT0	0x340cUL
#define SYSIO_ICLR_SLOT1	0x344cUL
#define SYSIO_ICLR_SLOT2	0x348cUL
#define SYSIO_ICLR_SLOT3	0x34ccUL
static unsigned long sysio_imap_to_iclr(unsigned long imap)
{
	unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0;
	return imap + diff;
}

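/* For external SBUS interrupts the INO encodes the slot in
 * bits [4:3] and the SBUS interrupt level in bits [2:0]; INOs
 * at 0x20 and above name the onboard and error sources
 * directly.
 */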
unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
{
	struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
	struct sbus_iommu *iommu = sbus->iommu;
	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
	unsigned long imap, iclr;
	int pil, sbus_level = 0;

	pil = sysio_ino_to_pil[ino];
	if (!pil) {
		printk("sbus_build_irq: Bad SYSIO INO[%x]\n", ino);
		panic("Bad SYSIO IRQ translations...");
	}

	if (PIL_RESERVED(pil))
		BUG();

	imap = sysio_irq_offsets[ino];
	if (imap == ((unsigned long)-1)) {
		prom_printf("get_irq_translations: Bad SYSIO INO[%x] cpu[%d]\n",
			    ino, pil);
		prom_halt();
	}
	imap += reg_base;

	/* SYSIO inconsistency.  For external SLOTS, we have to select
	 * the right ICLR register based upon the lower SBUS irq level
	 * bits.
	 */
	if (ino >= 0x20) {
		iclr = sysio_imap_to_iclr(imap);
	} else {
		int sbus_slot = (ino & 0x18) >> 3;

		sbus_level = ino & 0x7;

		switch (sbus_slot) {
		case 0:
			iclr = reg_base + SYSIO_ICLR_SLOT0;
			break;
		case 1:
			iclr = reg_base + SYSIO_ICLR_SLOT1;
			break;
		case 2:
			iclr = reg_base + SYSIO_ICLR_SLOT2;
			break;
		default:
		case 3:
			iclr = reg_base + SYSIO_ICLR_SLOT3;
			break;
		}

		iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
	}
	return build_irq(pil, sbus_level, iclr, imap);
}

/* Error interrupt handling. */
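/* Each handler below recovers the base of the SYSIO register
 * block as iommu->sbus_control_reg - 0x2000UL, since the init
 * code stashed the control register, which lives at offset
 * 0x2000, rather than the block base itself.
 */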
#define SYSIO_UE_AFSR	0x0030UL
#define SYSIO_UE_AFAR	0x0038UL
#define SYSIO_UEAFSR_PPIO	0x8000000000000000UL /* Primary PIO cause */
#define SYSIO_UEAFSR_PDRD	0x4000000000000000UL /* Primary DVMA read cause */
#define SYSIO_UEAFSR_PDWR	0x2000000000000000UL /* Primary DVMA write cause */
#define SYSIO_UEAFSR_SPIO	0x1000000000000000UL /* Secondary PIO is cause */
#define SYSIO_UEAFSR_SDRD	0x0800000000000000UL /* Secondary DVMA read cause */
#define SYSIO_UEAFSR_SDWR	0x0400000000000000UL /* Secondary DVMA write cause */
#define SYSIO_UEAFSR_RESV1	0x03ff000000000000UL /* Reserved */
#define SYSIO_UEAFSR_DOFF	0x0000e00000000000UL /* Doubleword Offset */
#define SYSIO_UEAFSR_SIZE	0x00001c0000000000UL /* Bad transfer size 2^SIZE */
#define SYSIO_UEAFSR_MID	0x000003e000000000UL /* UPA MID causing the fault */
#define SYSIO_UEAFSR_RESV2	0x0000001fffffffffUL /* Reserved */
static irqreturn_t sysio_ue_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	struct sbus_bus *sbus = dev_id;
	struct sbus_iommu *iommu = sbus->iommu;
	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
	unsigned long afsr_reg, afar_reg;
	unsigned long afsr, afar, error_bits;
	int reported;

	afsr_reg = reg_base + SYSIO_UE_AFSR;
	afar_reg = reg_base + SYSIO_UE_AFAR;

	/* Latch error status. */
	afsr = upa_readq(afsr_reg);
	afar = upa_readq(afar_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SYSIO_UEAFSR_PPIO | SYSIO_UEAFSR_PDRD | SYSIO_UEAFSR_PDWR |
		 SYSIO_UEAFSR_SPIO | SYSIO_UEAFSR_SDRD | SYSIO_UEAFSR_SDWR);
	upa_writeq(error_bits, afsr_reg);

	/* Log the error. */
	printk("SYSIO[%x]: Uncorrectable ECC Error, primary error type[%s]\n",
	       sbus->portid,
	       (((error_bits & SYSIO_UEAFSR_PPIO) ?
		 "PIO" :
		 ((error_bits & SYSIO_UEAFSR_PDRD) ?
		  "DVMA Read" :
		  ((error_bits & SYSIO_UEAFSR_PDWR) ?
		   "DVMA Write" : "???")))));
	printk("SYSIO[%x]: DOFF[%lx] SIZE[%lx] MID[%lx]\n",
	       sbus->portid,
	       (afsr & SYSIO_UEAFSR_DOFF) >> 45UL,
	       (afsr & SYSIO_UEAFSR_SIZE) >> 42UL,
	       (afsr & SYSIO_UEAFSR_MID) >> 37UL);
	printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
	printk("SYSIO[%x]: Secondary UE errors [", sbus->portid);
	reported = 0;
	if (afsr & SYSIO_UEAFSR_SPIO) {
		reported++;
		printk("(PIO)");
	}
	if (afsr & SYSIO_UEAFSR_SDRD) {
		reported++;
		printk("(DVMA Read)");
	}
	if (afsr & SYSIO_UEAFSR_SDWR) {
		reported++;
		printk("(DVMA Write)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	return IRQ_HANDLED;
}

#define SYSIO_CE_AFSR	0x0040UL
#define SYSIO_CE_AFAR	0x0048UL
#define SYSIO_CEAFSR_PPIO	0x8000000000000000UL /* Primary PIO cause */
#define SYSIO_CEAFSR_PDRD	0x4000000000000000UL /* Primary DVMA read cause */
#define SYSIO_CEAFSR_PDWR	0x2000000000000000UL /* Primary DVMA write cause */
#define SYSIO_CEAFSR_SPIO	0x1000000000000000UL /* Secondary PIO cause */
#define SYSIO_CEAFSR_SDRD	0x0800000000000000UL /* Secondary DVMA read cause */
#define SYSIO_CEAFSR_SDWR	0x0400000000000000UL /* Secondary DVMA write cause */
#define SYSIO_CEAFSR_RESV1	0x0300000000000000UL /* Reserved */
#define SYSIO_CEAFSR_ESYND	0x00ff000000000000UL /* Syndrome Bits */
#define SYSIO_CEAFSR_DOFF	0x0000e00000000000UL /* Double Offset */
#define SYSIO_CEAFSR_SIZE	0x00001c0000000000UL /* Bad transfer size 2^SIZE */
#define SYSIO_CEAFSR_MID	0x000003e000000000UL /* UPA MID causing the fault */
#define SYSIO_CEAFSR_RESV2	0x0000001fffffffffUL /* Reserved */
static irqreturn_t sysio_ce_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	struct sbus_bus *sbus = dev_id;
	struct sbus_iommu *iommu = sbus->iommu;
	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
	unsigned long afsr_reg, afar_reg;
	unsigned long afsr, afar, error_bits;
	int reported;

	afsr_reg = reg_base + SYSIO_CE_AFSR;
	afar_reg = reg_base + SYSIO_CE_AFAR;

	/* Latch error status. */
	afsr = upa_readq(afsr_reg);
	afar = upa_readq(afar_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SYSIO_CEAFSR_PPIO | SYSIO_CEAFSR_PDRD | SYSIO_CEAFSR_PDWR |
		 SYSIO_CEAFSR_SPIO | SYSIO_CEAFSR_SDRD | SYSIO_CEAFSR_SDWR);
	upa_writeq(error_bits, afsr_reg);

	printk("SYSIO[%x]: Correctable ECC Error, primary error type[%s]\n",
	       sbus->portid,
	       (((error_bits & SYSIO_CEAFSR_PPIO) ?
		 "PIO" :
		 ((error_bits & SYSIO_CEAFSR_PDRD) ?
		  "DVMA Read" :
		  ((error_bits & SYSIO_CEAFSR_PDWR) ?
		   "DVMA Write" : "???")))));

	/* XXX Use syndrome and afar to print out module string just like
	 * XXX UDB CE trap handler does... -DaveM
	 */
	printk("SYSIO[%x]: DOFF[%lx] ECC Syndrome[%lx] Size[%lx] MID[%lx]\n",
	       sbus->portid,
	       (afsr & SYSIO_CEAFSR_DOFF) >> 45UL,
	       (afsr & SYSIO_CEAFSR_ESYND) >> 48UL,
	       (afsr & SYSIO_CEAFSR_SIZE) >> 42UL,
	       (afsr & SYSIO_CEAFSR_MID) >> 37UL);
	printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);

	printk("SYSIO[%x]: Secondary CE errors [", sbus->portid);
	reported = 0;
	if (afsr & SYSIO_CEAFSR_SPIO) {
		reported++;
		printk("(PIO)");
	}
	if (afsr & SYSIO_CEAFSR_SDRD) {
		reported++;
		printk("(DVMA Read)");
	}
	if (afsr & SYSIO_CEAFSR_SDWR) {
		reported++;
		printk("(DVMA Write)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	return IRQ_HANDLED;
}

#define SYSIO_SBUS_AFSR		0x2010UL
#define SYSIO_SBUS_AFAR		0x2018UL
#define SYSIO_SBAFSR_PLE	0x8000000000000000UL /* Primary Late PIO Error */
#define SYSIO_SBAFSR_PTO	0x4000000000000000UL /* Primary SBUS Timeout */
#define SYSIO_SBAFSR_PBERR	0x2000000000000000UL /* Primary SBUS Error ACK */
#define SYSIO_SBAFSR_SLE	0x1000000000000000UL /* Secondary Late PIO Error */
#define SYSIO_SBAFSR_STO	0x0800000000000000UL /* Secondary SBUS Timeout */
#define SYSIO_SBAFSR_SBERR	0x0400000000000000UL /* Secondary SBUS Error ACK */
#define SYSIO_SBAFSR_RESV1	0x03ff000000000000UL /* Reserved */
#define SYSIO_SBAFSR_RD		0x0000800000000000UL /* Primary was late PIO read */
#define SYSIO_SBAFSR_RESV2	0x0000600000000000UL /* Reserved */
#define SYSIO_SBAFSR_SIZE	0x00001c0000000000UL /* Size of transfer */
#define SYSIO_SBAFSR_MID	0x000003e000000000UL /* MID causing the error */
#define SYSIO_SBAFSR_RESV3	0x0000001fffffffffUL /* Reserved */
static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	struct sbus_bus *sbus = dev_id;
	struct sbus_iommu *iommu = sbus->iommu;
	unsigned long afsr_reg, afar_reg, reg_base;
	unsigned long afsr, afar, error_bits;
	int reported;

	reg_base = iommu->sbus_control_reg - 0x2000UL;
	afsr_reg = reg_base + SYSIO_SBUS_AFSR;
	afar_reg = reg_base + SYSIO_SBUS_AFAR;

	afsr = upa_readq(afsr_reg);
	afar = upa_readq(afar_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SYSIO_SBAFSR_PLE | SYSIO_SBAFSR_PTO | SYSIO_SBAFSR_PBERR |
		 SYSIO_SBAFSR_SLE | SYSIO_SBAFSR_STO | SYSIO_SBAFSR_SBERR);
	upa_writeq(error_bits, afsr_reg);

	/* Log the error. */
	printk("SYSIO[%x]: SBUS Error, primary error type[%s] read(%d)\n",
	       sbus->portid,
	       (((error_bits & SYSIO_SBAFSR_PLE) ?
		 "Late PIO Error" :
		 ((error_bits & SYSIO_SBAFSR_PTO) ?
		  "Time Out" :
		  ((error_bits & SYSIO_SBAFSR_PBERR) ?
		   "Error Ack" : "???")))),
	       (afsr & SYSIO_SBAFSR_RD) ? 1 : 0);
	printk("SYSIO[%x]: size[%lx] MID[%lx]\n",
	       sbus->portid,
	       (afsr & SYSIO_SBAFSR_SIZE) >> 42UL,
	       (afsr & SYSIO_SBAFSR_MID) >> 37UL);
	printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);

	printk("SYSIO[%x]: Secondary SBUS errors [", sbus->portid);
	reported = 0;
	if (afsr & SYSIO_SBAFSR_SLE) {
		reported++;
		printk("(Late PIO Error)");
	}
	if (afsr & SYSIO_SBAFSR_STO) {
		reported++;
		printk("(Time Out)");
	}
	if (afsr & SYSIO_SBAFSR_SBERR) {
		reported++;
		printk("(Error Ack)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* XXX check iommu/strbuf for further error status XXX */

	return IRQ_HANDLED;
}

#define ECC_CONTROL	0x0020UL
#define SYSIO_ECNTRL_ECCEN	0x8000000000000000UL /* Enable ECC Checking */
#define SYSIO_ECNTRL_UEEN	0x4000000000000000UL /* Enable UE Interrupts */
#define SYSIO_ECNTRL_CEEN	0x2000000000000000UL /* Enable CE Interrupts */

#define SYSIO_UE_INO		0x34
#define SYSIO_CE_INO		0x35
#define SYSIO_SBUSERR_INO	0x36

static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
{
	struct sbus_iommu *iommu = sbus->iommu;
	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
	unsigned int irq;
	u64 control;

	irq = sbus_build_irq(sbus, SYSIO_UE_INO);
	if (request_irq(irq, sysio_ue_handler,
			SA_SHIRQ, "SYSIO UE", sbus) < 0) {
		prom_printf("SYSIO[%x]: Cannot register UE interrupt.\n",
			    sbus->portid);
		prom_halt();
	}

	irq = sbus_build_irq(sbus, SYSIO_CE_INO);
	if (request_irq(irq, sysio_ce_handler,
			SA_SHIRQ, "SYSIO CE", sbus) < 0) {
		prom_printf("SYSIO[%x]: Cannot register CE interrupt.\n",
			    sbus->portid);
		prom_halt();
	}

	irq = sbus_build_irq(sbus, SYSIO_SBUSERR_INO);
	if (request_irq(irq, sysio_sbus_error_handler,
			SA_SHIRQ, "SYSIO SBUS Error", sbus) < 0) {
		prom_printf("SYSIO[%x]: Cannot register SBUS Error interrupt.\n",
			    sbus->portid);
		prom_halt();
	}

	/* Now turn the error interrupts on and also enable ECC checking. */
	upa_writeq((SYSIO_ECNTRL_ECCEN |
		    SYSIO_ECNTRL_UEEN  |
		    SYSIO_ECNTRL_CEEN),
		   reg_base + ECC_CONTROL);

	control = upa_readq(iommu->sbus_control_reg);
	control |= 0x100UL; /* SBUS Error Interrupt Enable */
	upa_writeq(control, iommu->sbus_control_reg);
}

/* Boot time initialization. */
void __init sbus_iommu_init(int prom_node, struct sbus_bus *sbus)
{
	struct linux_prom64_registers rprop;
	struct sbus_iommu *iommu;
	unsigned long regs, tsb_base;
	u64 control;
	int err, i;

	sbus->portid = prom_getintdefault(sbus->prom_node,
					  "upa-portid", -1);

	err = prom_getproperty(prom_node, "reg",
			       (char *)&rprop, sizeof(rprop));
	if (err < 0) {
		prom_printf("sbus_iommu_init: Cannot map SYSIO control registers.\n");
		prom_halt();
	}
	regs = rprop.phys_addr;

	iommu = kmalloc(sizeof(*iommu) + SMP_CACHE_BYTES, GFP_ATOMIC);
	if (iommu == NULL) {
		prom_printf("sbus_iommu_init: Fatal error, kmalloc(iommu) failed\n");
		prom_halt();
	}

	/* Align on E$ line boundary. */
	iommu = (struct sbus_iommu *)
		(((unsigned long)iommu + (SMP_CACHE_BYTES - 1UL)) &
		 ~(SMP_CACHE_BYTES - 1UL));

	memset(iommu, 0, sizeof(*iommu));

	/* We start with no consistent mappings. */
	iommu->lowest_consistent_map = CLUSTER_NPAGES;

	for (i = 0; i < NCLUSTERS; i++) {
		iommu->alloc_info[i].flush = 0;
		iommu->alloc_info[i].next = 0;
	}

	/* Setup spinlock. */
	spin_lock_init(&iommu->lock);

	/* Init register offsets. */
	iommu->iommu_regs = regs + SYSIO_IOMMUREG_BASE;
	iommu->strbuf_regs = regs + SYSIO_STRBUFREG_BASE;

	/* The SYSIO SBUS control register is used for dummy reads
	 * in order to ensure write completion.
	 */
	iommu->sbus_control_reg = regs + 0x2000UL;

	/* Link into SYSIO software state. */
	sbus->iommu = iommu;

	printk("SYSIO: UPA portID %x, at %016lx\n",
	       sbus->portid, regs);

	/* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
	control = upa_readq(iommu->iommu_regs + IOMMU_CONTROL);
	control = ((7UL << 16UL)	|
		   (0UL << 2UL)		|
		   (1UL << 1UL)		|
		   (1UL << 0UL));

	/* Using the above configuration we need 1MB iommu page
	 * table (128K ioptes * 8 bytes per iopte).  This is
	 * page order 7 on UltraSparc.
	 */
	tsb_base = __get_free_pages(GFP_ATOMIC, get_order(IO_TSB_SIZE));
	if (tsb_base == 0UL) {
		prom_printf("sbus_iommu_init: Fatal error, cannot alloc TSB table.\n");
		prom_halt();
	}

	iommu->page_table = (iopte_t *) tsb_base;
	memset(iommu->page_table, 0, IO_TSB_SIZE);

	upa_writeq(control, iommu->iommu_regs + IOMMU_CONTROL);

	/* Clean out any cruft in the IOMMU using
	 * diagnostic accesses.
	 */
	for (i = 0; i < 16; i++) {
		unsigned long dram = iommu->iommu_regs + IOMMU_DRAMDIAG;
		unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;

		dram += (unsigned long)i * 8UL;
		tag += (unsigned long)i * 8UL;

		upa_writeq(0, dram);
		upa_writeq(0, tag);
	}
	upa_readq(iommu->sbus_control_reg);

	/* Give the TSB to SYSIO. */
	upa_writeq(__pa(tsb_base), iommu->iommu_regs + IOMMU_TSBBASE);

	/* Setup streaming buffer, DE=1 SB_EN=1 */
	control = (1UL << 1UL) | (1UL << 0UL);
	upa_writeq(control, iommu->strbuf_regs + STRBUF_CONTROL);

	/* Clear out the tags using diagnostics. */
	for (i = 0; i < 16; i++) {
		unsigned long ptag, ltag;

		ptag = iommu->strbuf_regs + STRBUF_PTAGDIAG;
		ltag = iommu->strbuf_regs + STRBUF_LTAGDIAG;
		ptag += (unsigned long)i * 8UL;
		ltag += (unsigned long)i * 8UL;

		upa_writeq(0UL, ptag);
		upa_writeq(0UL, ltag);
	}

	/* Enable DVMA arbitration for all devices/slots. */
	control = upa_readq(iommu->sbus_control_reg);
	control |= 0x3fUL;
	upa_writeq(control, iommu->sbus_control_reg);

	/* Now some Xfire specific grot... */
	if (this_is_starfire)
		sbus->starfire_cookie = starfire_hookup(sbus->portid);
	else
		sbus->starfire_cookie = NULL;

	sysio_register_error_handlers(sbus);
}