/* $Id: sbus.c,v 1.19 2002/01/23 11:27:32 davem Exp $
 * sbus.c: UltraSparc SBUS controller support.
 *
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>	/* udelay(), used by sbus_strbuf_flush() */

#include <asm/page.h>
#include <asm/sbus.h>
#include <asm/io.h>
#include <asm/upa.h>
#include <asm/cache.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/starfire.h>

#include "iommu_common.h"

#define MAP_BASE	((u32)0xc0000000)
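
/* The IOMMU translates DVMA (device virtual) bus addresses, which
 * start at MAP_BASE, into physical memory.  Allocation of IOMMU page
 * table entries is tracked by a simple bitmap arena: 'map' holds one
 * bit per IO page, 'hint' is where the next search begins, and
 * 'limit' is the total number of entries.
 */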
struct sbus_iommu_arena {
        unsigned long *map;
        unsigned int hint;
        unsigned int limit;
};

struct sbus_iommu {
        spinlock_t lock;

        struct sbus_iommu_arena arena;

        iopte_t *page_table;
        unsigned long strbuf_regs;
        unsigned long iommu_regs;
        unsigned long sbus_control_reg;

        volatile unsigned long strbuf_flushflag;
};

/* Offsets from iommu_regs */
#define SYSIO_IOMMUREG_BASE	0x2400UL
#define IOMMU_CONTROL	(0x2400UL - 0x2400UL)	/* IOMMU control register */
#define IOMMU_TSBBASE	(0x2408UL - 0x2400UL)	/* TSB base address register */
#define IOMMU_FLUSH	(0x2410UL - 0x2400UL)	/* IOMMU flush register */
#define IOMMU_VADIAG	(0x4400UL - 0x2400UL)	/* SBUS virtual address diagnostic */
#define IOMMU_TAGCMP	(0x4408UL - 0x2400UL)	/* TLB tag compare diagnostics */
#define IOMMU_LRUDIAG	(0x4500UL - 0x2400UL)	/* IOMMU LRU queue diagnostics */
#define IOMMU_TAGDIAG	(0x4580UL - 0x2400UL)	/* TLB tag diagnostics */
#define IOMMU_DRAMDIAG	(0x4600UL - 0x2400UL)	/* TLB data RAM diagnostics */

#define IOMMU_DRAM_VALID	(1UL << 30UL)
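
/* Invalidate the entire IOMMU TLB by zeroing each of the 16 tag
 * diagnostic entries, then read the SBUS control register to make
 * sure the writes have completed.
 */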
static void __iommu_flushall(struct sbus_iommu *iommu)
{
        unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
        int entry;

        for (entry = 0; entry < 16; entry++) {
                upa_writeq(0, tag);
                tag += 8UL;
        }
        upa_readq(iommu->sbus_control_reg);
}

/* Offsets from strbuf_regs */
#define SYSIO_STRBUFREG_BASE	0x2800UL
#define STRBUF_CONTROL	(0x2800UL - 0x2800UL)	/* Control */
#define STRBUF_PFLUSH	(0x2808UL - 0x2800UL)	/* Page flush/invalidate */
#define STRBUF_FSYNC	(0x2810UL - 0x2800UL)	/* Flush synchronization */
#define STRBUF_DRAMDIAG	(0x5000UL - 0x2800UL)	/* data RAM diagnostic */
#define STRBUF_ERRDIAG	(0x5400UL - 0x2800UL)	/* error status diagnostics */
#define STRBUF_PTAGDIAG	(0x5800UL - 0x2800UL)	/* Page tag diagnostics */
#define STRBUF_LTAGDIAG	(0x5900UL - 0x2800UL)	/* Line tag diagnostics */

#define STRBUF_TAG_VALID	0x02UL
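
/* Flush 'npages' pages starting at bus address 'base' out of the
 * streaming buffer.  For DMA directions other than TODEVICE we must
 * also wait for the flush to complete, which the hardware signals by
 * writing a non-zero value to strbuf_flushflag in memory.
 */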
static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages, int direction)
{
        unsigned long n;
        int limit;

        n = npages;
        while (n--)
                upa_writeq(base + (n << IO_PAGE_SHIFT),
                           iommu->strbuf_regs + STRBUF_PFLUSH);

        /* If the device could not have possibly put dirty data into
         * the streaming cache, no flush-flag synchronization needs
         * to be performed.
         */
        if (direction == SBUS_DMA_TODEVICE)
                return;

        iommu->strbuf_flushflag = 0UL;

        /* Whoopee cushion! */
        upa_writeq(__pa(&iommu->strbuf_flushflag),
                   iommu->strbuf_regs + STRBUF_FSYNC);
        upa_readq(iommu->sbus_control_reg);

        limit = 100000;
        while (iommu->strbuf_flushflag == 0UL) {
                limit--;
                if (!limit)
                        break;
                udelay(1);
                rmb();
        }
        if (!limit)
                printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
                       "vaddr[%08x] npages[%ld]\n",
                       base, npages);
}

/* Based largely upon the ppc64 iommu allocator. */
static long sbus_arena_alloc(struct sbus_iommu *iommu, unsigned long npages)
{
        struct sbus_iommu_arena *arena = &iommu->arena;
        unsigned long n, i, start, end, limit;
        int pass;

        limit = arena->limit;
        start = arena->hint;
        pass = 0;

again:
        n = find_next_zero_bit(arena->map, limit, start);
        end = n + npages;
        if (unlikely(end >= limit)) {
                if (likely(pass < 1)) {
                        limit = start;
                        start = 0;
                        __iommu_flushall(iommu);
                        pass++;
                        goto again;
                } else {
                        /* Scanned the whole thing, give up. */
                        return -1;
                }
        }

        for (i = n; i < end; i++) {
                if (test_bit(i, arena->map)) {
                        start = i + 1;
                        goto again;
                }
        }

        for (i = n; i < end; i++)
                __set_bit(i, arena->map);

        arena->hint = end;

        return n;
}

static void sbus_arena_free(struct sbus_iommu_arena *arena, unsigned long base, unsigned long npages)
{
        unsigned long i;

        for (i = base; i < (base + npages); i++)
                __clear_bit(i, arena->map);
}

static void sbus_iommu_table_init(struct sbus_iommu *iommu, unsigned int tsbsize)
{
        unsigned long tsbbase, order, sz, num_tsb_entries;

        num_tsb_entries = tsbsize / sizeof(iopte_t);

        /* Setup initial software IOMMU state. */
        spin_lock_init(&iommu->lock);

        /* Allocate and initialize the free area map. */
        sz = num_tsb_entries / 8;
        sz = (sz + 7UL) & ~7UL;
        iommu->arena.map = kzalloc(sz, GFP_KERNEL);
        if (!iommu->arena.map) {
                prom_printf("SBUS_IOMMU: Error, kzalloc(arena.map) failed.\n");
                prom_halt();
        }
        iommu->arena.limit = num_tsb_entries;

        /* Now allocate and setup the IOMMU page table itself. */
        order = get_order(tsbsize);
        tsbbase = __get_free_pages(GFP_KERNEL, order);
        if (!tsbbase) {
                prom_printf("IOMMU: Error, gfp(tsb) failed.\n");
                prom_halt();
        }
        iommu->page_table = (iopte_t *)tsbbase;
        memset(iommu->page_table, 0, tsbsize);
}
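
/* Grab or release a run of 'npages' contiguous IOMMU page table
 * entries.  Callers hold iommu->lock.
 */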
static inline iopte_t *alloc_npages(struct sbus_iommu *iommu, unsigned long npages)
{
        long entry;

        entry = sbus_arena_alloc(iommu, npages);
        if (unlikely(entry < 0))
                return NULL;

        return iommu->page_table + entry;
}

static inline void free_npages(struct sbus_iommu *iommu, dma_addr_t base, unsigned long npages)
{
        sbus_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
}
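
/* Allocate a physically contiguous, zeroed buffer of 'size' bytes and
 * map it in the IOMMU, returning the kernel virtual address and
 * storing the device (DVMA) address in *dvma_addr.
 */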
void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
{
        struct sbus_iommu *iommu;
        iopte_t *iopte;
        unsigned long flags, order, first_page;
        void *ret;
        int npages;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (order >= 10)
                return NULL;

        first_page = __get_free_pages(GFP_KERNEL|__GFP_COMP, order);
        if (first_page == 0UL)
                return NULL;
        memset((char *)first_page, 0, PAGE_SIZE << order);

        iommu = sdev->bus->iommu;

        spin_lock_irqsave(&iommu->lock, flags);
        iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(iopte == NULL)) {
                free_pages(first_page, order);
                return NULL;
        }

        *dvma_addr = (MAP_BASE +
                      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        npages = size >> IO_PAGE_SHIFT;
        first_page = __pa(first_page);
        while (npages--) {
                iopte_val(*iopte) = (IOPTE_VALID | IOPTE_CACHE |
                                     IOPTE_WRITE |
                                     (first_page & IOPTE_PAGE));
                iopte++;
                first_page += IO_PAGE_SIZE;
        }

        return ret;
}

void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
{
        struct sbus_iommu *iommu;
        iopte_t *iopte;
        unsigned long flags, order, npages;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        iommu = sdev->bus->iommu;
        iopte = iommu->page_table +
                ((dvma - MAP_BASE) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        free_npages(iommu, dvma - MAP_BASE, npages);

        spin_unlock_irqrestore(&iommu->lock, flags);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}
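
/* A minimal usage sketch of the consistent DMA pair above, assuming a
 * hypothetical driver with an sbus_dev 'sdev' and a 256-byte descriptor
 * ring (names are illustrative, not from this file):
 *
 *	dma_addr_t dvma;
 *	void *ring = sbus_alloc_consistent(sdev, 256, &dvma);
 *	if (ring != NULL) {
 *		... hand 'dvma' to the device, touch 'ring' from the CPU ...
 *		sbus_free_consistent(sdev, 256, ring, dvma);
 *	}
 */

/* Map a single CPU buffer for streaming DMA.  The returned bus
 * address preserves the offset of 'ptr' within its IO page.
 */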
dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int direction)
{
        struct sbus_iommu *iommu;
        iopte_t *base;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr;
        u32 bus_addr, ret;
        unsigned long iopte_protection;

        iommu = sdev->bus->iommu;

        if (unlikely(direction == SBUS_DMA_NONE))
                BUG();

        oaddr = (unsigned long)ptr;
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;

        spin_lock_irqsave(&iommu->lock, flags);
        base = alloc_npages(iommu, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(!base))
                BUG();

        bus_addr = (MAP_BASE +
                    ((base - iommu->page_table) << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);

        iopte_protection = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
        if (direction != SBUS_DMA_TODEVICE)
                iopte_protection |= IOPTE_WRITE;

        for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
                iopte_val(*base) = iopte_protection | base_paddr;

        return ret;
}

void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
{
        struct sbus_iommu *iommu = sdev->bus->iommu;
        iopte_t *base;
        unsigned long flags, npages, i;

        if (unlikely(direction == SBUS_DMA_NONE))
                BUG();

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        base = iommu->page_table +
                ((bus_addr - MAP_BASE) >> IO_PAGE_SHIFT);

        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);
        sbus_strbuf_flush(iommu, bus_addr, npages, direction);
        for (i = 0; i < npages; i++)
                iopte_val(base[i]) = 0UL;
        free_npages(iommu, bus_addr - MAP_BASE, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG)	\
        (__pa(page_address((SG)->page)) + (SG)->offset)
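
/* Fill in IOMMU page table entries for a scatterlist that prepare_sg()
 * has already coalesced into 'nused' DMA segments.  'sg' walks the
 * original 'nelems' entries while 'dma_sg' walks the coalesced
 * segments; one segment may span several IO pages and several of the
 * original entries.
 */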
static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
                           int nused, int nelems, unsigned long iopte_protection)
{
        struct scatterlist *dma_sg = sg;
        struct scatterlist *sg_end = sg + nelems;
        int i;

        for (i = 0; i < nused; i++) {
                unsigned long pteval = ~0UL;
                u32 dma_npages;

                dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
                              dma_sg->dma_length +
                              ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
                do {
                        unsigned long offset;
                        signed int len;

                        /* If we are here, we know we have at least one
                         * more page to map.  So walk forward until we
                         * hit a page crossing, and begin creating new
                         * mappings from that spot.
                         */
                        for (;;) {
                                unsigned long tmp;

                                tmp = SG_ENT_PHYS_ADDRESS(sg);
                                len = sg->length;
                                if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = tmp & IO_PAGE_MASK;
                                        offset = tmp & (IO_PAGE_SIZE - 1UL);
                                        break;
                                }
                                if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
                                        offset = 0UL;
                                        len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
                                        break;
                                }
                                sg++;
                        }

                        pteval = iopte_protection | (pteval & IOPTE_PAGE);
                        while (len > 0) {
                                *iopte++ = __iopte(pteval);
                                pteval += IO_PAGE_SIZE;
                                len -= (IO_PAGE_SIZE - offset);
                                offset = 0;
                                dma_npages--;
                        }

                        pteval = (pteval & IOPTE_PAGE) + len;
                        sg++;

                        /* Skip over any tail mappings we've fully mapped,
                         * adjusting pteval along the way.  Stop when we
                         * detect a page crossing event.
                         */
                        while (sg < sg_end &&
                               (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
                               (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
                               ((pteval ^
                                 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
                                pteval += sg->length;
                                sg++;
                        }
                        if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
                                pteval = ~0UL;
                } while (dma_npages != 0);
                dma_sg++;
        }
}
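
/* Map a scatterlist for streaming DMA.  Single-entry lists are handed
 * to sbus_map_single(); longer lists are coalesced by prepare_sg(),
 * given a contiguous DVMA range, and have their page table entries
 * written by fill_sg().  Returns the number of DMA segments used.
 */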
int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct sbus_iommu *iommu;
        unsigned long flags, npages, iopte_protection;
        iopte_t *base;
        u32 dma_base;
        struct scatterlist *sgtmp;
        int used;

        /* Fast path single entry scatterlists. */
        if (nelems == 1) {
                sglist->dma_address =
                        sbus_map_single(sdev,
                                        (page_address(sglist->page) + sglist->offset),
                                        sglist->length, direction);
                sglist->dma_length = sglist->length;
                return 1;
        }

        iommu = sdev->bus->iommu;

        if (unlikely(direction == SBUS_DMA_NONE))
                BUG();

        npages = prepare_sg(sglist, nelems);

        spin_lock_irqsave(&iommu->lock, flags);
        base = alloc_npages(iommu, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(base == NULL))
                BUG();

        dma_base = MAP_BASE +
                ((base - iommu->page_table) << IO_PAGE_SHIFT);

        /* Normalize DVMA addresses. */
        used = nelems;

        sgtmp = sglist;
        while (used && sgtmp->dma_length) {
                sgtmp->dma_address += dma_base;
                sgtmp++;
                used--;
        }
        used = nelems - used;

        iopte_protection = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
        if (direction != SBUS_DMA_TODEVICE)
                iopte_protection |= IOPTE_WRITE;

        fill_sg(base, sglist, used, nelems, iopte_protection);

#ifdef VERIFY_SG
        verify_sglist(sglist, nelems, base, npages);
#endif

        return used;
}

void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct sbus_iommu *iommu;
        iopte_t *base;
        unsigned long flags, i, npages;
        u32 bus_addr;

        if (unlikely(direction == SBUS_DMA_NONE))
                BUG();

        iommu = sdev->bus->iommu;

        bus_addr = sglist->dma_address & IO_PAGE_MASK;

        for (i = 1; i < nelems; i++)
                if (sglist[i].dma_length == 0)
                        break;
        i--;
        npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
                  bus_addr) >> IO_PAGE_SHIFT;

        base = iommu->page_table +
                ((bus_addr - MAP_BASE) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);
        sbus_strbuf_flush(iommu, bus_addr, npages, direction);
        for (i = 0; i < npages; i++)
                iopte_val(base[i]) = 0UL;
        free_npages(iommu, bus_addr - MAP_BASE, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);
}
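
/* DMA sync operations.  Syncing for the CPU flushes the streaming
 * buffer so the CPU sees what the device has written; syncing for the
 * device is a no-op on this hardware, which is why the *_for_device
 * variants below are empty.  A sketch of a typical streaming-DMA
 * cycle (illustrative names, not from this file):
 *
 *	dma_addr_t ba = sbus_map_single(sdev, buf, len, SBUS_DMA_FROMDEVICE);
 *	... device DMAs into the buffer ...
 *	sbus_dma_sync_single_for_cpu(sdev, ba, len, SBUS_DMA_FROMDEVICE);
 *	... CPU examines buf ...
 *	sbus_unmap_single(sdev, ba, len, SBUS_DMA_FROMDEVICE);
 */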
void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
{
        struct sbus_iommu *iommu;
        unsigned long flags, npages;

        iommu = sdev->bus->iommu;

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);
        sbus_strbuf_flush(iommu, bus_addr, npages, direction);
        spin_unlock_irqrestore(&iommu->lock, flags);
}

void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
{
}

void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct sbus_iommu *iommu;
        unsigned long flags, npages, i;
        u32 bus_addr;

        iommu = sdev->bus->iommu;

        bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
        for (i = 0; i < nelems; i++) {
                if (!sglist[i].dma_length)
                        break;
        }
        i--;
        npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
                  - bus_addr) >> IO_PAGE_SHIFT;

        spin_lock_irqsave(&iommu->lock, flags);
        sbus_strbuf_flush(iommu, bus_addr, npages, direction);
        spin_unlock_irqrestore(&iommu->lock, flags);
}

void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
{
}

/* Enable 64-bit DVMA mode for the given device. */
void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
{
        struct sbus_iommu *iommu = sdev->bus->iommu;
        int slot = sdev->slot;
        unsigned long cfg_reg;
        u64 val;

        cfg_reg = iommu->sbus_control_reg;
        switch (slot) {
        case 0:
                cfg_reg += 0x20UL;
                break;
        case 1:
                cfg_reg += 0x28UL;
                break;
        case 2:
                cfg_reg += 0x30UL;
                break;
        case 3:
                cfg_reg += 0x38UL;
                break;
        case 13:
                cfg_reg += 0x40UL;
                break;
        case 14:
                cfg_reg += 0x48UL;
                break;
        case 15:
                cfg_reg += 0x50UL;
                break;

        default:
                return;
        }

        val = upa_readq(cfg_reg);
        if (val & (1UL << 14UL)) {
                /* Extended transfer mode already enabled. */
                return;
        }

        val |= (1UL << 14UL);

        if (bursts & DMA_BURST8)
                val |= (1UL << 1UL);
        if (bursts & DMA_BURST16)
                val |= (1UL << 2UL);
        if (bursts & DMA_BURST32)
                val |= (1UL << 3UL);
        if (bursts & DMA_BURST64)
                val |= (1UL << 4UL);
        upa_writeq(val, cfg_reg);
}

/* INO number to IMAP register offset for SYSIO external IRQ's.
 * This should conform to both Sunfire/Wildfire server and Fusion
 * desktop designs.
 */
#define SYSIO_IMAP_SLOT0	0x2c04UL
#define SYSIO_IMAP_SLOT1	0x2c0cUL
#define SYSIO_IMAP_SLOT2	0x2c14UL
#define SYSIO_IMAP_SLOT3	0x2c1cUL
#define SYSIO_IMAP_SCSI		0x3004UL
#define SYSIO_IMAP_ETH		0x300cUL
#define SYSIO_IMAP_BPP		0x3014UL
#define SYSIO_IMAP_AUDIO	0x301cUL
#define SYSIO_IMAP_PFAIL	0x3024UL
#define SYSIO_IMAP_KMS		0x302cUL
#define SYSIO_IMAP_FLPY		0x3034UL
#define SYSIO_IMAP_SHW		0x303cUL
#define SYSIO_IMAP_KBD		0x3044UL
#define SYSIO_IMAP_MS		0x304cUL
#define SYSIO_IMAP_SER		0x3054UL
#define SYSIO_IMAP_TIM0		0x3064UL
#define SYSIO_IMAP_TIM1		0x306cUL
#define SYSIO_IMAP_UE		0x3074UL
#define SYSIO_IMAP_CE		0x307cUL
#define SYSIO_IMAP_SBERR	0x3084UL
#define SYSIO_IMAP_PMGMT	0x308cUL
#define SYSIO_IMAP_GFX		0x3094UL
#define SYSIO_IMAP_EUPA		0x309cUL

#define bogon	((unsigned long) -1)

static unsigned long sysio_irq_offsets[] = {
        /* SBUS Slot 0 --> 3, level 1 --> 7 */
        SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
        SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
        SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
        SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
        SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
        SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
        SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
        SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,

        /* Onboard devices (not relevant/used on SunFire). */
        SYSIO_IMAP_SCSI,
        SYSIO_IMAP_ETH,
        SYSIO_IMAP_BPP,
        bogon,
        SYSIO_IMAP_AUDIO,
        SYSIO_IMAP_PFAIL,
        bogon,
        bogon,
        SYSIO_IMAP_KMS,
        SYSIO_IMAP_FLPY,
        SYSIO_IMAP_SHW,
        SYSIO_IMAP_KBD,
        SYSIO_IMAP_MS,
        SYSIO_IMAP_SER,
        bogon,
        bogon,
        SYSIO_IMAP_TIM0,
        SYSIO_IMAP_TIM1,
        bogon,
        bogon,
        SYSIO_IMAP_UE,
        SYSIO_IMAP_CE,
        SYSIO_IMAP_SBERR,
        SYSIO_IMAP_PMGMT,
};

#undef bogon

#define NUM_SYSIO_OFFSETS ARRAY_SIZE(sysio_irq_offsets)

/* Convert Interrupt Mapping register pointer to associated
 * Interrupt Clear register pointer, SYSIO specific version.
 */
#define SYSIO_ICLR_UNUSED0	0x3400UL
#define SYSIO_ICLR_SLOT0	0x340cUL
#define SYSIO_ICLR_SLOT1	0x344cUL
#define SYSIO_ICLR_SLOT2	0x348cUL
#define SYSIO_ICLR_SLOT3	0x34ccUL
static unsigned long sysio_imap_to_iclr(unsigned long imap)
{
        unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0;
        return imap + diff;
}
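
/* Translate a SYSIO interrupt number (INO) into a virtual IRQ.  INOs
 * below 0x20 are external SBUS slot interrupts and need the per-level
 * ICLR register selected below; the rest are onboard devices whose
 * ICLR sits at a fixed offset from the IMAP.
 */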
unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
{
        struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
        struct sbus_iommu *iommu = sbus->iommu;
        unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
        unsigned long imap, iclr;
        int sbus_level = 0;

        imap = sysio_irq_offsets[ino];
        if (imap == ((unsigned long)-1)) {
                prom_printf("get_irq_translations: Bad SYSIO INO[%x]\n",
                            ino);
                prom_halt();
        }
        imap += reg_base;

        /* SYSIO inconsistency.  For external SLOTS, we have to select
         * the right ICLR register based upon the lower SBUS irq level
         * bits.
         */
        if (ino >= 0x20) {
                iclr = sysio_imap_to_iclr(imap);
        } else {
                int sbus_slot = (ino & 0x18) >> 3;

                sbus_level = ino & 0x7;

                switch (sbus_slot) {
                case 0:
                        iclr = reg_base + SYSIO_ICLR_SLOT0;
                        break;
                case 1:
                        iclr = reg_base + SYSIO_ICLR_SLOT1;
                        break;
                case 2:
                        iclr = reg_base + SYSIO_ICLR_SLOT2;
                        break;
                default:
                case 3:
                        iclr = reg_base + SYSIO_ICLR_SLOT3;
                        break;
                }

                /* The SYSIO_ICLR_SLOT* offsets point at the level 1
                 * register; step forward to the ICLR for this level.
                 */
                iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
        }
        return build_irq(sbus_level, iclr, imap);
}

/* Error interrupt handling. */
#define SYSIO_UE_AFSR	0x0030UL
#define SYSIO_UE_AFAR	0x0038UL
#define SYSIO_UEAFSR_PPIO	0x8000000000000000UL /* Primary PIO cause */
#define SYSIO_UEAFSR_PDRD	0x4000000000000000UL /* Primary DVMA read cause */
#define SYSIO_UEAFSR_PDWR	0x2000000000000000UL /* Primary DVMA write cause */
#define SYSIO_UEAFSR_SPIO	0x1000000000000000UL /* Secondary PIO is cause */
#define SYSIO_UEAFSR_SDRD	0x0800000000000000UL /* Secondary DVMA read cause */
#define SYSIO_UEAFSR_SDWR	0x0400000000000000UL /* Secondary DVMA write cause */
#define SYSIO_UEAFSR_RESV1	0x03ff000000000000UL /* Reserved */
#define SYSIO_UEAFSR_DOFF	0x0000e00000000000UL /* Doubleword Offset */
#define SYSIO_UEAFSR_SIZE	0x00001c0000000000UL /* Bad transfer size 2^SIZE */
#define SYSIO_UEAFSR_MID	0x000003e000000000UL /* UPA MID causing the fault */
#define SYSIO_UEAFSR_RESV2	0x0000001fffffffffUL /* Reserved */

static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
{
        struct sbus_bus *sbus = dev_id;
        struct sbus_iommu *iommu = sbus->iommu;
        unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
        unsigned long afsr_reg, afar_reg;
        unsigned long afsr, afar, error_bits;
        int reported;

        afsr_reg = reg_base + SYSIO_UE_AFSR;
        afar_reg = reg_base + SYSIO_UE_AFAR;

        /* Latch error status. */
        afsr = upa_readq(afsr_reg);
        afar = upa_readq(afar_reg);

        /* Clear primary/secondary error status bits. */
        error_bits = afsr &
                (SYSIO_UEAFSR_PPIO | SYSIO_UEAFSR_PDRD | SYSIO_UEAFSR_PDWR |
                 SYSIO_UEAFSR_SPIO | SYSIO_UEAFSR_SDRD | SYSIO_UEAFSR_SDWR);
        upa_writeq(error_bits, afsr_reg);

        /* Log the error. */
        printk("SYSIO[%x]: Uncorrectable ECC Error, primary error type[%s]\n",
               sbus->portid,
               (((error_bits & SYSIO_UEAFSR_PPIO) ?
                 "PIO" :
                 ((error_bits & SYSIO_UEAFSR_PDRD) ?
                  "DVMA Read" :
                  ((error_bits & SYSIO_UEAFSR_PDWR) ?
                   "DVMA Write" : "???")))));
        printk("SYSIO[%x]: DOFF[%lx] SIZE[%lx] MID[%lx]\n",
               sbus->portid,
               (afsr & SYSIO_UEAFSR_DOFF) >> 45UL,
               (afsr & SYSIO_UEAFSR_SIZE) >> 42UL,
               (afsr & SYSIO_UEAFSR_MID) >> 37UL);
        printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
        printk("SYSIO[%x]: Secondary UE errors [", sbus->portid);
        reported = 0;
        if (afsr & SYSIO_UEAFSR_SPIO) {
                reported++;
                printk("(PIO)");
        }
        if (afsr & SYSIO_UEAFSR_SDRD) {
                reported++;
                printk("(DVMA Read)");
        }
        if (afsr & SYSIO_UEAFSR_SDWR) {
                reported++;
                printk("(DVMA Write)");
        }
        if (!reported)
                printk("(none)");
        printk("]\n");

        return IRQ_HANDLED;
}

#define SYSIO_CE_AFSR	0x0040UL
#define SYSIO_CE_AFAR	0x0048UL
#define SYSIO_CEAFSR_PPIO	0x8000000000000000UL /* Primary PIO cause */
#define SYSIO_CEAFSR_PDRD	0x4000000000000000UL /* Primary DVMA read cause */
#define SYSIO_CEAFSR_PDWR	0x2000000000000000UL /* Primary DVMA write cause */
#define SYSIO_CEAFSR_SPIO	0x1000000000000000UL /* Secondary PIO cause */
#define SYSIO_CEAFSR_SDRD	0x0800000000000000UL /* Secondary DVMA read cause */
#define SYSIO_CEAFSR_SDWR	0x0400000000000000UL /* Secondary DVMA write cause */
#define SYSIO_CEAFSR_RESV1	0x0300000000000000UL /* Reserved */
#define SYSIO_CEAFSR_ESYND	0x00ff000000000000UL /* Syndrome Bits */
#define SYSIO_CEAFSR_DOFF	0x0000e00000000000UL /* Double Offset */
#define SYSIO_CEAFSR_SIZE	0x00001c0000000000UL /* Bad transfer size 2^SIZE */
#define SYSIO_CEAFSR_MID	0x000003e000000000UL /* UPA MID causing the fault */
#define SYSIO_CEAFSR_RESV2	0x0000001fffffffffUL /* Reserved */

static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
{
        struct sbus_bus *sbus = dev_id;
        struct sbus_iommu *iommu = sbus->iommu;
        unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
        unsigned long afsr_reg, afar_reg;
        unsigned long afsr, afar, error_bits;
        int reported;

        afsr_reg = reg_base + SYSIO_CE_AFSR;
        afar_reg = reg_base + SYSIO_CE_AFAR;

        /* Latch error status. */
        afsr = upa_readq(afsr_reg);
        afar = upa_readq(afar_reg);

        /* Clear primary/secondary error status bits. */
        error_bits = afsr &
                (SYSIO_CEAFSR_PPIO | SYSIO_CEAFSR_PDRD | SYSIO_CEAFSR_PDWR |
                 SYSIO_CEAFSR_SPIO | SYSIO_CEAFSR_SDRD | SYSIO_CEAFSR_SDWR);
        upa_writeq(error_bits, afsr_reg);

        printk("SYSIO[%x]: Correctable ECC Error, primary error type[%s]\n",
               sbus->portid,
               (((error_bits & SYSIO_CEAFSR_PPIO) ?
                 "PIO" :
                 ((error_bits & SYSIO_CEAFSR_PDRD) ?
                  "DVMA Read" :
                  ((error_bits & SYSIO_CEAFSR_PDWR) ?
                   "DVMA Write" : "???")))));

        /* XXX Use syndrome and afar to print out module string just like
         * XXX UDB CE trap handler does... -DaveM
         */
        printk("SYSIO[%x]: DOFF[%lx] ECC Syndrome[%lx] Size[%lx] MID[%lx]\n",
               sbus->portid,
               (afsr & SYSIO_CEAFSR_DOFF) >> 45UL,
               (afsr & SYSIO_CEAFSR_ESYND) >> 48UL,
               (afsr & SYSIO_CEAFSR_SIZE) >> 42UL,
               (afsr & SYSIO_CEAFSR_MID) >> 37UL);
        printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);

        printk("SYSIO[%x]: Secondary CE errors [", sbus->portid);
        reported = 0;
        if (afsr & SYSIO_CEAFSR_SPIO) {
                reported++;
                printk("(PIO)");
        }
        if (afsr & SYSIO_CEAFSR_SDRD) {
                reported++;
                printk("(DVMA Read)");
        }
        if (afsr & SYSIO_CEAFSR_SDWR) {
                reported++;
                printk("(DVMA Write)");
        }
        if (!reported)
                printk("(none)");
        printk("]\n");

        return IRQ_HANDLED;
}

#define SYSIO_SBUS_AFSR	0x2010UL
#define SYSIO_SBUS_AFAR	0x2018UL
#define SYSIO_SBAFSR_PLE	0x8000000000000000UL /* Primary Late PIO Error */
#define SYSIO_SBAFSR_PTO	0x4000000000000000UL /* Primary SBUS Timeout */
#define SYSIO_SBAFSR_PBERR	0x2000000000000000UL /* Primary SBUS Error ACK */
#define SYSIO_SBAFSR_SLE	0x1000000000000000UL /* Secondary Late PIO Error */
#define SYSIO_SBAFSR_STO	0x0800000000000000UL /* Secondary SBUS Timeout */
#define SYSIO_SBAFSR_SBERR	0x0400000000000000UL /* Secondary SBUS Error ACK */
#define SYSIO_SBAFSR_RESV1	0x03ff000000000000UL /* Reserved */
#define SYSIO_SBAFSR_RD		0x0000800000000000UL /* Primary was late PIO read */
#define SYSIO_SBAFSR_RESV2	0x0000600000000000UL /* Reserved */
#define SYSIO_SBAFSR_SIZE	0x00001c0000000000UL /* Size of transfer */
#define SYSIO_SBAFSR_MID	0x000003e000000000UL /* MID causing the error */
#define SYSIO_SBAFSR_RESV3	0x0000001fffffffffUL /* Reserved */

static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
{
        struct sbus_bus *sbus = dev_id;
        struct sbus_iommu *iommu = sbus->iommu;
        unsigned long afsr_reg, afar_reg, reg_base;
        unsigned long afsr, afar, error_bits;
        int reported;

        reg_base = iommu->sbus_control_reg - 0x2000UL;
        afsr_reg = reg_base + SYSIO_SBUS_AFSR;
        afar_reg = reg_base + SYSIO_SBUS_AFAR;

        afsr = upa_readq(afsr_reg);
        afar = upa_readq(afar_reg);

        /* Clear primary/secondary error status bits. */
        error_bits = afsr &
                (SYSIO_SBAFSR_PLE | SYSIO_SBAFSR_PTO | SYSIO_SBAFSR_PBERR |
                 SYSIO_SBAFSR_SLE | SYSIO_SBAFSR_STO | SYSIO_SBAFSR_SBERR);
        upa_writeq(error_bits, afsr_reg);

        /* Log the error. */
        printk("SYSIO[%x]: SBUS Error, primary error type[%s] read(%d)\n",
               sbus->portid,
               (((error_bits & SYSIO_SBAFSR_PLE) ?
                 "Late PIO Error" :
                 ((error_bits & SYSIO_SBAFSR_PTO) ?
                  "Time Out" :
                  ((error_bits & SYSIO_SBAFSR_PBERR) ?
                   "Error Ack" : "???")))),
               (afsr & SYSIO_SBAFSR_RD) ? 1 : 0);
        printk("SYSIO[%x]: size[%lx] MID[%lx]\n",
               sbus->portid,
               (afsr & SYSIO_SBAFSR_SIZE) >> 42UL,
               (afsr & SYSIO_SBAFSR_MID) >> 37UL);
        printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);

        printk("SYSIO[%x]: Secondary SBUS errors [", sbus->portid);
        reported = 0;
        if (afsr & SYSIO_SBAFSR_SLE) {
                reported++;
                printk("(Late PIO Error)");
        }
        if (afsr & SYSIO_SBAFSR_STO) {
                reported++;
                printk("(Time Out)");
        }
        if (afsr & SYSIO_SBAFSR_SBERR) {
                reported++;
                printk("(Error Ack)");
        }
        if (!reported)
                printk("(none)");
        printk("]\n");

        /* XXX check iommu/strbuf for further error status XXX */

        return IRQ_HANDLED;
}

#define ECC_CONTROL	0x0020UL
#define SYSIO_ECNTRL_ECCEN	0x8000000000000000UL /* Enable ECC Checking */
#define SYSIO_ECNTRL_UEEN	0x4000000000000000UL /* Enable UE Interrupts */
#define SYSIO_ECNTRL_CEEN	0x2000000000000000UL /* Enable CE Interrupts */

#define SYSIO_UE_INO		0x34
#define SYSIO_CE_INO		0x35
#define SYSIO_SBUSERR_INO	0x36

static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
{
        struct sbus_iommu *iommu = sbus->iommu;
        unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
        unsigned int irq;
        u64 control;

        irq = sbus_build_irq(sbus, SYSIO_UE_INO);
        if (request_irq(irq, sysio_ue_handler,
                        IRQF_SHARED, "SYSIO UE", sbus) < 0) {
                prom_printf("SYSIO[%x]: Cannot register UE interrupt.\n",
                            sbus->portid);
                prom_halt();
        }

        irq = sbus_build_irq(sbus, SYSIO_CE_INO);
        if (request_irq(irq, sysio_ce_handler,
                        IRQF_SHARED, "SYSIO CE", sbus) < 0) {
                prom_printf("SYSIO[%x]: Cannot register CE interrupt.\n",
                            sbus->portid);
                prom_halt();
        }

        irq = sbus_build_irq(sbus, SYSIO_SBUSERR_INO);
        if (request_irq(irq, sysio_sbus_error_handler,
                        IRQF_SHARED, "SYSIO SBUS Error", sbus) < 0) {
                prom_printf("SYSIO[%x]: Cannot register SBUS Error interrupt.\n",
                            sbus->portid);
                prom_halt();
        }

        /* Now turn the error interrupts on and also enable ECC checking. */
        upa_writeq((SYSIO_ECNTRL_ECCEN |
                    SYSIO_ECNTRL_UEEN |
                    SYSIO_ECNTRL_CEEN),
                   reg_base + ECC_CONTROL);

        control = upa_readq(iommu->sbus_control_reg);
        control |= 0x100UL; /* SBUS Error Interrupt Enable */
        upa_writeq(control, iommu->sbus_control_reg);
}

/* Boot time initialization. */
static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
{
        const struct linux_prom64_registers *pr;
        struct device_node *dp;
        struct sbus_iommu *iommu;
        unsigned long regs;
        u64 control;
        int i;

        dp = of_find_node_by_phandle(__node);

        sbus->portid = of_getintprop_default(dp, "upa-portid", -1);

        pr = of_get_property(dp, "reg", NULL);
        if (!pr) {
                prom_printf("sbus_iommu_init: Cannot map SYSIO control registers.\n");
                prom_halt();
        }
        regs = pr->phys_addr;

        iommu = kmalloc(sizeof(*iommu) + SMP_CACHE_BYTES, GFP_ATOMIC);
        if (iommu == NULL) {
                prom_printf("sbus_iommu_init: Fatal error, kmalloc(iommu) failed\n");
                prom_halt();
        }

        /* Align on E$ line boundary. */
        iommu = (struct sbus_iommu *)
                (((unsigned long)iommu + (SMP_CACHE_BYTES - 1UL)) &
                 ~(SMP_CACHE_BYTES - 1UL));

        memset(iommu, 0, sizeof(*iommu));

        /* Setup spinlock. */
        spin_lock_init(&iommu->lock);

        /* Init register offsets. */
        iommu->iommu_regs = regs + SYSIO_IOMMUREG_BASE;
        iommu->strbuf_regs = regs + SYSIO_STRBUFREG_BASE;

        /* The SYSIO SBUS control register is used for dummy reads
         * in order to ensure write completion.
         */
        iommu->sbus_control_reg = regs + 0x2000UL;

        /* Link into SYSIO software state. */
        sbus->iommu = iommu;

        printk("SYSIO: UPA portID %x, at %016lx\n",
               sbus->portid, regs);

        /* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
        sbus_iommu_table_init(iommu, IO_TSB_SIZE);

        control = upa_readq(iommu->iommu_regs + IOMMU_CONTROL);
        control = ((7UL << 16UL) |
                   (0UL << 2UL) |
                   (1UL << 1UL) |
                   (1UL << 0UL));
        upa_writeq(control, iommu->iommu_regs + IOMMU_CONTROL);

        /* Clean out any cruft in the IOMMU using
         * diagnostic accesses.
         */
        for (i = 0; i < 16; i++) {
                unsigned long dram = iommu->iommu_regs + IOMMU_DRAMDIAG;
                unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;

                dram += (unsigned long)i * 8UL;
                tag += (unsigned long)i * 8UL;

                upa_writeq(0, dram);
                upa_writeq(0, tag);
        }
        upa_readq(iommu->sbus_control_reg);

        /* Give the TSB to SYSIO. */
        upa_writeq(__pa(iommu->page_table), iommu->iommu_regs + IOMMU_TSBBASE);

        /* Setup streaming buffer, DE=1 SB_EN=1 */
        control = (1UL << 1UL) | (1UL << 0UL);
        upa_writeq(control, iommu->strbuf_regs + STRBUF_CONTROL);

        /* Clear out the tags using diagnostics. */
        for (i = 0; i < 16; i++) {
                unsigned long ptag, ltag;

                ptag = iommu->strbuf_regs + STRBUF_PTAGDIAG;
                ltag = iommu->strbuf_regs + STRBUF_LTAGDIAG;
                ptag += (unsigned long)i * 8UL;
                ltag += (unsigned long)i * 8UL;

                upa_writeq(0UL, ptag);
                upa_writeq(0UL, ltag);
        }

        /* Enable DVMA arbitration for all devices/slots. */
        control = upa_readq(iommu->sbus_control_reg);
        control |= 0x3fUL;
        upa_writeq(control, iommu->sbus_control_reg);

        /* Now some Xfire specific grot... */
        if (this_is_starfire)
                starfire_hookup(sbus->portid);

        sysio_register_error_handlers(sbus);
}
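
/* Fill in sdev->irqs[] from the "interrupts" OBP property.  Interrupt
 * priorities below 0x20 are slot-relative SBUS levels and must be
 * biased by the device's slot number before translation.
 */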
void sbus_fill_device_irq(struct sbus_dev *sdev)
{
        struct device_node *dp = of_find_node_by_phandle(sdev->prom_node);
        const struct linux_prom_irqs *irqs;

        irqs = of_get_property(dp, "interrupts", NULL);
        if (!irqs) {
                sdev->irqs[0] = 0;
                sdev->num_irqs = 0;
        } else {
                unsigned int pri = irqs[0].pri;

                sdev->num_irqs = 1;
                if (pri < 0x20)
                        pri += sdev->slot * 8;

                sdev->irqs[0] = sbus_build_irq(sdev->bus, pri);
        }
}

void __init sbus_arch_bus_ranges_init(struct device_node *pn, struct sbus_bus *sbus)
{
}

void __init sbus_setup_iommu(struct sbus_bus *sbus, struct device_node *dp)
{
        sbus_iommu_init(dp->node, sbus);
}

void __init sbus_setup_arch_props(struct sbus_bus *sbus, struct device_node *dp)
{
}

int __init sbus_arch_preinit(void)
{
        return 0;
}

void __init sbus_arch_postinit(void)
{
        extern void firetruck_init(void);

        firetruck_init();
}