sbus.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205
  1. /* $Id: sbus.c,v 1.19 2002/01/23 11:27:32 davem Exp $
  2. * sbus.c: UltraSparc SBUS controller support.
  3. *
  4. * Copyright (C) 1999 David S. Miller (davem@redhat.com)
  5. */
  6. #include <linux/kernel.h>
  7. #include <linux/types.h>
  8. #include <linux/mm.h>
  9. #include <linux/spinlock.h>
  10. #include <linux/slab.h>
  11. #include <linux/init.h>
  12. #include <linux/interrupt.h>
  13. #include <asm/page.h>
  14. #include <asm/sbus.h>
  15. #include <asm/io.h>
  16. #include <asm/upa.h>
  17. #include <asm/cache.h>
  18. #include <asm/dma.h>
  19. #include <asm/irq.h>
  20. #include <asm/prom.h>
  21. #include <asm/starfire.h>
  22. #include "iommu_common.h"
/* Base DVMA address of the SYSIO IOMMU translation window; every bus
 * address handed out by this driver is at or above this value.
 */
#define MAP_BASE	((u32)0xc0000000)

/* Per-SBUS-bus controller state: the IOMMU proper plus its streaming
 * buffer (the write-back cache sitting between devices and memory).
 */
struct sbus_info {
	struct iommu	iommu;
	struct strbuf	strbuf;
};
  28. /* Offsets from iommu_regs */
  29. #define SYSIO_IOMMUREG_BASE 0x2400UL
  30. #define IOMMU_CONTROL (0x2400UL - 0x2400UL) /* IOMMU control register */
  31. #define IOMMU_TSBBASE (0x2408UL - 0x2400UL) /* TSB base address register */
  32. #define IOMMU_FLUSH (0x2410UL - 0x2400UL) /* IOMMU flush register */
  33. #define IOMMU_VADIAG (0x4400UL - 0x2400UL) /* SBUS virtual address diagnostic */
  34. #define IOMMU_TAGCMP (0x4408UL - 0x2400UL) /* TLB tag compare diagnostics */
  35. #define IOMMU_LRUDIAG (0x4500UL - 0x2400UL) /* IOMMU LRU queue diagnostics */
  36. #define IOMMU_TAGDIAG (0x4580UL - 0x2400UL) /* TLB tag diagnostics */
  37. #define IOMMU_DRAMDIAG (0x4600UL - 0x2400UL) /* TLB data RAM diagnostics */
  38. #define IOMMU_DRAM_VALID (1UL << 30UL)
/* Invalidate the entire SYSIO IOMMU TLB.
 *
 * Zero is written into each of the 16 TLB tag diagnostic slots,
 * wiping the cached translations; the final read of the
 * write-complete register ensures the stores have reached the
 * hardware before we return.
 */
static void __iommu_flushall(struct iommu *iommu)
{
	unsigned long tag;
	int entry;

	/* Address of the first TLB tag diagnostic register. */
	tag = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL);
	for (entry = 0; entry < 16; entry++) {
		upa_writeq(0, tag);
		tag += 8UL;	/* tag diagnostic registers are 8 bytes apart */
	}

	/* Make sure the flush writes have completed. */
	upa_readq(iommu->write_complete_reg);
}
  50. /* Offsets from strbuf_regs */
  51. #define SYSIO_STRBUFREG_BASE 0x2800UL
  52. #define STRBUF_CONTROL (0x2800UL - 0x2800UL) /* Control */
  53. #define STRBUF_PFLUSH (0x2808UL - 0x2800UL) /* Page flush/invalidate */
  54. #define STRBUF_FSYNC (0x2810UL - 0x2800UL) /* Flush synchronization */
  55. #define STRBUF_DRAMDIAG (0x5000UL - 0x2800UL) /* data RAM diagnostic */
  56. #define STRBUF_ERRDIAG (0x5400UL - 0x2800UL) /* error status diagnostics */
  57. #define STRBUF_PTAGDIAG (0x5800UL - 0x2800UL) /* Page tag diagnostics */
  58. #define STRBUF_LTAGDIAG (0x5900UL - 0x2800UL) /* Line tag diagnostics */
  59. #define STRBUF_TAG_VALID 0x02UL
/* Flush streaming-buffer state for the DVMA range
 * [base, base + npages * IO_PAGE_SIZE).
 *
 * Each write to the PFLUSH register invalidates streaming-cache
 * lines for one IO page.  When the device may have written data
 * (any direction other than SBUS_DMA_TODEVICE) we must also wait
 * for dirty lines to reach memory: the FSYNC register is armed with
 * the physical address of a flush-flag word which the hardware is
 * expected to set non-zero once the flush completes, and we poll it
 * with a bounded spin.
 *
 * All callers in this file hold iommu->lock around this call.
 */
static void sbus_strbuf_flush(struct iommu *iommu, struct strbuf *strbuf, u32 base, unsigned long npages, int direction)
{
	unsigned long n;
	int limit;

	/* Queue a page invalidation for every page in the range. */
	n = npages;
	while (n--)
		upa_writeq(base + (n << IO_PAGE_SHIFT), strbuf->strbuf_pflush);

	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == SBUS_DMA_TODEVICE)
		return;

	*(strbuf->strbuf_flushflag) = 0UL;

	/* Whoopee cushion! */
	upa_writeq(strbuf->strbuf_flushflag_pa, strbuf->strbuf_fsync);
	upa_readq(iommu->write_complete_reg);

	/* Bounded poll: roughly 100ms worth of 1us delays. */
	limit = 100000;
	while (*(strbuf->strbuf_flushflag) == 0UL) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
		       "vaddr[%08x] npages[%ld]\n",
		       base, npages);
}
/* Based largely upon the ppc64 iommu allocator. */
/* Allocate @npages contiguous entries from the IOMMU arena bitmap.
 *
 * Returns the index of the first entry, or -1 when no sufficiently
 * long free run exists.  The search starts at a rotating hint; if it
 * runs off the end of the map we wrap around exactly once (pass 1),
 * flushing the IOMMU TLB first because recently freed entries may
 * still be cached in hardware.
 *
 * Callers (alloc_npages) hold iommu->lock.
 */
static long sbus_arena_alloc(struct iommu *iommu, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			/* Wrap: restrict the retry to the region below
			 * the point we had reached, so every bit is
			 * examined at most twice overall.
			 */
			limit = start;
			start = 0;
			__iommu_flushall(iommu);
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	/* Verify the whole candidate run is free; on collision resume
	 * the search just past the busy bit.
	 */
	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	/* Claim the run and advance the hint past it. */
	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;
	return n;
}
  125. static void sbus_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
  126. {
  127. unsigned long i;
  128. for (i = base; i < (base + npages); i++)
  129. __clear_bit(i, arena->map);
  130. }
  131. static void sbus_iommu_table_init(struct iommu *iommu, unsigned int tsbsize)
  132. {
  133. unsigned long tsbbase, order, sz, num_tsb_entries;
  134. num_tsb_entries = tsbsize / sizeof(iopte_t);
  135. /* Setup initial software IOMMU state. */
  136. spin_lock_init(&iommu->lock);
  137. iommu->page_table_map_base = MAP_BASE;
  138. /* Allocate and initialize the free area map. */
  139. sz = num_tsb_entries / 8;
  140. sz = (sz + 7UL) & ~7UL;
  141. iommu->arena.map = kzalloc(sz, GFP_KERNEL);
  142. if (!iommu->arena.map) {
  143. prom_printf("SBUS_IOMMU: Error, kmalloc(arena.map) failed.\n");
  144. prom_halt();
  145. }
  146. iommu->arena.limit = num_tsb_entries;
  147. /* Now allocate and setup the IOMMU page table itself. */
  148. order = get_order(tsbsize);
  149. tsbbase = __get_free_pages(GFP_KERNEL, order);
  150. if (!tsbbase) {
  151. prom_printf("IOMMU: Error, gfp(tsb) failed.\n");
  152. prom_halt();
  153. }
  154. iommu->page_table = (iopte_t *)tsbbase;
  155. memset(iommu->page_table, 0, tsbsize);
  156. }
  157. static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
  158. {
  159. long entry;
  160. entry = sbus_arena_alloc(iommu, npages);
  161. if (unlikely(entry < 0))
  162. return NULL;
  163. return iommu->page_table + entry;
  164. }
  165. static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
  166. {
  167. sbus_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
  168. }
  169. void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
  170. {
  171. struct sbus_info *info;
  172. struct iommu *iommu;
  173. iopte_t *iopte;
  174. unsigned long flags, order, first_page;
  175. void *ret;
  176. int npages;
  177. size = IO_PAGE_ALIGN(size);
  178. order = get_order(size);
  179. if (order >= 10)
  180. return NULL;
  181. first_page = __get_free_pages(GFP_KERNEL|__GFP_COMP, order);
  182. if (first_page == 0UL)
  183. return NULL;
  184. memset((char *)first_page, 0, PAGE_SIZE << order);
  185. info = sdev->bus->iommu;
  186. iommu = &info->iommu;
  187. spin_lock_irqsave(&iommu->lock, flags);
  188. iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
  189. spin_unlock_irqrestore(&iommu->lock, flags);
  190. if (unlikely(iopte == NULL)) {
  191. free_pages(first_page, order);
  192. return NULL;
  193. }
  194. *dvma_addr = (iommu->page_table_map_base +
  195. ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
  196. ret = (void *) first_page;
  197. npages = size >> IO_PAGE_SHIFT;
  198. first_page = __pa(first_page);
  199. while (npages--) {
  200. iopte_val(*iopte) = (IOPTE_VALID | IOPTE_CACHE |
  201. IOPTE_WRITE |
  202. (first_page & IOPTE_PAGE));
  203. iopte++;
  204. first_page += IO_PAGE_SIZE;
  205. }
  206. return ret;
  207. }
  208. void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
  209. {
  210. struct sbus_info *info;
  211. struct iommu *iommu;
  212. iopte_t *iopte;
  213. unsigned long flags, order, npages;
  214. npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
  215. info = sdev->bus->iommu;
  216. iommu = &info->iommu;
  217. iopte = iommu->page_table +
  218. ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
  219. spin_lock_irqsave(&iommu->lock, flags);
  220. free_npages(iommu, dvma - iommu->page_table_map_base, npages);
  221. spin_unlock_irqrestore(&iommu->lock, flags);
  222. order = get_order(size);
  223. if (order < 10)
  224. free_pages((unsigned long)cpu, order);
  225. }
/* Map a single CPU buffer for streaming DVMA.
 *
 * Returns the bus address the device should use; the intra-page bits
 * of @ptr are preserved in the returned address.  On SBUS_DMA_NONE
 * or arena exhaustion this BUG()s rather than returning an error.
 */
dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int direction)
{
	struct sbus_info *info;
	struct iommu *iommu;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	info = sdev->bus->iommu;
	iommu = &info->iommu;

	if (unlikely(direction == SBUS_DMA_NONE))
		BUG();

	/* Number of IO pages spanned by [ptr, ptr + sz). */
	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(iommu, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		BUG();

	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);

	/* Streaming mappings go through the streaming buffer; device
	 * writes are only permitted when data can flow toward the CPU.
	 */
	iopte_protection = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
	if (direction != SBUS_DMA_TODEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;
}
/* Tear down a mapping created by sbus_map_single().
 *
 * Flushes the streaming buffer over the range (so any device-written
 * data reaches memory), clears the IOPTEs, and releases the arena
 * slots — all under iommu->lock.
 */
void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct sbus_info *info = sdev->bus->iommu;
	struct iommu *iommu = &info->iommu;
	struct strbuf *strbuf = &info->strbuf;
	iopte_t *base;
	unsigned long flags, npages, i;

	if (unlikely(direction == SBUS_DMA_NONE))
		BUG();

	/* Recompute the page count the same way the map side did. */
	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);
	sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
	for (i = 0; i < npages; i++)
		iopte_val(base[i]) = 0UL;
	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);
}
/* Physical address of the start of a scatterlist entry's data. */
#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

/* Populate IOPTEs for a coalesced scatterlist.
 *
 * @iopte:            first IOMMU page table entry to fill
 * @sg:               the scatterlist (physical segments)
 * @nused:            number of coalesced DVMA segments to emit
 * @nelems:           total number of scatterlist entries
 * @iopte_protection: protection bits OR'd into every IOPTE
 *
 * dma_address/dma_length of each dma_sg were set up earlier (see
 * sbus_map_sg(), which calls prepare_sg() before this).  pteval
 * doubles as a "current physical page" tracker; ~0UL means "no page
 * selected yet".
 */
static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
			   int nused, int nelems, unsigned long iopte_protection)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i;

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		/* IO pages covered by this DVMA segment, including the
		 * partial pages at either end.
		 */
		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				/* Entry starts on a new physical page? */
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				/* Entry crosses into the next page? */
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			/* Emit one IOPTE per remaining page of this run. */
			pteval = iopte_protection | (pteval & IOPTE_PAGE);
			while (len > 0) {
				*iopte++ = __iopte(pteval);
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			/* Page-aligned pteval means "start fresh". */
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}
}
/* Map a scatterlist for streaming DVMA.
 *
 * Single-entry lists take the sbus_map_single() fast path.
 * Otherwise prepare_sg() coalesces the list (filling in relative
 * dma_address/dma_length and returning the total IO page count), a
 * contiguous IOPTE range is allocated, the DVMA addresses are
 * rebased onto it, and fill_sg() writes the entries.
 *
 * Returns the number of DVMA segments actually used.  BUG()s on
 * SBUS_DMA_NONE or arena exhaustion.
 */
int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct sbus_info *info;
	struct iommu *iommu;
	unsigned long flags, npages, iopte_protection;
	iopte_t *base;
	u32 dma_base;
	struct scatterlist *sgtmp;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			sbus_map_single(sdev,
					(page_address(sglist->page) + sglist->offset),
					sglist->length, direction);
		sglist->dma_length = sglist->length;
		return 1;
	}

	info = sdev->bus->iommu;
	iommu = &info->iommu;

	if (unlikely(direction == SBUS_DMA_NONE))
		BUG();

	npages = prepare_sg(sglist, nelems);

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(iommu, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(base == NULL))
		BUG();

	dma_base = iommu->page_table_map_base +
		((base - iommu->page_table) << IO_PAGE_SHIFT);

	/* Normalize DVMA addresses. */
	used = nelems;
	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;	/* number of segments actually used */

	iopte_protection = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
	if (direction != SBUS_DMA_TODEVICE)
		iopte_protection |= IOPTE_WRITE;

	fill_sg(base, sglist, used, nelems, iopte_protection);

#ifdef VERIFY_SG
	verify_sglist(sglist, nelems, base, npages);
#endif

	return used;
}
/* Unmap a scatterlist mapped by sbus_map_sg().
 *
 * The coalesced DVMA range is contiguous: it starts at entry 0's
 * dma_address and extends through the last entry with a non-zero
 * dma_length.  Flush the streaming buffer, clear the IOPTEs and
 * return the arena slots.
 */
void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct sbus_info *info;
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, i, npages;
	u32 bus_addr;

	if (unlikely(direction == SBUS_DMA_NONE))
		BUG();

	info = sdev->bus->iommu;
	iommu = &info->iommu;
	strbuf = &info->strbuf;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	/* Find the last used segment; entry 0 is always used. */
	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);
	sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
	for (i = 0; i < npages; i++)
		iopte_val(base[i]) = 0UL;
	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);
}
  423. void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
  424. {
  425. struct sbus_info *info;
  426. struct iommu *iommu;
  427. struct strbuf *strbuf;
  428. unsigned long flags, npages;
  429. info = sdev->bus->iommu;
  430. iommu = &info->iommu;
  431. strbuf = &info->strbuf;
  432. npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
  433. npages >>= IO_PAGE_SHIFT;
  434. bus_addr &= IO_PAGE_MASK;
  435. spin_lock_irqsave(&iommu->lock, flags);
  436. sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
  437. spin_unlock_irqrestore(&iommu->lock, flags);
  438. }
/* Intentionally a no-op: the streaming buffer only needs flushing
 * before the CPU reads device-written data, which is handled in the
 * _for_cpu variant above.
 */
void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
{
}
/* Make device-written scatterlist data visible to the CPU by flushing
 * the streaming buffer over the whole mapped DVMA range.
 */
void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct sbus_info *info;
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, npages, i;
	u32 bus_addr;

	info = sdev->bus->iommu;
	iommu = &info->iommu;
	strbuf = &info->strbuf;

	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;

	/* Find the last entry with a non-zero dma_length.
	 * NOTE(review): if sglist[0].dma_length were 0 the i-- below
	 * would wrap the unsigned index; callers apparently never
	 * pass such a list — confirm before relying on this with an
	 * empty mapping.
	 */
	for (i = 0; i < nelems; i++) {
		if (!sglist[i].dma_length)
			break;
	}
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
	spin_unlock_irqrestore(&iommu->lock, flags);
}
/* Intentionally a no-op, for the same reason as
 * sbus_dma_sync_single_for_device(): only CPU-bound data requires a
 * streaming buffer flush.
 */
void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
{
}
/* Enable 64-bit DVMA mode for the given device. */
/* @bursts is a DMA_BURST* mask describing the burst sizes the device
 * supports.  Bit 14 of the per-slot configuration register enables
 * extended transfers; bits 1-4 select the allowed burst sizes.
 * Unknown slot numbers are silently ignored.
 */
void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
{
	struct sbus_info *info = sdev->bus->iommu;
	struct iommu *iommu = &info->iommu;
	int slot = sdev->slot;
	unsigned long cfg_reg;
	u64 val;

	/* Per-slot configuration registers sit at fixed offsets above
	 * the write-complete register.
	 */
	cfg_reg = iommu->write_complete_reg;
	switch (slot) {
	case 0:
		cfg_reg += 0x20UL;
		break;
	case 1:
		cfg_reg += 0x28UL;
		break;
	case 2:
		cfg_reg += 0x30UL;
		break;
	case 3:
		cfg_reg += 0x38UL;
		break;
	case 13:
		cfg_reg += 0x40UL;
		break;
	case 14:
		cfg_reg += 0x48UL;
		break;
	case 15:
		cfg_reg += 0x50UL;
		break;

	default:
		return;
	};

	val = upa_readq(cfg_reg);
	if (val & (1UL << 14UL)) {
		/* Extended transfer mode already enabled. */
		return;
	}

	val |= (1UL << 14UL);

	if (bursts & DMA_BURST8)
		val |= (1UL << 1UL);
	if (bursts & DMA_BURST16)
		val |= (1UL << 2UL);
	if (bursts & DMA_BURST32)
		val |= (1UL << 3UL);
	if (bursts & DMA_BURST64)
		val |= (1UL << 4UL);
	upa_writeq(val, cfg_reg);
}
  517. /* INO number to IMAP register offset for SYSIO external IRQ's.
  518. * This should conform to both Sunfire/Wildfire server and Fusion
  519. * desktop designs.
  520. */
  521. #define SYSIO_IMAP_SLOT0 0x2c04UL
  522. #define SYSIO_IMAP_SLOT1 0x2c0cUL
  523. #define SYSIO_IMAP_SLOT2 0x2c14UL
  524. #define SYSIO_IMAP_SLOT3 0x2c1cUL
  525. #define SYSIO_IMAP_SCSI 0x3004UL
  526. #define SYSIO_IMAP_ETH 0x300cUL
  527. #define SYSIO_IMAP_BPP 0x3014UL
  528. #define SYSIO_IMAP_AUDIO 0x301cUL
  529. #define SYSIO_IMAP_PFAIL 0x3024UL
  530. #define SYSIO_IMAP_KMS 0x302cUL
  531. #define SYSIO_IMAP_FLPY 0x3034UL
  532. #define SYSIO_IMAP_SHW 0x303cUL
  533. #define SYSIO_IMAP_KBD 0x3044UL
  534. #define SYSIO_IMAP_MS 0x304cUL
  535. #define SYSIO_IMAP_SER 0x3054UL
  536. #define SYSIO_IMAP_TIM0 0x3064UL
  537. #define SYSIO_IMAP_TIM1 0x306cUL
  538. #define SYSIO_IMAP_UE 0x3074UL
  539. #define SYSIO_IMAP_CE 0x307cUL
  540. #define SYSIO_IMAP_SBERR 0x3084UL
  541. #define SYSIO_IMAP_PMGMT 0x308cUL
  542. #define SYSIO_IMAP_GFX 0x3094UL
  543. #define SYSIO_IMAP_EUPA 0x309cUL
#define bogon     ((unsigned long) -1)
/* IMAP register offset for each SYSIO INO.  Indices 0x00-0x1f are the
 * external SBUS slot interrupts (4 slots x 8 levels, sharing one IMAP
 * per slot); the rest are onboard devices.  bogon marks INOs with no
 * interrupt source.
 */
static unsigned long sysio_irq_offsets[] = {
	/* SBUS Slot 0 --> 3, level 1 --> 7 */
	SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
	SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
	SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
	SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
	SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
	SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
	SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
	SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,

	/* Onboard devices (not relevant/used on SunFire). */
	SYSIO_IMAP_SCSI,
	SYSIO_IMAP_ETH,
	SYSIO_IMAP_BPP,
	bogon,
	SYSIO_IMAP_AUDIO,
	SYSIO_IMAP_PFAIL,
	bogon,
	bogon,
	SYSIO_IMAP_KMS,
	SYSIO_IMAP_FLPY,
	SYSIO_IMAP_SHW,
	SYSIO_IMAP_KBD,
	SYSIO_IMAP_MS,
	SYSIO_IMAP_SER,
	bogon,
	bogon,
	SYSIO_IMAP_TIM0,
	SYSIO_IMAP_TIM1,
	bogon,
	bogon,
	SYSIO_IMAP_UE,
	SYSIO_IMAP_CE,
	SYSIO_IMAP_SBERR,
	SYSIO_IMAP_PMGMT,
};

#undef bogon

#define NUM_SYSIO_OFFSETS ARRAY_SIZE(sysio_irq_offsets)
  583. /* Convert Interrupt Mapping register pointer to associated
  584. * Interrupt Clear register pointer, SYSIO specific version.
  585. */
  586. #define SYSIO_ICLR_UNUSED0 0x3400UL
  587. #define SYSIO_ICLR_SLOT0 0x340cUL
  588. #define SYSIO_ICLR_SLOT1 0x344cUL
  589. #define SYSIO_ICLR_SLOT2 0x348cUL
  590. #define SYSIO_ICLR_SLOT3 0x34ccUL
  591. static unsigned long sysio_imap_to_iclr(unsigned long imap)
  592. {
  593. unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0;
  594. return imap + diff;
  595. }
/* Translate a SYSIO INO into a fully cooked IRQ.
 *
 * The IMAP offset comes from sysio_irq_offsets[]; reg_base is
 * recovered from the write-complete register address (which lies
 * 0x2000 above the SYSIO register block base).  An unknown INO is
 * fatal (prom_halt).
 */
unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
{
	struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
	struct sbus_info *info = sbus->iommu;
	struct iommu *iommu = &info->iommu;
	unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
	unsigned long imap, iclr;
	int sbus_level = 0;

	imap = sysio_irq_offsets[ino];
	if (imap == ((unsigned long)-1)) {
		prom_printf("get_irq_translations: Bad SYSIO INO[%x]\n",
			    ino);
		prom_halt();
	}
	imap += reg_base;

	/* SYSIO inconsistency.  For external SLOTS, we have to select
	 * the right ICLR register based upon the lower SBUS irq level
	 * bits.
	 */
	if (ino >= 0x20) {
		/* Onboard device: fixed IMAP->ICLR displacement. */
		iclr = sysio_imap_to_iclr(imap);
	} else {
		/* INO encodes slot in bits 3-4, level in bits 0-2. */
		int sbus_slot = (ino & 0x18)>>3;

		sbus_level = ino & 0x7;

		switch(sbus_slot) {
		case 0:
			iclr = reg_base + SYSIO_ICLR_SLOT0;
			break;
		case 1:
			iclr = reg_base + SYSIO_ICLR_SLOT1;
			break;
		case 2:
			iclr = reg_base + SYSIO_ICLR_SLOT2;
			break;
		default:
		case 3:
			iclr = reg_base + SYSIO_ICLR_SLOT3;
			break;
		};

		/* Per-level ICLR registers are 8 bytes apart. */
		iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
	}
	return build_irq(sbus_level, iclr, imap);
}
  639. /* Error interrupt handling. */
  640. #define SYSIO_UE_AFSR 0x0030UL
  641. #define SYSIO_UE_AFAR 0x0038UL
  642. #define SYSIO_UEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */
  643. #define SYSIO_UEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */
  644. #define SYSIO_UEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */
  645. #define SYSIO_UEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */
  646. #define SYSIO_UEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */
  647. #define SYSIO_UEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/
  648. #define SYSIO_UEAFSR_RESV1 0x03ff000000000000UL /* Reserved */
  649. #define SYSIO_UEAFSR_DOFF 0x0000e00000000000UL /* Doubleword Offset */
  650. #define SYSIO_UEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */
  651. #define SYSIO_UEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */
  652. #define SYSIO_UEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */
/* Interrupt handler for SYSIO uncorrectable ECC errors.
 *
 * Latches AFSR/AFAR, acknowledges the error by writing the primary
 * and secondary cause bits back to the AFSR, then logs a decoded
 * report.  Always returns IRQ_HANDLED.
 */
static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
{
	struct sbus_bus *sbus = dev_id;
	struct sbus_info *info = sbus->iommu;
	struct iommu *iommu = &info->iommu;
	unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
	unsigned long afsr_reg, afar_reg;
	unsigned long afsr, afar, error_bits;
	int reported;

	afsr_reg = reg_base + SYSIO_UE_AFSR;
	afar_reg = reg_base + SYSIO_UE_AFAR;

	/* Latch error status. */
	afsr = upa_readq(afsr_reg);
	afar = upa_readq(afar_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SYSIO_UEAFSR_PPIO | SYSIO_UEAFSR_PDRD | SYSIO_UEAFSR_PDWR |
		 SYSIO_UEAFSR_SPIO | SYSIO_UEAFSR_SDRD | SYSIO_UEAFSR_SDWR);
	upa_writeq(error_bits, afsr_reg);

	/* Log the error. */
	printk("SYSIO[%x]: Uncorrectable ECC Error, primary error type[%s]\n",
	       sbus->portid,
	       (((error_bits & SYSIO_UEAFSR_PPIO) ?
		 "PIO" :
		 ((error_bits & SYSIO_UEAFSR_PDRD) ?
		  "DVMA Read" :
		  ((error_bits & SYSIO_UEAFSR_PDWR) ?
		   "DVMA Write" : "???")))));
	/* Shift counts match the SYSIO_UEAFSR_* field positions. */
	printk("SYSIO[%x]: DOFF[%lx] SIZE[%lx] MID[%lx]\n",
	       sbus->portid,
	       (afsr & SYSIO_UEAFSR_DOFF) >> 45UL,
	       (afsr & SYSIO_UEAFSR_SIZE) >> 42UL,
	       (afsr & SYSIO_UEAFSR_MID) >> 37UL);
	printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);

	/* List any secondary error causes that were latched. */
	printk("SYSIO[%x]: Secondary UE errors [", sbus->portid);
	reported = 0;
	if (afsr & SYSIO_UEAFSR_SPIO) {
		reported++;
		printk("(PIO)");
	}
	if (afsr & SYSIO_UEAFSR_SDRD) {
		reported++;
		printk("(DVMA Read)");
	}
	if (afsr & SYSIO_UEAFSR_SDWR) {
		reported++;
		printk("(DVMA Write)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	return IRQ_HANDLED;
}
  706. #define SYSIO_CE_AFSR 0x0040UL
  707. #define SYSIO_CE_AFAR 0x0048UL
  708. #define SYSIO_CEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */
  709. #define SYSIO_CEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */
  710. #define SYSIO_CEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */
  711. #define SYSIO_CEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO cause */
  712. #define SYSIO_CEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */
  713. #define SYSIO_CEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/
  714. #define SYSIO_CEAFSR_RESV1 0x0300000000000000UL /* Reserved */
  715. #define SYSIO_CEAFSR_ESYND 0x00ff000000000000UL /* Syndrome Bits */
  716. #define SYSIO_CEAFSR_DOFF 0x0000e00000000000UL /* Double Offset */
  717. #define SYSIO_CEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */
  718. #define SYSIO_CEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */
  719. #define SYSIO_CEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */
/* Interrupt handler for SYSIO correctable ECC errors.
 *
 * Same structure as sysio_ue_handler(): latch AFSR/AFAR, acknowledge
 * the cause bits, and log a decoded report (including the ECC
 * syndrome).  Always returns IRQ_HANDLED.
 */
static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
{
	struct sbus_bus *sbus = dev_id;
	struct sbus_info *info = sbus->iommu;
	struct iommu *iommu = &info->iommu;
	unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
	unsigned long afsr_reg, afar_reg;
	unsigned long afsr, afar, error_bits;
	int reported;

	afsr_reg = reg_base + SYSIO_CE_AFSR;
	afar_reg = reg_base + SYSIO_CE_AFAR;

	/* Latch error status. */
	afsr = upa_readq(afsr_reg);
	afar = upa_readq(afar_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SYSIO_CEAFSR_PPIO | SYSIO_CEAFSR_PDRD | SYSIO_CEAFSR_PDWR |
		 SYSIO_CEAFSR_SPIO | SYSIO_CEAFSR_SDRD | SYSIO_CEAFSR_SDWR);
	upa_writeq(error_bits, afsr_reg);

	printk("SYSIO[%x]: Correctable ECC Error, primary error type[%s]\n",
	       sbus->portid,
	       (((error_bits & SYSIO_CEAFSR_PPIO) ?
		 "PIO" :
		 ((error_bits & SYSIO_CEAFSR_PDRD) ?
		  "DVMA Read" :
		  ((error_bits & SYSIO_CEAFSR_PDWR) ?
		   "DVMA Write" : "???")))));

	/* XXX Use syndrome and afar to print out module string just like
	 * XXX UDB CE trap handler does... -DaveM
	 */
	printk("SYSIO[%x]: DOFF[%lx] ECC Syndrome[%lx] Size[%lx] MID[%lx]\n",
	       sbus->portid,
	       (afsr & SYSIO_CEAFSR_DOFF) >> 45UL,
	       (afsr & SYSIO_CEAFSR_ESYND) >> 48UL,
	       (afsr & SYSIO_CEAFSR_SIZE) >> 42UL,
	       (afsr & SYSIO_CEAFSR_MID) >> 37UL);
	printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);

	/* List any secondary error causes that were latched. */
	printk("SYSIO[%x]: Secondary CE errors [", sbus->portid);
	reported = 0;
	if (afsr & SYSIO_CEAFSR_SPIO) {
		reported++;
		printk("(PIO)");
	}
	if (afsr & SYSIO_CEAFSR_SDRD) {
		reported++;
		printk("(DVMA Read)");
	}
	if (afsr & SYSIO_CEAFSR_SDWR) {
		reported++;
		printk("(DVMA Write)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	return IRQ_HANDLED;
}
#define SYSIO_SBUS_AFSR		0x2010UL
#define SYSIO_SBUS_AFAR		0x2018UL
#define  SYSIO_SBAFSR_PLE	0x8000000000000000UL /* Primary Late PIO Error    */
#define  SYSIO_SBAFSR_PTO	0x4000000000000000UL /* Primary SBUS Timeout      */
#define  SYSIO_SBAFSR_PBERR	0x2000000000000000UL /* Primary SBUS Error ACK    */
#define  SYSIO_SBAFSR_SLE	0x1000000000000000UL /* Secondary Late PIO Error  */
#define  SYSIO_SBAFSR_STO	0x0800000000000000UL /* Secondary SBUS Timeout    */
#define  SYSIO_SBAFSR_SBERR	0x0400000000000000UL /* Secondary SBUS Error ACK  */
#define  SYSIO_SBAFSR_RESV1	0x03ff000000000000UL /* Reserved                  */
#define  SYSIO_SBAFSR_RD	0x0000800000000000UL /* Primary was late PIO read */
#define  SYSIO_SBAFSR_RESV2	0x0000600000000000UL /* Reserved                  */
#define  SYSIO_SBAFSR_SIZE	0x00001c0000000000UL /* Size of transfer          */
#define  SYSIO_SBAFSR_MID	0x000003e000000000UL /* MID causing the error     */
#define  SYSIO_SBAFSR_RESV3	0x0000001fffffffffUL /* Reserved                  */

/* Interrupt handler for SBUS-level errors (late PIO error, bus timeout,
 * error ACK).  Mirrors the CE handler: latch AFSR/AFAR, acknowledge the
 * status bits, then log primary and secondary causes.
 */
static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
{
	struct sbus_bus *sbus = dev_id;
	struct sbus_info *info = sbus->iommu;
	struct iommu *iommu = &info->iommu;
	unsigned long afsr_reg, afar_reg, reg_base;
	unsigned long afsr, afar, error_bits;
	int reported;

	/* Error registers live 0x2000 below the SBUS control register. */
	reg_base = iommu->write_complete_reg - 0x2000UL;
	afsr_reg = reg_base + SYSIO_SBUS_AFSR;
	afar_reg = reg_base + SYSIO_SBUS_AFAR;

	/* Latch the fault status/address pair. */
	afsr = upa_readq(afsr_reg);
	afar = upa_readq(afar_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SYSIO_SBAFSR_PLE | SYSIO_SBAFSR_PTO | SYSIO_SBAFSR_PBERR |
		 SYSIO_SBAFSR_SLE | SYSIO_SBAFSR_STO | SYSIO_SBAFSR_SBERR);
	upa_writeq(error_bits, afsr_reg);

	/* Log the error. */
	printk("SYSIO[%x]: SBUS Error, primary error type[%s] read(%d)\n",
	       sbus->portid,
	       (((error_bits & SYSIO_SBAFSR_PLE) ?
		 "Late PIO Error" :
		 ((error_bits & SYSIO_SBAFSR_PTO) ?
		  "Time Out" :
		  ((error_bits & SYSIO_SBAFSR_PBERR) ?
		   "Error Ack" : "???")))),
	       (afsr & SYSIO_SBAFSR_RD) ? 1 : 0);
	/* Field shift counts match the SIZE/MID masks defined above. */
	printk("SYSIO[%x]: size[%lx] MID[%lx]\n",
	       sbus->portid,
	       (afsr & SYSIO_SBAFSR_SIZE) >> 42UL,
	       (afsr & SYSIO_SBAFSR_MID) >> 37UL);
	printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);

	printk("SYSIO[%x]: Secondary SBUS errors [", sbus->portid);
	reported = 0;
	if (afsr & SYSIO_SBAFSR_SLE) {
		reported++;
		printk("(Late PIO Error)");
	}
	if (afsr & SYSIO_SBAFSR_STO) {
		reported++;
		printk("(Time Out)");
	}
	if (afsr & SYSIO_SBAFSR_SBERR) {
		reported++;
		printk("(Error Ack)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* XXX check iommu/strbuf for further error status XXX */

	return IRQ_HANDLED;
}
  843. #define ECC_CONTROL 0x0020UL
  844. #define SYSIO_ECNTRL_ECCEN 0x8000000000000000UL /* Enable ECC Checking */
  845. #define SYSIO_ECNTRL_UEEN 0x4000000000000000UL /* Enable UE Interrupts */
  846. #define SYSIO_ECNTRL_CEEN 0x2000000000000000UL /* Enable CE Interrupts */
  847. #define SYSIO_UE_INO 0x34
  848. #define SYSIO_CE_INO 0x35
  849. #define SYSIO_SBUSERR_INO 0x36
  850. static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
  851. {
  852. struct sbus_info *info = sbus->iommu;
  853. struct iommu *iommu = &info->iommu;
  854. unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
  855. unsigned int irq;
  856. u64 control;
  857. irq = sbus_build_irq(sbus, SYSIO_UE_INO);
  858. if (request_irq(irq, sysio_ue_handler, 0,
  859. "SYSIO_UE", sbus) < 0) {
  860. prom_printf("SYSIO[%x]: Cannot register UE interrupt.\n",
  861. sbus->portid);
  862. prom_halt();
  863. }
  864. irq = sbus_build_irq(sbus, SYSIO_CE_INO);
  865. if (request_irq(irq, sysio_ce_handler, 0,
  866. "SYSIO_CE", sbus) < 0) {
  867. prom_printf("SYSIO[%x]: Cannot register CE interrupt.\n",
  868. sbus->portid);
  869. prom_halt();
  870. }
  871. irq = sbus_build_irq(sbus, SYSIO_SBUSERR_INO);
  872. if (request_irq(irq, sysio_sbus_error_handler, 0,
  873. "SYSIO_SBERR", sbus) < 0) {
  874. prom_printf("SYSIO[%x]: Cannot register SBUS Error interrupt.\n",
  875. sbus->portid);
  876. prom_halt();
  877. }
  878. /* Now turn the error interrupts on and also enable ECC checking. */
  879. upa_writeq((SYSIO_ECNTRL_ECCEN |
  880. SYSIO_ECNTRL_UEEN |
  881. SYSIO_ECNTRL_CEEN),
  882. reg_base + ECC_CONTROL);
  883. control = upa_readq(iommu->write_complete_reg);
  884. control |= 0x100UL; /* SBUS Error Interrupt Enable */
  885. upa_writeq(control, iommu->write_complete_reg);
  886. }
/* Boot time initialization. */
/* Probe one SYSIO node: allocate the per-bus sbus_info, compute the
 * IOMMU and streaming-buffer register addresses from the OBP "reg"
 * property, scrub the IOMMU/strbuf state via diagnostic registers,
 * install the TSB, enable DVMA arbitration, and finally register the
 * error interrupt handlers.  Any probe failure halts via the PROM.
 */
static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
{
	const struct linux_prom64_registers *pr;
	struct device_node *dp;
	struct sbus_info *info;
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long regs, reg_base;
	u64 control;
	int i;

	dp = of_find_node_by_phandle(__node);

	sbus->portid = of_getintprop_default(dp, "upa-portid", -1);

	pr = of_get_property(dp, "reg", NULL);
	if (!pr) {
		prom_printf("sbus_iommu_init: Cannot map SYSIO control registers.\n");
		prom_halt();
	}
	regs = pr->phys_addr;

	/* GFP_ATOMIC: this runs during early boot. */
	info = kzalloc(sizeof(*info), GFP_ATOMIC);
	if (info == NULL) {
		prom_printf("sbus_iommu_init: Fatal error, "
			    "kmalloc(info) failed\n");
		prom_halt();
	}

	iommu = &info->iommu;
	strbuf = &info->strbuf;

	/* Record physical addresses of the IOMMU register set. */
	reg_base = regs + SYSIO_IOMMUREG_BASE;
	iommu->iommu_control = reg_base + IOMMU_CONTROL;
	iommu->iommu_tsbbase = reg_base + IOMMU_TSBBASE;
	iommu->iommu_flush = reg_base + IOMMU_FLUSH;

	/* ...and of the streaming buffer register set. */
	reg_base = regs + SYSIO_STRBUFREG_BASE;
	strbuf->strbuf_control = reg_base + STRBUF_CONTROL;
	strbuf->strbuf_pflush = reg_base + STRBUF_PFLUSH;
	strbuf->strbuf_fsync = reg_base + STRBUF_FSYNC;

	strbuf->strbuf_enabled = 1;

	/* Round the embedded flush-flag buffer up to a 64-byte boundary
	 * so the hardware flush-sync write lands in its own cache line.
	 */
	strbuf->strbuf_flushflag = (volatile unsigned long *)
		((((unsigned long)&strbuf->__flushflag_buf[0])
		  + 63UL)
		 & ~63UL);
	strbuf->strbuf_flushflag_pa = (unsigned long)
		__pa(strbuf->strbuf_flushflag);

	/* The SYSIO SBUS control register is used for dummy reads
	 * in order to ensure write completion.
	 */
	iommu->write_complete_reg = regs + 0x2000UL;

	/* Link into SYSIO software state. */
	sbus->iommu = info;

	printk("SYSIO: UPA portID %x, at %016lx\n",
	       sbus->portid, regs);

	/* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
	sbus_iommu_table_init(iommu, IO_TSB_SIZE);

	control = upa_readq(iommu->iommu_control);
	control = ((7UL << 16UL)	|
		   (0UL << 2UL)		|
		   (1UL << 1UL)		|
		   (1UL << 0UL));
	upa_writeq(control, iommu->iommu_control);

	/* Clean out any cruft in the IOMMU using
	 * diagnostic accesses.
	 */
	for (i = 0; i < 16; i++) {
		unsigned long dram, tag;

		dram = iommu->iommu_control + (IOMMU_DRAMDIAG - IOMMU_CONTROL);
		tag = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL);

		dram += (unsigned long)i * 8UL;
		tag += (unsigned long)i * 8UL;
		upa_writeq(0, dram);
		upa_writeq(0, tag);
	}
	/* Dummy read to make sure the diag writes completed. */
	upa_readq(iommu->write_complete_reg);

	/* Give the TSB to SYSIO. */
	upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase);

	/* Setup streaming buffer, DE=1 SB_EN=1 */
	control = (1UL << 1UL) | (1UL << 0UL);
	upa_writeq(control, strbuf->strbuf_control);

	/* Clear out the tags using diagnostics. */
	for (i = 0; i < 16; i++) {
		unsigned long ptag, ltag;

		ptag = strbuf->strbuf_control +
			(STRBUF_PTAGDIAG - STRBUF_CONTROL);
		ltag = strbuf->strbuf_control +
			(STRBUF_LTAGDIAG - STRBUF_CONTROL);
		ptag += (unsigned long)i * 8UL;
		ltag += (unsigned long)i * 8UL;

		upa_writeq(0UL, ptag);
		upa_writeq(0UL, ltag);
	}

	/* Enable DVMA arbitration for all devices/slots. */
	control = upa_readq(iommu->write_complete_reg);
	control |= 0x3fUL;
	upa_writeq(control, iommu->write_complete_reg);

	/* Now some Xfire specific grot... */
	if (this_is_starfire)
		starfire_hookup(sbus->portid);

	sysio_register_error_handlers(sbus);
}
  984. void sbus_fill_device_irq(struct sbus_dev *sdev)
  985. {
  986. struct device_node *dp = of_find_node_by_phandle(sdev->prom_node);
  987. const struct linux_prom_irqs *irqs;
  988. irqs = of_get_property(dp, "interrupts", NULL);
  989. if (!irqs) {
  990. sdev->irqs[0] = 0;
  991. sdev->num_irqs = 0;
  992. } else {
  993. unsigned int pri = irqs[0].pri;
  994. sdev->num_irqs = 1;
  995. if (pri < 0x20)
  996. pri += sdev->slot * 8;
  997. sdev->irqs[0] = sbus_build_irq(sdev->bus, pri);
  998. }
  999. }
/* Architecture hook for SBUS "ranges" setup — intentionally a no-op
 * on this architecture; the generic SBUS code needs nothing extra here.
 */
void __init sbus_arch_bus_ranges_init(struct device_node *pn, struct sbus_bus *sbus)
{
}
/* Architecture hook: probe and program the SYSIO IOMMU for this bus.
 * All the real work happens in sbus_iommu_init().
 */
void __init sbus_setup_iommu(struct sbus_bus *sbus, struct device_node *dp)
{
	sbus_iommu_init(dp->node, sbus);
}
/* Architecture hook for extra per-bus properties — intentionally a
 * no-op on this architecture.
 */
void __init sbus_setup_arch_props(struct sbus_bus *sbus, struct device_node *dp)
{
}
/* Architecture hook run before SBUS probing.  Nothing to do here;
 * always reports success.
 */
int __init sbus_arch_preinit(void)
{
	return 0;
}
/* Architecture hook run after SBUS probing completes. */
void __init sbus_arch_postinit(void)
{
	/* NOTE(review): extern-in-function is legacy style; the
	 * declaration presumably belongs in a shared header — confirm
	 * where firetruck_init() is defined before moving it.
	 */
	extern void firetruck_init(void);

	firetruck_init();
}