/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

#include <asm/pbm.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"
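
/* Per-cpu scratch list of physical page addresses handed to the
 * pci_sun4v_iommu_map() hypervisor call.  Users call get_cpu()
 * before filling it in, so the list cannot be reused underneath
 * them by a migration to another cpu.  At 2048 eight-byte entries
 * this costs 16KB per cpu.
 */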
#define PGLIST_NENTS	2048

struct sun4v_pglist {
	u64	pglist[PGLIST_NENTS];
};

static DEFINE_PER_CPU(struct sun4v_pglist, iommu_pglists);
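
/* Allocate NPAGES contiguous IOMMU TSB entries from the arena bitmap.
 * The search starts at the rotating hint and wraps around once before
 * giving up.  Returns the first entry index, or -1 on failure.  The
 * caller must hold iommu->lock.
 */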
static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}
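
/* Return a previously allocated range of TSB entries to the arena.
 * The caller must hold iommu->lock.
 */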
static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}
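
/* Allocate a physically contiguous, zeroed buffer and map it into the
 * IOMMU: grab pages, reserve TSB entries from the arena, build the
 * per-cpu page list, then hand it to the hypervisor.  The hypervisor
 * may map fewer entries than requested per call, hence the retry loop.
 */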
static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long devhandle, flags, order, first_page, npages, n;
	void *ret;
	long entry;
	u64 *pglist;
	int cpu;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= MAX_ORDER)
		return NULL;

	npages = size >> IO_PAGE_SHIFT;
	if (npages > PGLIST_NENTS)
		return NULL;

	first_page = __get_free_pages(GFP_ATOMIC, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	pcp = pdev->sysdata;
	devhandle = pcp->pbm->devhandle;
	iommu = pcp->pbm->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	cpu = get_cpu();

	pglist = &__get_cpu_var(iommu_pglists).pglist[0];
	for (n = 0; n < npages; n++)
		pglist[n] = first_page + (n * PAGE_SIZE);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages,
					  (HV_PCI_MAP_ATTR_READ |
					   HV_PCI_MAP_ATTR_WRITE),
					  __pa(pglist));
		entry += num;
		npages -= num;
		pglist += num;
	} while (npages != 0);

	put_cpu();

	return ret;
}
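
/* Undo pci_4v_alloc_consistent(): release the arena entries, ask the
 * hypervisor to demap them, and free the underlying pages.
 */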
static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, npages, entry, devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
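
/* Map a single CPU buffer for streaming DMA.  The returned bus address
 * carries the original byte offset within the first IO page.  Write
 * permission is only granted when the direction allows the device to
 * write to memory.
 */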
static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, devhandle;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;
	u64 *pglist;
	int cpu;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	if (unlikely(npages > PGLIST_NENTS))
		goto bad;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	cpu = get_cpu();

	pglist = &__get_cpu_var(iommu_pglists).pglist[0];
	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE)
		pglist[i] = base_paddr;

	do {
		unsigned long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot,
					  __pa(pglist));
		entry += num;
		npages -= num;
		pglist += num;
	} while (npages != 0);

	put_cpu();

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return PCI_DMA_ERROR_CODE;
}
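
/* Tear down a streaming mapping created by pci_4v_map_single(). */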
static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, devhandle;
	long entry;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)
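
/* Build IOMMU page lists for a prepared scatterlist and program them
 * into the TSB via the hypervisor.  The outer loop walks the NUSED
 * DMA segments produced by prepare_sg(); the inner loops walk the
 * original NELEMS entries, coalescing physically contiguous pieces
 * and emitting one page-list entry per IO page.  As elsewhere, the
 * hypervisor call may process fewer entries than requested, so we
 * loop until the whole list has been mapped.
 */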
static inline void fill_sg(long entry, unsigned long devhandle,
			   struct scatterlist *sg,
			   int nused, int nelems, unsigned long prot)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i, cpu, pglist_ent;
	u64 *pglist;

	cpu = get_cpu();

	pglist = &__get_cpu_var(iommu_pglists).pglist[0];
	pglist_ent = 0;
	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = (pteval & IOPTE_PAGE);
			while (len > 0) {
				pglist[pglist_ent++] = pteval;
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}

	BUG_ON(pglist_ent == 0);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  pglist_ent, prot, __pa(pglist));
		entry += num;
		pglist_ent -= num;
		pglist += num;
	} while (pglist_ent != 0);

	put_cpu();
}
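
/* Map a scatterlist for streaming DMA.  Single-entry lists fall back
 * to pci_4v_map_single().  Otherwise prepare_sg() coalesces the list
 * into DMA segments, a block of TSB entries is reserved, the segment
 * addresses are rebased onto the IOMMU window, and fill_sg() programs
 * the translations.  Returns the number of DMA segments in use, or 0
 * on failure.
 */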
static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, prot, devhandle;
	u32 dma_base;
	struct scatterlist *sgtmp;
	long entry;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_4v_map_single(pdev,
					  (page_address(sglist->page) + sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	/* Step 1: Prepare scatter list. */
	npages = prepare_sg(sglist, nelems);
	if (unlikely(npages > PGLIST_NENTS))
		goto bad;

	/* Step 2: Allocate a cluster and context, if necessary. */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	fill_sg(entry, devhandle, sglist, used, nelems, prot);

	return used;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;
}
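
/* Tear down a scatterlist mapping: compute the span of bus addresses
 * covered by the in-use segments, free the arena entries, and demap
 * them in the hypervisor.
 */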
static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, i, npages, devhandle;
	long entry;
	u32 bus_addr;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
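
/* DMA on sun4v is cache coherent and the hypervisor owns all IOMMU
 * flushing, so the sync operations have nothing to do.
 */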
static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	/* Nothing to do... */
}

static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	/* Nothing to do... */
}

struct pci_iommu_ops pci_sun4v_iommu_ops = {
	.alloc_consistent		= pci_4v_alloc_consistent,
	.free_consistent		= pci_4v_free_consistent,
	.map_single			= pci_4v_map_single,
	.unmap_single			= pci_4v_unmap_single,
	.map_sg				= pci_4v_map_sg,
	.unmap_sg			= pci_4v_unmap_sg,
	.dma_sync_single_for_cpu	= pci_4v_dma_sync_single_for_cpu,
	.dma_sync_sg_for_cpu		= pci_4v_dma_sync_sg_for_cpu,
};
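
/* Drivers never call these ops directly; on sparc64 they sit behind
 * the generic PCI DMA API.  A minimal sketch of how a driver ends up
 * here (the buffer and length names are made up for illustration):
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *	dma_addr_t dma = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
 *		--> pci_4v_map_single()
 *	... device DMAs into buf ...
 *	pci_unmap_single(pdev, dma, len, PCI_DMA_FROMDEVICE);
 *		--> pci_4v_unmap_single()
 */
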
/* SUN4V PCI configuration space accessors. */

static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus)
{
	if (bus < pbm->pci_first_busno ||
	    bus > pbm->pci_last_busno)
		return 1;
	return 0;
}

static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				  int where, int size, u32 *value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus)) {
		ret = ~0UL;
	} else {
		ret = pci_sun4v_config_get(devhandle,
					   HV_PCI_DEVICE_BUILD(bus, device, func),
					   where, size);
#if 0
		printk("read_pci_cfg: devh[%x] device[%08x] where[%x] sz[%d] "
		       "== [%016lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, ret);
#endif
	}
	switch (size) {
	case 1:
		*value = ret & 0xff;
		break;
	case 2:
		*value = ret & 0xffff;
		break;
	case 4:
		*value = ret & 0xffffffff;
		break;
	};

	return PCIBIOS_SUCCESSFUL;
}

static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				   int where, int size, u32 value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus)) {
		/* Do nothing. */
	} else {
		ret = pci_sun4v_config_put(devhandle,
					   HV_PCI_DEVICE_BUILD(bus, device, func),
					   where, size, value);
#if 0
		printk("write_pci_cfg: devh[%x] device[%08x] where[%x] sz[%d] "
		       "val[%08x] == [%016lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, value, ret);
#endif
	}
	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops pci_sun4v_ops = {
	.read =		pci_sun4v_read_pci_cfg,
	.write =	pci_sun4v_write_pci_cfg,
};
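
/* Scan one PBM's PCI bus, then run the standard sparc64 fixups over
 * it: fill in device cookies, record and assign resources, fix up
 * IRQs, determine 66MHz disposition, and enable bus mastering.
 */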
static void pbm_scan_bus(struct pci_controller_info *p,
			 struct pci_pbm_info *pbm)
{
	struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);

	if (!cookie) {
		prom_printf("%s: Critical allocation failure.\n", pbm->name);
		prom_halt();
	}

	/* All we care about is the PBM. */
	memset(cookie, 0, sizeof(*cookie));
	cookie->pbm = pbm;

	pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno,
				    p->pci_ops,
				    pbm);
#if 0
	pci_fixup_host_bridge_self(pbm->pci_bus);
	pbm->pci_bus->self->sysdata = cookie;
#endif

	pci_fill_in_pbm_cookies(pbm->pci_bus, pbm,
				prom_getchild(pbm->prom_node));
	pci_record_assignments(pbm, pbm->pci_bus);
	pci_assign_unassigned(pbm, pbm->pci_bus);
	pci_fixup_irq(pbm, pbm->pci_bus);
	pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
	pci_setup_busmastering(pbm, pbm->pci_bus);
}

static void pci_sun4v_scan_bus(struct pci_controller_info *p)
{
	if (p->pbm_A.prom_node) {
		p->pbm_A.is_66mhz_capable =
			prom_getbool(p->pbm_A.prom_node, "66mhz-capable");
		pbm_scan_bus(p, &p->pbm_A);
	}
	if (p->pbm_B.prom_node) {
		p->pbm_B.is_66mhz_capable =
			prom_getbool(p->pbm_B.prom_node, "66mhz-capable");
		pbm_scan_bus(p, &p->pbm_B);
	}

	/* XXX register error interrupt handlers XXX */
}
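
/* Build an IRQ for a device interrupt, picking the PIL from the PCI
 * base class so that, for example, network devices interrupt at a
 * higher priority than storage devices.
 */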
static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
					struct pci_dev *pdev,
					unsigned int devino)
{
	u32 devhandle = pbm->devhandle;
	int pil;

	pil = 4;
	if (pdev) {
		switch ((pdev->class >> 16) & 0xff) {
		case PCI_BASE_CLASS_STORAGE:
			pil = 4;
			break;

		case PCI_BASE_CLASS_NETWORK:
			pil = 6;
			break;

		case PCI_BASE_CLASS_DISPLAY:
			pil = 9;
			break;

		case PCI_BASE_CLASS_MULTIMEDIA:
		case PCI_BASE_CLASS_MEMORY:
		case PCI_BASE_CLASS_BRIDGE:
		case PCI_BASE_CLASS_SERIAL:
			pil = 10;
			break;

		default:
			pil = 4;
			break;
		};
	}
	BUG_ON(PIL_RESERVED(pil));

	return sun4v_build_irq(devhandle, devino, pil, IBF_PCI);
}
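
/* Write an updated resource value back into a device's BAR (or ROM
 * register), translating from the kernel's resource view into the
 * bus-relative address the device expects.
 */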
static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct resource *res, *root;
	u32 reg;
	int where, size, is_64bit;

	res = &pdev->resource[resource];
	if (resource < 6) {
		where = PCI_BASE_ADDRESS_0 + (resource * 4);
	} else if (resource == PCI_ROM_RESOURCE) {
		where = pdev->rom_base_reg;
	} else {
		/* Somebody might have asked allocation of a non-standard resource */
		return;
	}

	/* XXX 64-bit MEM handling is not 100% correct... XXX */
	is_64bit = 0;
	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else {
		root = &pbm->mem_space;
		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
			is_64bit = 1;
	}

	size = res->end - res->start;
	pci_read_config_dword(pdev, where, &reg);
	reg = ((reg & size) |
	       (((u32)(res->start - root->start)) & ~size));
	if (resource == PCI_ROM_RESOURCE) {
		reg |= PCI_ROM_ADDRESS_ENABLE;
		res->flags |= IORESOURCE_ROM_ENABLE;
	}
	pci_write_config_dword(pdev, where, reg);

	/* This knows that the upper 32-bits of the address
	 * must be zero.  Our PCI common layer enforces this.
	 */
	if (is_64bit)
		pci_write_config_dword(pdev, where + 4, 0);
}

static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
				      struct resource *res,
				      struct resource *root)
{
	res->start += root->start;
	res->end += root->start;
}

/* Use ranges property to determine where PCI MEM, I/O, and Config
 * space are for this PCI bus module.
 */
static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
{
	int i, saw_mem, saw_io;

	saw_mem = saw_io = 0;
	for (i = 0; i < pbm->num_pbm_ranges; i++) {
		struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
		unsigned long a;
		int type;

		type = (pr->child_phys_hi >> 24) & 0x3;
		a = (((unsigned long)pr->parent_phys_hi << 32UL) |
		     ((unsigned long)pr->parent_phys_lo << 0UL));

		switch (type) {
		case 1:
			/* 16-bit IO space, 16MB */
			pbm->io_space.start = a;
			pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
			pbm->io_space.flags = IORESOURCE_IO;
			saw_io = 1;
			break;

		case 2:
			/* 32-bit MEM space, 2GB */
			pbm->mem_space.start = a;
			pbm->mem_space.end = a + (0x80000000UL - 1UL);
			pbm->mem_space.flags = IORESOURCE_MEM;
			saw_mem = 1;
			break;

		case 3:
			/* XXX 64-bit MEM handling XXX */

		default:
			break;
		};
	}

	if (!saw_io || !saw_mem) {
		prom_printf("%s: Fatal error, missing %s PBM range.\n",
			    pbm->name,
			    (!saw_io ? "IO" : "MEM"));
		prom_halt();
	}

	printk("%s: PCI IO[%lx] MEM[%lx]\n",
	       pbm->name,
	       pbm->io_space.start,
	       pbm->mem_space.start);
}

static void pbm_register_toplevel_resources(struct pci_controller_info *p,
					    struct pci_pbm_info *pbm)
{
	pbm->io_space.name = pbm->mem_space.name = pbm->name;

	request_resource(&ioport_resource, &pbm->io_space);
	request_resource(&iomem_resource, &pbm->mem_space);
	pci_register_legacy_regions(&pbm->io_space,
				    &pbm->mem_space);
}
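
/* Walk the TSB and mark any entries the hypervisor already has mapped
 * (typically translations set up by the firmware) as in use in the
 * arena, so we never hand them out again.
 */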
static void probe_existing_entries(struct pci_pbm_info *pbm,
				   struct pci_iommu *iommu)
{
	struct pci_iommu_arena *arena = &iommu->arena;
	unsigned long i, devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK)
			__set_bit(i, arena->map);
	}
}
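
/* Initialize the software IOMMU state from the "virtual-dma" OBP
 * property: vdma[0] is the base of the DMA window, vdma[1] its size.
 * tsbsize below is in units of 8K bytes of TSB; with 8-byte entries
 * each mapping one 8K IO page, a 64 * 8K TSB covers the 512MB window,
 * 128 covers 1GB, and 256 covers 2GB.
 */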
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct pci_iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 vdma[2], dma_mask, dma_offset;
	int err, tsbsize;

	err = prom_getproperty(pbm->prom_node, "virtual-dma",
			       (char *)&vdma[0], sizeof(vdma));
	if (err == 0 || err == -1) {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	dma_mask = vdma[0];
	switch (vdma[1]) {
	case 0x20000000:
		dma_mask |= 0x1fffffff;
		tsbsize = 64;
		break;

	case 0x40000000:
		dma_mask |= 0x3fffffff;
		tsbsize = 128;
		break;

	case 0x80000000:
		dma_mask |= 0x7fffffff;
		tsbsize = 256;
		break;

	default:
		prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
		prom_halt();
	};

	tsbsize *= (8 * 1024);

	num_tsb_entries = tsbsize / sizeof(iopte_t);
	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kmalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
		prom_halt();
	}
	memset(iommu->arena.map, 0, sz);
	iommu->arena.limit = num_tsb_entries;

	probe_existing_entries(pbm, iommu);
}

/* Don't get this from the root nexus, get it from the "pci@0" node below. */
static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm)
{
	unsigned int busrange[2];
	int prom_node = pbm->prom_node;
	int err;

	prom_node = prom_getchild(prom_node);
	if (prom_node == 0) {
		prom_printf("%s: Fatal error, no child OBP node.\n", pbm->name);
		prom_halt();
	}

	err = prom_getproperty(prom_node, "bus-range",
			       (char *)&busrange[0],
			       sizeof(busrange));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no bus-range.\n", pbm->name);
		prom_halt();
	}

	pbm->pci_first_busno = busrange[0];
	pbm->pci_last_busno = busrange[1];
}
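
/* Fill in one PBM from its OBP node: ranges, interrupt-map, bus range,
 * and IOMMU state.  Bit 0x40 of the devhandle selects PBM B.
 */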
static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, unsigned int devhandle)
{
	struct pci_pbm_info *pbm;
	int err, i;

	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->parent = p;
	pbm->prom_node = prom_node;
	pbm->pci_first_slot = 1;

	pbm->devhandle = devhandle;

	sprintf(pbm->name, "SUN4V-PCI%d PBM%c",
		p->index, (pbm == &p->pbm_A ? 'A' : 'B'));

	printk("%s: devhandle[%x]\n", pbm->name, pbm->devhandle);

	prom_getstring(prom_node, "name",
		       pbm->prom_name, sizeof(pbm->prom_name));

	err = prom_getproperty(prom_node, "ranges",
			       (char *) pbm->pbm_ranges,
			       sizeof(pbm->pbm_ranges));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no ranges property.\n",
			    pbm->name);
		prom_halt();
	}

	pbm->num_pbm_ranges =
		(err / sizeof(struct linux_prom_pci_ranges));

	/* Mask out the top 4 bits of the ranges, leaving the real
	 * physical address.
	 */
	for (i = 0; i < pbm->num_pbm_ranges; i++)
		pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;

	pci_sun4v_determine_mem_io_space(pbm);
	pbm_register_toplevel_resources(p, pbm);

	err = prom_getproperty(prom_node, "interrupt-map",
			       (char *)pbm->pbm_intmap,
			       sizeof(pbm->pbm_intmap));
	if (err != -1) {
		pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
		err = prom_getproperty(prom_node, "interrupt-map-mask",
				       (char *)&pbm->pbm_intmask,
				       sizeof(pbm->pbm_intmask));
		if (err == -1) {
			prom_printf("%s: Fatal error, no "
				    "interrupt-map-mask.\n", pbm->name);
			prom_halt();
		}
	} else {
		pbm->num_pbm_intmap = 0;
		memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
	}

	pci_sun4v_get_bus_range(pbm);
	pci_sun4v_iommu_init(pbm);
}
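
/* Top-level probe entry for a sun4v PCI node.  The two PBMs of one
 * controller differ only in bit 0x40 of their devhandle, so if a
 * controller with the sibling devhandle already exists we attach this
 * node as its other PBM; otherwise we allocate a new controller.
 */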
void sun4v_pci_init(int node, char *model_name)
{
	struct pci_controller_info *p;
	struct pci_iommu *iommu;
	struct linux_prom64_registers regs;
	unsigned int devhandle;

	prom_getproperty(node, "reg", (char *)&regs, sizeof(regs));
	devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff;

	for (p = pci_controller_root; p; p = p->next) {
		struct pci_pbm_info *pbm;

		if (p->pbm_A.prom_node && p->pbm_B.prom_node)
			continue;

		pbm = (p->pbm_A.prom_node ?
		       &p->pbm_A :
		       &p->pbm_B);

		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(p, node, devhandle);
			return;
		}
	}

	p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p) {
		prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
		prom_halt();
	}
	memset(p, 0, sizeof(*p));

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu) {
		prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
		prom_halt();
	}
	memset(iommu, 0, sizeof(*iommu));
	p->pbm_A.iommu = iommu;

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu) {
		prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
		prom_halt();
	}
	memset(iommu, 0, sizeof(*iommu));
	p->pbm_B.iommu = iommu;

	p->next = pci_controller_root;
	pci_controller_root = p;

	p->index = pci_num_controllers++;
	p->pbms_same_domain = 0;

	p->scan_bus = pci_sun4v_scan_bus;
	p->irq_build = pci_sun4v_irq_build;
	p->base_address_update = pci_sun4v_base_address_update;
	p->resource_adjust = pci_sun4v_resource_adjust;
	p->pci_ops = &pci_sun4v_ops;

	/* Like PSYCHO and SCHIZO we have a 2GB aligned area
	 * for memory space.
	 */
	pci_memspace_mask = 0x7fffffffUL;

	pci_sun4v_pbm_init(p, node, devhandle);
}