pci_sun4v.c

/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

#include <asm/pbm.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"
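
/* A page-sized array of IO page physical addresses is kept per CPU and
 * used to hand batches of mappings to the hypervisor in a single
 * pci_sun4v_iommu_map() call.  PGLIST_NENTS therefore also bounds the
 * largest request the allocation and mapping paths below will accept.
 */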
#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

struct sun4v_pglist {
        u64 *pglist;
};

static DEFINE_PER_CPU(struct sun4v_pglist, iommu_pglists);
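
/* Allocate 'npages' consecutive entries from the arena bitmap.  The
 * search starts at the cached hint and makes at most one wrap back to
 * the start of the map before giving up.  Returns the index of the
 * first entry, or -1 if no free run was found.  Caller must hold the
 * IOMMU lock.
 */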
static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
{
        unsigned long n, i, start, end, limit;
        int pass;

        limit = arena->limit;
        start = arena->hint;
        pass = 0;

again:
        n = find_next_zero_bit(arena->map, limit, start);
        end = n + npages;
        if (unlikely(end >= limit)) {
                if (likely(pass < 1)) {
                        limit = start;
                        start = 0;
                        pass++;
                        goto again;
                } else {
                        /* Scanned the whole thing, give up. */
                        return -1;
                }
        }

        for (i = n; i < end; i++) {
                if (test_bit(i, arena->map)) {
                        start = i + 1;
                        goto again;
                }
        }

        for (i = n; i < end; i++)
                __set_bit(i, arena->map);

        arena->hint = end;

        return n;
}

static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
{
        unsigned long i;

        for (i = base; i < (base + npages); i++)
                __clear_bit(i, arena->map);
}
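
/* Consistent (coherent) DMA allocation: grab pages, reserve a run of
 * IOTSB entries from the arena, fill the per-CPU pglist with the page
 * physical addresses, and ask the hypervisor to establish read/write
 * mappings.  The hypervisor may map fewer pages than requested in one
 * call, hence the retry loop.
 */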
static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long flags, order, first_page, npages, n;
        void *ret;
        long entry;
        u64 *pglist;
        u32 devhandle;
        int cpu;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (order >= MAX_ORDER)
                return NULL;

        npages = size >> IO_PAGE_SHIFT;
        if (npages > PGLIST_NENTS)
                return NULL;

        first_page = __get_free_pages(GFP_ATOMIC, order);
        if (first_page == 0UL)
                return NULL;
        memset((char *)first_page, 0, PAGE_SIZE << order);

        pcp = pdev->sysdata;
        devhandle = pcp->pbm->devhandle;
        iommu = pcp->pbm->iommu;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = pci_arena_alloc(&iommu->arena, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry < 0L)) {
                free_pages(first_page, order);
                return NULL;
        }

        *dma_addrp = (iommu->page_table_map_base +
                      (entry << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        first_page = __pa(first_page);

        cpu = get_cpu();

        pglist = __get_cpu_var(iommu_pglists).pglist;
        for (n = 0; n < npages; n++)
                pglist[n] = first_page + (n * PAGE_SIZE);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
                                          npages,
                                          (HV_PCI_MAP_ATTR_READ |
                                           HV_PCI_MAP_ATTR_WRITE),
                                          __pa(pglist));
                entry += num;
                npages -= num;
                pglist += num;
        } while (npages != 0);

        put_cpu();

        return ret;
}
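
/* Tear down a consistent mapping: release the arena entries, demap the
 * IOTSB range through the hypervisor, then return the pages to the
 * page allocator.
 */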
static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long flags, order, npages, entry;
        u32 devhandle;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        devhandle = pcp->pbm->devhandle;
        entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        pci_arena_free(&iommu->arena, entry, npages);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}
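
/* Map a single kernel buffer for streaming DMA.  The buffer is rounded
 * out to IO page boundaries, a run of IOTSB entries is reserved, and
 * the hypervisor maps each IO page.  Write permission is withheld for
 * PCI_DMA_TODEVICE transfers.  Returns the bus address (with the
 * original sub-page offset preserved), or PCI_DMA_ERROR_CODE.
 */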
static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr;
        u32 devhandle, bus_addr, ret;
        unsigned long prot;
        long entry;
        u64 *pglist;
        int cpu;

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        devhandle = pcp->pbm->devhandle;

        if (unlikely(direction == PCI_DMA_NONE))
                goto bad;

        oaddr = (unsigned long)ptr;
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        if (unlikely(npages > PGLIST_NENTS))
                goto bad;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = pci_arena_alloc(&iommu->arena, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry < 0L))
                goto bad;

        bus_addr = (iommu->page_table_map_base +
                    (entry << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != PCI_DMA_TODEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        cpu = get_cpu();

        pglist = __get_cpu_var(iommu_pglists).pglist;
        for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE)
                pglist[i] = base_paddr;

        do {
                unsigned long num;

                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
                                          npages, prot,
                                          __pa(pglist));
                entry += num;
                npages -= num;
                pglist += num;
        } while (npages != 0);

        put_cpu();

        return ret;

bad:
        if (printk_ratelimit())
                WARN_ON(1);
        return PCI_DMA_ERROR_CODE;
}
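
/* Undo pci_4v_map_single(): recover the IOTSB entry index from the bus
 * address, release it in the arena, and demap the range through the
 * hypervisor.
 */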
static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long flags, npages;
        long entry;
        u32 devhandle;

        if (unlikely(direction == PCI_DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return;
        }

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        devhandle = pcp->pbm->devhandle;

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);

        entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
        pci_arena_free(&iommu->arena, entry, npages);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG)	\
        (__pa(page_address((SG)->page)) + (SG)->offset)
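
/* Walk the scatterlist and emit one IO-page-sized pglist entry for
 * every page covered by the 'nused' coalesced DMA segments, then push
 * the whole batch to the hypervisor.  The scan advances through the
 * original 'nelems' entries, breaking whenever a physical page
 * boundary is crossed, so physically contiguous entries collapse into
 * the same DMA segment.
 */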
static inline void fill_sg(long entry, u32 devhandle,
                           struct scatterlist *sg,
                           int nused, int nelems, unsigned long prot)
{
        struct scatterlist *dma_sg = sg;
        struct scatterlist *sg_end = sg + nelems;
        int i, cpu, pglist_ent;
        u64 *pglist;

        cpu = get_cpu();

        pglist = __get_cpu_var(iommu_pglists).pglist;
        pglist_ent = 0;
        for (i = 0; i < nused; i++) {
                unsigned long pteval = ~0UL;
                u32 dma_npages;

                dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
                              dma_sg->dma_length +
                              ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
                do {
                        unsigned long offset;
                        signed int len;

                        /* If we are here, we know we have at least one
                         * more page to map.  So walk forward until we
                         * hit a page crossing, and begin creating new
                         * mappings from that spot.
                         */
                        for (;;) {
                                unsigned long tmp;

                                tmp = SG_ENT_PHYS_ADDRESS(sg);
                                len = sg->length;
                                if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = tmp & IO_PAGE_MASK;
                                        offset = tmp & (IO_PAGE_SIZE - 1UL);
                                        break;
                                }
                                if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
                                        offset = 0UL;
                                        len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
                                        break;
                                }
                                sg++;
                        }

                        pteval = (pteval & IOPTE_PAGE);
                        while (len > 0) {
                                pglist[pglist_ent++] = pteval;
                                pteval += IO_PAGE_SIZE;
                                len -= (IO_PAGE_SIZE - offset);
                                offset = 0;
                                dma_npages--;
                        }

                        pteval = (pteval & IOPTE_PAGE) + len;
                        sg++;

                        /* Skip over any tail mappings we've fully mapped,
                         * adjusting pteval along the way.  Stop when we
                         * detect a page crossing event.
                         */
                        while (sg < sg_end &&
                               (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
                               (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
                               ((pteval ^
                                 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
                                pteval += sg->length;
                                sg++;
                        }
                        if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
                                pteval = ~0UL;
                } while (dma_npages != 0);
                dma_sg++;
        }

        BUG_ON(pglist_ent == 0);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
                                          pglist_ent, prot,
                                          __pa(pglist));
                entry += num;
                pglist_ent -= num;
                pglist += num;
        } while (pglist_ent != 0);

        put_cpu();
}
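
/* Map a scatterlist for streaming DMA.  Single-entry lists fall back
 * to pci_4v_map_single().  Otherwise prepare_sg() coalesces the list,
 * a run of IOTSB entries is reserved, the per-entry DMA addresses are
 * rebased onto that run, and fill_sg() installs the mappings.  Returns
 * the number of coalesced DMA segments, or 0 on failure.
 */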
static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long flags, npages, prot;
        u32 devhandle, dma_base;
        struct scatterlist *sgtmp;
        long entry;
        int used;

        /* Fast path single entry scatterlists. */
        if (nelems == 1) {
                sglist->dma_address =
                        pci_4v_map_single(pdev,
                                          (page_address(sglist->page) + sglist->offset),
                                          sglist->length, direction);
                if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
                        return 0;
                sglist->dma_length = sglist->length;
                return 1;
        }

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        devhandle = pcp->pbm->devhandle;

        if (unlikely(direction == PCI_DMA_NONE))
                goto bad;

        /* Step 1: Prepare scatter list. */
        npages = prepare_sg(sglist, nelems);
        if (unlikely(npages > PGLIST_NENTS))
                goto bad;

        /* Step 2: Allocate a cluster and context, if necessary. */
        spin_lock_irqsave(&iommu->lock, flags);
        entry = pci_arena_alloc(&iommu->arena, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry < 0L))
                goto bad;

        dma_base = iommu->page_table_map_base +
                (entry << IO_PAGE_SHIFT);

        /* Step 3: Normalize DMA addresses. */
        used = nelems;

        sgtmp = sglist;
        while (used && sgtmp->dma_length) {
                sgtmp->dma_address += dma_base;
                sgtmp++;
                used--;
        }
        used = nelems - used;

        /* Step 4: Create the mappings. */
        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != PCI_DMA_TODEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        fill_sg(entry, devhandle, sglist, used, nelems, prot);

        return used;

bad:
        if (printk_ratelimit())
                WARN_ON(1);
        return 0;
}
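
/* Undo pci_4v_map_sg(): the total IO page count is recomputed from the
 * first and last used segment, the arena entries are released, and the
 * hypervisor demaps the range.
 */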
static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long flags, i, npages;
        long entry;
        u32 devhandle, bus_addr;

        if (unlikely(direction == PCI_DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
        }

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        devhandle = pcp->pbm->devhandle;

        bus_addr = sglist->dma_address & IO_PAGE_MASK;

        for (i = 1; i < nelems; i++)
                if (sglist[i].dma_length == 0)
                        break;
        i--;
        npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
                  bus_addr) >> IO_PAGE_SHIFT;

        entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        pci_arena_free(&iommu->arena, entry, npages);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
        /* Nothing to do... */
}

static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        /* Nothing to do... */
}

struct pci_iommu_ops pci_sun4v_iommu_ops = {
        .alloc_consistent		= pci_4v_alloc_consistent,
        .free_consistent		= pci_4v_free_consistent,
        .map_single			= pci_4v_map_single,
        .unmap_single			= pci_4v_unmap_single,
        .map_sg				= pci_4v_map_sg,
        .unmap_sg			= pci_4v_unmap_sg,
        .dma_sync_single_for_cpu	= pci_4v_dma_sync_single_for_cpu,
        .dma_sync_sg_for_cpu		= pci_4v_dma_sync_sg_for_cpu,
};
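
/* Drivers never call these ops directly; they go through the generic
 * sparc64 pci_* DMA wrappers, which dispatch through this ops table on
 * sun4v systems.  A minimal usage sketch (illustrative only; 'dev',
 * 'buf' and 'len' are hypothetical driver state, not part of this
 * file):
 */
#if 0
	dma_addr_t dma = pci_map_single(dev, buf, len, PCI_DMA_FROMDEVICE);
	if (dma == PCI_DMA_ERROR_CODE)
		return -ENOMEM;
	/* ... device DMAs into buf ... */
	pci_unmap_single(dev, dma, len, PCI_DMA_FROMDEVICE);
#endif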
/* SUN4V PCI configuration space accessors. */

static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
{
        if (bus == pbm->pci_first_busno) {
                if (device == 0 && func == 0)
                        return 0;
                return 1;
        }

        if (bus < pbm->pci_first_busno ||
            bus > pbm->pci_last_busno)
                return 1;
        return 0;
}
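
/* Config space reads go through the hypervisor config-get call.
 * Accesses rejected by pci_sun4v_out_of_range() simply return
 * all-ones data, mimicking a master abort.
 */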
static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
                                  int where, int size, u32 *value)
{
        struct pci_pbm_info *pbm = bus_dev->sysdata;
        u32 devhandle = pbm->devhandle;
        unsigned int bus = bus_dev->number;
        unsigned int device = PCI_SLOT(devfn);
        unsigned int func = PCI_FUNC(devfn);
        unsigned long ret;

        if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
                ret = ~0UL;
        } else {
                ret = pci_sun4v_config_get(devhandle,
                                           HV_PCI_DEVICE_BUILD(bus, device, func),
                                           where, size);
#if 0
                printk("rcfg: [%x:%x:%x:%d]=[%lx]\n",
                       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
                       where, size, ret);
#endif
        }
        switch (size) {
        case 1:
                *value = ret & 0xff;
                break;
        case 2:
                *value = ret & 0xffff;
                break;
        case 4:
                *value = ret & 0xffffffff;
                break;
        };
        return PCIBIOS_SUCCESSFUL;
}

static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
                                   int where, int size, u32 value)
{
        struct pci_pbm_info *pbm = bus_dev->sysdata;
        u32 devhandle = pbm->devhandle;
        unsigned int bus = bus_dev->number;
        unsigned int device = PCI_SLOT(devfn);
        unsigned int func = PCI_FUNC(devfn);
        unsigned long ret;

        if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
                /* Do nothing. */
        } else {
                ret = pci_sun4v_config_put(devhandle,
                                           HV_PCI_DEVICE_BUILD(bus, device, func),
                                           where, size, value);
#if 0
                printk("wcfg: [%x:%x:%x:%d] v[%x] == [%lx]\n",
                       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
                       where, size, value, ret);
#endif
        }
        return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops pci_sun4v_ops = {
        .read =		pci_sun4v_read_pci_cfg,
        .write =	pci_sun4v_write_pci_cfg,
};
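
/* Scan one PBM's bus and run the common sparc64 post-scan fixups:
 * cookie fill-in, resource assignment, IRQ fixup, 66MHz disposition
 * and bus mastering setup.
 */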
static void pbm_scan_bus(struct pci_controller_info *p,
                         struct pci_pbm_info *pbm)
{
        struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);

        if (!cookie) {
                prom_printf("%s: Critical allocation failure.\n", pbm->name);
                prom_halt();
        }

        /* All we care about is the PBM. */
        memset(cookie, 0, sizeof(*cookie));
        cookie->pbm = pbm;

        pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm);
#if 0
        pci_fixup_host_bridge_self(pbm->pci_bus);
        pbm->pci_bus->self->sysdata = cookie;
#endif
        pci_fill_in_pbm_cookies(pbm->pci_bus, pbm,
                                pbm->prom_node);
        pci_record_assignments(pbm, pbm->pci_bus);
        pci_assign_unassigned(pbm, pbm->pci_bus);
        pci_fixup_irq(pbm, pbm->pci_bus);
        pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
        pci_setup_busmastering(pbm, pbm->pci_bus);
}

static void pci_sun4v_scan_bus(struct pci_controller_info *p)
{
        if (p->pbm_A.prom_node) {
                p->pbm_A.is_66mhz_capable =
                        prom_getbool(p->pbm_A.prom_node, "66mhz-capable");

                pbm_scan_bus(p, &p->pbm_A);
        }
        if (p->pbm_B.prom_node) {
                p->pbm_B.is_66mhz_capable =
                        prom_getbool(p->pbm_B.prom_node, "66mhz-capable");

                pbm_scan_bus(p, &p->pbm_B);
        }

        /* XXX register error interrupt handlers XXX */
}
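
/* Build a virtual IRQ for a device interrupt number.  The PIL is
 * chosen from the device's PCI base class so that, for example,
 * network interrupts run at a higher priority level than storage.
 */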
static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
                                        struct pci_dev *pdev,
                                        unsigned int devino)
{
        u32 devhandle = pbm->devhandle;
        int pil;

        pil = 4;
        if (pdev) {
                switch ((pdev->class >> 16) & 0xff) {
                case PCI_BASE_CLASS_STORAGE:
                        pil = 4;
                        break;

                case PCI_BASE_CLASS_NETWORK:
                        pil = 6;
                        break;

                case PCI_BASE_CLASS_DISPLAY:
                        pil = 9;
                        break;

                case PCI_BASE_CLASS_MULTIMEDIA:
                case PCI_BASE_CLASS_MEMORY:
                case PCI_BASE_CLASS_BRIDGE:
                case PCI_BASE_CLASS_SERIAL:
                        pil = 10;
                        break;

                default:
                        pil = 4;
                        break;
                };
        }
        BUG_ON(PIL_RESERVED(pil));

        return sun4v_build_irq(devhandle, devino, pil, IBF_PCI);
}
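
/* Re-program a device BAR (or the expansion ROM register) after the
 * resource has been moved, translating the resource back into
 * bus-relative form against the PBM's IO or MEM root resource.
 */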
static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
{
        struct pcidev_cookie *pcp = pdev->sysdata;
        struct pci_pbm_info *pbm = pcp->pbm;
        struct resource *res, *root;
        u32 reg;
        int where, size, is_64bit;

        res = &pdev->resource[resource];
        if (resource < 6) {
                where = PCI_BASE_ADDRESS_0 + (resource * 4);
        } else if (resource == PCI_ROM_RESOURCE) {
                where = pdev->rom_base_reg;
        } else {
                /* Somebody might have asked allocation of a non-standard resource */
                return;
        }

        /* XXX 64-bit MEM handling is not %100 correct... XXX */
        is_64bit = 0;
        if (res->flags & IORESOURCE_IO)
                root = &pbm->io_space;
        else {
                root = &pbm->mem_space;
                if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
                    == PCI_BASE_ADDRESS_MEM_TYPE_64)
                        is_64bit = 1;
        }

        size = res->end - res->start;
        pci_read_config_dword(pdev, where, &reg);
        reg = ((reg & size) |
               (((u32)(res->start - root->start)) & ~size));
        if (resource == PCI_ROM_RESOURCE) {
                reg |= PCI_ROM_ADDRESS_ENABLE;
                res->flags |= IORESOURCE_ROM_ENABLE;
        }
        pci_write_config_dword(pdev, where, reg);

        /* This knows that the upper 32-bits of the address
         * must be zero.  Our PCI common layer enforces this.
         */
        if (is_64bit)
                pci_write_config_dword(pdev, where + 4, 0);
}

static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
                                      struct resource *res,
                                      struct resource *root)
{
        res->start += root->start;
        res->end += root->start;
}

/* Use ranges property to determine where PCI MEM, I/O, and Config
 * space are for this PCI bus module.
 */
static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
{
        int i, saw_mem, saw_io;

        saw_mem = saw_io = 0;
        for (i = 0; i < pbm->num_pbm_ranges; i++) {
                struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
                unsigned long a;
                int type;

                type = (pr->child_phys_hi >> 24) & 0x3;
                a = (((unsigned long)pr->parent_phys_hi << 32UL) |
                     ((unsigned long)pr->parent_phys_lo  <<  0UL));

                switch (type) {
                case 1:
                        /* 16-bit IO space, 16MB */
                        pbm->io_space.start = a;
                        pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
                        pbm->io_space.flags = IORESOURCE_IO;
                        saw_io = 1;
                        break;

                case 2:
                        /* 32-bit MEM space, 2GB */
                        pbm->mem_space.start = a;
                        pbm->mem_space.end = a + (0x80000000UL - 1UL);
                        pbm->mem_space.flags = IORESOURCE_MEM;
                        saw_mem = 1;
                        break;

                case 3:
                        /* XXX 64-bit MEM handling XXX */

                default:
                        break;
                };
        }

        if (!saw_io || !saw_mem) {
                prom_printf("%s: Fatal error, missing %s PBM range.\n",
                            pbm->name,
                            (!saw_io ? "IO" : "MEM"));
                prom_halt();
        }

        printk("%s: PCI IO[%lx] MEM[%lx]\n",
               pbm->name,
               pbm->io_space.start,
               pbm->mem_space.start);
}

static void pbm_register_toplevel_resources(struct pci_controller_info *p,
                                            struct pci_pbm_info *pbm)
{
        pbm->io_space.name = pbm->mem_space.name = pbm->name;

        request_resource(&ioport_resource, &pbm->io_space);
        request_resource(&iomem_resource, &pbm->mem_space);
        pci_register_legacy_regions(&pbm->io_space,
                                    &pbm->mem_space);
}
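
/* Walk the whole IOTSB once and mark any entry the hypervisor already
 * has mapped (e.g. mappings left in place by the firmware) as in use
 * in the arena bitmap, so those slots are never handed out.
 */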
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
                                            struct pci_iommu *iommu)
{
        struct pci_iommu_arena *arena = &iommu->arena;
        unsigned long i, cnt = 0;
        u32 devhandle;

        devhandle = pbm->devhandle;
        for (i = 0; i < arena->limit; i++) {
                unsigned long ret, io_attrs, ra;

                ret = pci_sun4v_iommu_getmap(devhandle,
                                             HV_PCI_TSBID(0, i),
                                             &io_attrs, &ra);
                if (ret == HV_EOK) {
                        cnt++;
                        __set_bit(i, arena->map);
                }
        }

        return cnt;
}
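
/* Size the software IOMMU state from the OBP "virtual-dma" property:
 * vdma[0] is the base of the DVMA window and vdma[1] its size.  For
 * example, the default 2GB window (base 0x80000000, size 0x80000000)
 * yields a 2MB TSB of 256K eight-byte entries, each mapping one 8K IO
 * page, which exactly covers the 2GB of DVMA space.
 */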
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
        struct pci_iommu *iommu = pbm->iommu;
        unsigned long num_tsb_entries, sz;
        u32 vdma[2], dma_mask, dma_offset;
        int err, tsbsize;

        err = prom_getproperty(pbm->prom_node, "virtual-dma",
                               (char *)&vdma[0], sizeof(vdma));
        if (err == 0 || err == -1) {
                /* No property, use default values. */
                vdma[0] = 0x80000000;
                vdma[1] = 0x80000000;
        }

        dma_mask = vdma[0];
        switch (vdma[1]) {
        case 0x20000000:
                dma_mask |= 0x1fffffff;
                tsbsize = 64;
                break;

        case 0x40000000:
                dma_mask |= 0x3fffffff;
                tsbsize = 128;
                break;

        case 0x80000000:
                dma_mask |= 0x7fffffff;
                tsbsize = 256;
                break;

        default:
                prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
                prom_halt();
        };

        tsbsize *= (8 * 1024);

        num_tsb_entries = tsbsize / sizeof(iopte_t);
        dma_offset = vdma[0];

        /* Setup initial software IOMMU state. */
        spin_lock_init(&iommu->lock);
        iommu->ctx_lowest_free = 1;
        iommu->page_table_map_base = dma_offset;
        iommu->dma_addr_mask = dma_mask;

        /* Allocate and initialize the free area map. */
        sz = num_tsb_entries / 8;
        sz = (sz + 7UL) & ~7UL;
        iommu->arena.map = kmalloc(sz, GFP_KERNEL);
        if (!iommu->arena.map) {
                prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
                prom_halt();
        }
        memset(iommu->arena.map, 0, sz);
        iommu->arena.limit = num_tsb_entries;

        sz = probe_existing_entries(pbm, iommu);

        printk("%s: TSB entries [%lu], existing mappings [%lu]\n",
               pbm->name, num_tsb_entries, sz);
}

static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm)
{
        unsigned int busrange[2];
        int prom_node = pbm->prom_node;
        int err;

        err = prom_getproperty(prom_node, "bus-range",
                               (char *)&busrange[0],
                               sizeof(busrange));
        if (err == 0 || err == -1) {
                prom_printf("%s: Fatal error, no bus-range.\n", pbm->name);
                prom_halt();
        }

        pbm->pci_first_busno = busrange[0];
        pbm->pci_last_busno = busrange[1];
}
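
/* Per-PBM initialization: pick PBM A or B from the devhandle, read the
 * OBP "ranges", "interrupt-map" and "interrupt-map-mask" properties,
 * register the top-level resources, then size the IOMMU.
 */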
static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32 devhandle)
{
        struct pci_pbm_info *pbm;
        int err, i;

        if (devhandle & 0x40)
                pbm = &p->pbm_B;
        else
                pbm = &p->pbm_A;

        pbm->parent = p;
        pbm->prom_node = prom_node;
        pbm->pci_first_slot = 1;

        pbm->devhandle = devhandle;

        sprintf(pbm->name, "SUN4V-PCI%d PBM%c",
                p->index, (pbm == &p->pbm_A ? 'A' : 'B'));

        printk("%s: devhandle[%x] prom_node[%x:%x]\n",
               pbm->name, pbm->devhandle,
               pbm->prom_node, prom_getchild(pbm->prom_node));

        prom_getstring(prom_node, "name",
                       pbm->prom_name, sizeof(pbm->prom_name));

        err = prom_getproperty(prom_node, "ranges",
                               (char *) pbm->pbm_ranges,
                               sizeof(pbm->pbm_ranges));
        if (err == 0 || err == -1) {
                prom_printf("%s: Fatal error, no ranges property.\n",
                            pbm->name);
                prom_halt();
        }

        pbm->num_pbm_ranges =
                (err / sizeof(struct linux_prom_pci_ranges));

        /* Mask out the top 8 bits of the ranges, leaving the real
         * physical address.
         */
        for (i = 0; i < pbm->num_pbm_ranges; i++)
                pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;

        pci_sun4v_determine_mem_io_space(pbm);
        pbm_register_toplevel_resources(p, pbm);

        err = prom_getproperty(prom_node, "interrupt-map",
                               (char *)pbm->pbm_intmap,
                               sizeof(pbm->pbm_intmap));
        if (err == 0 || err == -1) {
                prom_printf("%s: Fatal error, no interrupt-map property.\n",
                            pbm->name);
                prom_halt();
        }

        pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
        err = prom_getproperty(prom_node, "interrupt-map-mask",
                               (char *)&pbm->pbm_intmask,
                               sizeof(pbm->pbm_intmask));
        if (err == 0 || err == -1) {
                prom_printf("%s: Fatal error, no interrupt-map-mask.\n",
                            pbm->name);
                prom_halt();
        }

        pci_sun4v_get_bus_range(pbm);
        pci_sun4v_iommu_init(pbm);
}
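
/* Top-level probe for one "pci" OBP node.  The devhandle comes from
 * the upper cell of the node's "reg" property; the two PBMs of one
 * controller differ only in bit 0x40, so an existing controller whose
 * other PBM has devhandle ^ 0x40 adopts this node.  Otherwise a new
 * controller is allocated, along with the per-CPU pglists and the
 * per-PBM IOMMU structures.
 */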
void sun4v_pci_init(int node, char *model_name)
{
        struct pci_controller_info *p;
        struct pci_iommu *iommu;
        struct linux_prom64_registers regs;
        u32 devhandle;
        int i;

        prom_getproperty(node, "reg", (char *)&regs, sizeof(regs));
        devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff;

        for (p = pci_controller_root; p; p = p->next) {
                struct pci_pbm_info *pbm;

                if (p->pbm_A.prom_node && p->pbm_B.prom_node)
                        continue;

                pbm = (p->pbm_A.prom_node ?
                       &p->pbm_A :
                       &p->pbm_B);

                if (pbm->devhandle == (devhandle ^ 0x40)) {
                        pci_sun4v_pbm_init(p, node, devhandle);
                        return;
                }
        }

        for (i = 0; i < NR_CPUS; i++) {
                unsigned long page = get_zeroed_page(GFP_ATOMIC);

                if (!page)
                        goto fatal_memory_error;

                per_cpu(iommu_pglists, i).pglist = (u64 *) page;
        }

        p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
        if (!p)
                goto fatal_memory_error;
        memset(p, 0, sizeof(*p));

        iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
        if (!iommu)
                goto fatal_memory_error;

        memset(iommu, 0, sizeof(*iommu));
        p->pbm_A.iommu = iommu;

        iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
        if (!iommu)
                goto fatal_memory_error;

        memset(iommu, 0, sizeof(*iommu));
        p->pbm_B.iommu = iommu;

        p->next = pci_controller_root;
        pci_controller_root = p;

        p->index = pci_num_controllers++;
        p->pbms_same_domain = 0;

        p->scan_bus = pci_sun4v_scan_bus;
        p->irq_build = pci_sun4v_irq_build;
        p->base_address_update = pci_sun4v_base_address_update;
        p->resource_adjust = pci_sun4v_resource_adjust;
        p->pci_ops = &pci_sun4v_ops;

        /* Like PSYCHO and SCHIZO we have a 2GB aligned area
         * for memory space.
         */
        pci_memspace_mask = 0x7fffffffUL;

        pci_sun4v_pbm_init(p, node, devhandle);
        return;

fatal_memory_error:
        prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
        prom_halt();
}