pci_sun4v.c

/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

#include <asm/pbm.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define PGLIST_NENTS	2048

struct sun4v_pglist {
    u64 pglist[PGLIST_NENTS];
};

static DEFINE_PER_CPU(struct sun4v_pglist, iommu_pglists);

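/* IOMMU TSB entries are handed out from a simple bitmap arena:
 * pci_arena_alloc() searches for a run of 'npages' clear bits
 * (wrapping around once before giving up) and pci_arena_free()
 * clears them again.
 */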
static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
{
    unsigned long n, i, start, end, limit;
    int pass;

    limit = arena->limit;
    start = arena->hint;
    pass = 0;

again:
    n = find_next_zero_bit(arena->map, limit, start);
    end = n + npages;
    if (unlikely(end >= limit)) {
        if (likely(pass < 1)) {
            limit = start;
            start = 0;
            pass++;
            goto again;
        } else {
            /* Scanned the whole thing, give up. */
            return -1;
        }
    }

    for (i = n; i < end; i++) {
        if (test_bit(i, arena->map)) {
            start = i + 1;
            goto again;
        }
    }

    for (i = n; i < end; i++)
        __set_bit(i, arena->map);

    arena->hint = end;

    return n;
}

static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
{
    unsigned long i;

    for (i = base; i < (base + npages); i++)
        __clear_bit(i, arena->map);
}

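/* Consistent DMA allocations: get zeroed pages, reserve IOTSB entries
 * under the IOMMU lock, then build a per-cpu page list and map it via
 * the hypervisor in as many batches as pci_sun4v_iommu_map() requires.
 */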
static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
    struct pcidev_cookie *pcp;
    struct pci_iommu *iommu;
    unsigned long devhandle, flags, order, first_page, npages, n;
    void *ret;
    long entry;
    u64 *pglist;
    int cpu;

    size = IO_PAGE_ALIGN(size);
    order = get_order(size);
    if (order >= MAX_ORDER)
        return NULL;

    npages = size >> IO_PAGE_SHIFT;
    if (npages > PGLIST_NENTS)
        return NULL;

    first_page = __get_free_pages(GFP_ATOMIC, order);
    if (first_page == 0UL)
        return NULL;
    memset((char *)first_page, 0, PAGE_SIZE << order);

    pcp = pdev->sysdata;
    devhandle = pcp->pbm->devhandle;
    iommu = pcp->pbm->iommu;

    spin_lock_irqsave(&iommu->lock, flags);
    entry = pci_arena_alloc(&iommu->arena, npages);
    spin_unlock_irqrestore(&iommu->lock, flags);

    if (unlikely(entry < 0L)) {
        free_pages(first_page, order);
        return NULL;
    }

    *dma_addrp = (iommu->page_table_map_base +
                  (entry << IO_PAGE_SHIFT));
    ret = (void *) first_page;
    first_page = __pa(first_page);

    cpu = get_cpu();

    pglist = &__get_cpu_var(iommu_pglists).pglist[0];
    for (n = 0; n < npages; n++)
        pglist[n] = first_page + (n * PAGE_SIZE);

    do {
        unsigned long num;

        num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
                                  npages,
                                  (HV_PCI_MAP_ATTR_READ |
                                   HV_PCI_MAP_ATTR_WRITE),
                                  __pa(pglist));
        entry += num;
        npages -= num;
        pglist += num;
    } while (npages != 0);

    put_cpu();

    return ret;
}

static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
    struct pcidev_cookie *pcp;
    struct pci_iommu *iommu;
    unsigned long flags, order, npages, entry, devhandle;

    npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
    pcp = pdev->sysdata;
    iommu = pcp->pbm->iommu;
    devhandle = pcp->pbm->devhandle;
    entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

    spin_lock_irqsave(&iommu->lock, flags);

    pci_arena_free(&iommu->arena, entry, npages);

    do {
        unsigned long num;

        num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                    npages);
        entry += num;
        npages -= num;
    } while (npages != 0);

    spin_unlock_irqrestore(&iommu->lock, flags);

    order = get_order(size);
    if (order < 10)
        free_pages((unsigned long)cpu, order);
}

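/* Streaming single-buffer mappings.  Read access is always granted;
 * write access is added unless the transfer is PCI_DMA_TODEVICE.
 */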
static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
    struct pcidev_cookie *pcp;
    struct pci_iommu *iommu;
    unsigned long flags, npages, oaddr;
    unsigned long i, base_paddr, devhandle;
    u32 bus_addr, ret;
    unsigned long prot;
    long entry;
    u64 *pglist;
    int cpu;

    pcp = pdev->sysdata;
    iommu = pcp->pbm->iommu;
    devhandle = pcp->pbm->devhandle;

    if (unlikely(direction == PCI_DMA_NONE))
        goto bad;

    oaddr = (unsigned long)ptr;
    npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
    npages >>= IO_PAGE_SHIFT;
    if (unlikely(npages > PGLIST_NENTS))
        goto bad;

    spin_lock_irqsave(&iommu->lock, flags);
    entry = pci_arena_alloc(&iommu->arena, npages);
    spin_unlock_irqrestore(&iommu->lock, flags);

    if (unlikely(entry < 0L))
        goto bad;

    bus_addr = (iommu->page_table_map_base +
                (entry << IO_PAGE_SHIFT));
    ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
    base_paddr = __pa(oaddr & IO_PAGE_MASK);
    prot = HV_PCI_MAP_ATTR_READ;
    if (direction != PCI_DMA_TODEVICE)
        prot |= HV_PCI_MAP_ATTR_WRITE;

    cpu = get_cpu();

    pglist = &__get_cpu_var(iommu_pglists).pglist[0];
    for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE)
        pglist[i] = base_paddr;

    do {
        unsigned long num;

        num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
                                  npages, prot,
                                  __pa(pglist));
        entry += num;
        npages -= num;
        pglist += num;
    } while (npages != 0);

    put_cpu();

    return ret;

bad:
    if (printk_ratelimit())
        WARN_ON(1);
    return PCI_DMA_ERROR_CODE;
}

static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
    struct pcidev_cookie *pcp;
    struct pci_iommu *iommu;
    unsigned long flags, npages, devhandle;
    long entry;

    if (unlikely(direction == PCI_DMA_NONE)) {
        if (printk_ratelimit())
            WARN_ON(1);
        return;
    }

    pcp = pdev->sysdata;
    iommu = pcp->pbm->iommu;
    devhandle = pcp->pbm->devhandle;

    npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
    npages >>= IO_PAGE_SHIFT;
    bus_addr &= IO_PAGE_MASK;

    spin_lock_irqsave(&iommu->lock, flags);

    entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
    pci_arena_free(&iommu->arena, entry, npages);

    do {
        unsigned long num;

        num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                    npages);
        entry += num;
        npages -= num;
    } while (npages != 0);

    spin_unlock_irqrestore(&iommu->lock, flags);
}

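/* Scatterlist support.  fill_sg() walks the coalesced DMA segments
 * produced by prepare_sg(), collects the backing physical pages into
 * the per-cpu page list, and programs them through the hypervisor.
 */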
#define SG_ENT_PHYS_ADDRESS(SG)	\
    (__pa(page_address((SG)->page)) + (SG)->offset)

static inline void fill_sg(long entry, unsigned long devhandle,
                           struct scatterlist *sg,
                           int nused, int nelems, unsigned long prot)
{
    struct scatterlist *dma_sg = sg;
    struct scatterlist *sg_end = sg + nelems;
    int i, cpu, pglist_ent;
    u64 *pglist;

    cpu = get_cpu();

    pglist = &__get_cpu_var(iommu_pglists).pglist[0];
    pglist_ent = 0;
    for (i = 0; i < nused; i++) {
        unsigned long pteval = ~0UL;
        u32 dma_npages;

        dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
                      dma_sg->dma_length +
                      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
        do {
            unsigned long offset;
            signed int len;

            /* If we are here, we know we have at least one
             * more page to map.  So walk forward until we
             * hit a page crossing, and begin creating new
             * mappings from that spot.
             */
            for (;;) {
                unsigned long tmp;

                tmp = SG_ENT_PHYS_ADDRESS(sg);
                len = sg->length;
                if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
                    pteval = tmp & IO_PAGE_MASK;
                    offset = tmp & (IO_PAGE_SIZE - 1UL);
                    break;
                }
                if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
                    pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
                    offset = 0UL;
                    len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
                    break;
                }
                sg++;
            }

            pteval = (pteval & IOPTE_PAGE);
            while (len > 0) {
                pglist[pglist_ent++] = pteval;
                pteval += IO_PAGE_SIZE;
                len -= (IO_PAGE_SIZE - offset);
                offset = 0;
                dma_npages--;
            }

            pteval = (pteval & IOPTE_PAGE) + len;
            sg++;

            /* Skip over any tail mappings we've fully mapped,
             * adjusting pteval along the way.  Stop when we
             * detect a page crossing event.
             */
            while (sg < sg_end &&
                   (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
                   (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
                   ((pteval ^
                     (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
                pteval += sg->length;
                sg++;
            }
            if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
                pteval = ~0UL;
        } while (dma_npages != 0);
        dma_sg++;
    }

    BUG_ON(pglist_ent == 0);

    /* Hand the accumulated page list to the hypervisor to create the
     * mappings, batching as many entries per call as it will accept.
     * (The original listing invoked pci_sun4v_iommu_demap() here, which
     * would tear mappings down instead of creating them and leave prot
     * and pglist unused; the map call is what this function needs.)
     */
    do {
        unsigned long num;

        num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
                                  pglist_ent, prot,
                                  __pa(pglist));
        entry += num;
        pglist += num;
        pglist_ent -= num;
    } while (pglist_ent != 0);

    put_cpu();
}

static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
    struct pcidev_cookie *pcp;
    struct pci_iommu *iommu;
    unsigned long flags, npages, prot, devhandle;
    u32 dma_base;
    struct scatterlist *sgtmp;
    long entry;
    int used;

    /* Fast path single entry scatterlists. */
    if (nelems == 1) {
        sglist->dma_address =
            pci_4v_map_single(pdev,
                              (page_address(sglist->page) + sglist->offset),
                              sglist->length, direction);
        if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
            return 0;
        sglist->dma_length = sglist->length;
        return 1;
    }

    pcp = pdev->sysdata;
    iommu = pcp->pbm->iommu;
    devhandle = pcp->pbm->devhandle;

    if (unlikely(direction == PCI_DMA_NONE))
        goto bad;

    /* Step 1: Prepare scatter list. */
    npages = prepare_sg(sglist, nelems);
    if (unlikely(npages > PGLIST_NENTS))
        goto bad;

    /* Step 2: Allocate a cluster and context, if necessary. */
    spin_lock_irqsave(&iommu->lock, flags);
    entry = pci_arena_alloc(&iommu->arena, npages);
    spin_unlock_irqrestore(&iommu->lock, flags);

    if (unlikely(entry < 0L))
        goto bad;

    dma_base = iommu->page_table_map_base +
        (entry << IO_PAGE_SHIFT);

    /* Step 3: Normalize DMA addresses. */
    used = nelems;

    sgtmp = sglist;
    while (used && sgtmp->dma_length) {
        sgtmp->dma_address += dma_base;
        sgtmp++;
        used--;
    }
    used = nelems - used;

    /* Step 4: Create the mappings. */
    prot = HV_PCI_MAP_ATTR_READ;
    if (direction != PCI_DMA_TODEVICE)
        prot |= HV_PCI_MAP_ATTR_WRITE;

    fill_sg(entry, devhandle, sglist, used, nelems, prot);

    return used;

bad:
    if (printk_ratelimit())
        WARN_ON(1);
    return 0;
}

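/* Tear down a scatterlist mapping: work out the DVMA range covered by
 * the coalesced segments, release the arena entries and demap them
 * through the hypervisor.
 */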
static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
    struct pcidev_cookie *pcp;
    struct pci_iommu *iommu;
    unsigned long flags, i, npages, devhandle;
    long entry;
    u32 bus_addr;

    if (unlikely(direction == PCI_DMA_NONE)) {
        if (printk_ratelimit())
            WARN_ON(1);
    }

    pcp = pdev->sysdata;
    iommu = pcp->pbm->iommu;
    devhandle = pcp->pbm->devhandle;

    bus_addr = sglist->dma_address & IO_PAGE_MASK;

    for (i = 1; i < nelems; i++)
        if (sglist[i].dma_length == 0)
            break;
    i--;
    npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
              bus_addr) >> IO_PAGE_SHIFT;

    entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

    spin_lock_irqsave(&iommu->lock, flags);

    pci_arena_free(&iommu->arena, entry, npages);

    do {
        unsigned long num;

        num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                    npages);
        entry += num;
        npages -= num;
    } while (npages != 0);

    spin_unlock_irqrestore(&iommu->lock, flags);
}

static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
    /* Nothing to do... */
}

static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
    /* Nothing to do... */
}

struct pci_iommu_ops pci_sun4v_iommu_ops = {
    .alloc_consistent        = pci_4v_alloc_consistent,
    .free_consistent         = pci_4v_free_consistent,
    .map_single              = pci_4v_map_single,
    .unmap_single            = pci_4v_unmap_single,
    .map_sg                  = pci_4v_map_sg,
    .unmap_sg                = pci_4v_unmap_sg,
    .dma_sync_single_for_cpu = pci_4v_dma_sync_single_for_cpu,
    .dma_sync_sg_for_cpu     = pci_4v_dma_sync_sg_for_cpu,
};

/* SUN4V PCI configuration space accessors. */

static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
                                  int where, int size, u32 *value)
{
    struct pci_pbm_info *pbm = bus_dev->sysdata;
    unsigned long devhandle = pbm->devhandle;
    unsigned int bus = bus_dev->number;
    unsigned int device = PCI_SLOT(devfn);
    unsigned int func = PCI_FUNC(devfn);
    unsigned long ret;

    ret = pci_sun4v_config_get(devhandle,
                               HV_PCI_DEVICE_BUILD(bus, device, func),
                               where, size);
    switch (size) {
    case 1:
        *value = ret & 0xff;
        break;
    case 2:
        *value = ret & 0xffff;
        break;
    case 4:
        *value = ret & 0xffffffff;
        break;
    };

    return PCIBIOS_SUCCESSFUL;
}

static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
                                   int where, int size, u32 value)
{
    struct pci_pbm_info *pbm = bus_dev->sysdata;
    unsigned long devhandle = pbm->devhandle;
    unsigned int bus = bus_dev->number;
    unsigned int device = PCI_SLOT(devfn);
    unsigned int func = PCI_FUNC(devfn);
    unsigned long ret;

    ret = pci_sun4v_config_put(devhandle,
                               HV_PCI_DEVICE_BUILD(bus, device, func),
                               where, size, value);

    return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops pci_sun4v_ops = {
    .read  = pci_sun4v_read_pci_cfg,
    .write = pci_sun4v_write_pci_cfg,
};

static void pci_sun4v_scan_bus(struct pci_controller_info *p)
{
    /* XXX Implement me! XXX */
}

static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
                                        struct pci_dev *pdev,
                                        unsigned int ino)
{
    /* XXX Implement me! XXX */
    return 0;
}

/* XXX correct? XXX */
static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
{
    struct pcidev_cookie *pcp = pdev->sysdata;
    struct pci_pbm_info *pbm = pcp->pbm;
    struct resource *res, *root;
    u32 reg;
    int where, size, is_64bit;

    res = &pdev->resource[resource];
    if (resource < 6) {
        where = PCI_BASE_ADDRESS_0 + (resource * 4);
    } else if (resource == PCI_ROM_RESOURCE) {
        where = pdev->rom_base_reg;
    } else {
        /* Somebody might have asked allocation of a non-standard resource */
        return;
    }

    is_64bit = 0;
    if (res->flags & IORESOURCE_IO)
        root = &pbm->io_space;
    else {
        root = &pbm->mem_space;
        if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
            == PCI_BASE_ADDRESS_MEM_TYPE_64)
            is_64bit = 1;
    }

    size = res->end - res->start;
    pci_read_config_dword(pdev, where, &reg);
    reg = ((reg & size) |
           (((u32)(res->start - root->start)) & ~size));
    if (resource == PCI_ROM_RESOURCE) {
        reg |= PCI_ROM_ADDRESS_ENABLE;
        res->flags |= IORESOURCE_ROM_ENABLE;
    }
    pci_write_config_dword(pdev, where, reg);

    /* This knows that the upper 32-bits of the address
     * must be zero.  Our PCI common layer enforces this.
     */
    if (is_64bit)
        pci_write_config_dword(pdev, where + 4, 0);
}

/* XXX correct? XXX */
static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
                                      struct resource *res,
                                      struct resource *root)
{
    res->start += root->start;
    res->end += root->start;
}

/* Use ranges property to determine where PCI MEM, I/O, and Config
 * space are for this PCI bus module.
 */
static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
{
    int i, saw_cfg, saw_mem, saw_io;

    saw_cfg = saw_mem = saw_io = 0;
    for (i = 0; i < pbm->num_pbm_ranges; i++) {
        struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
        unsigned long a;
        int type;

        type = (pr->child_phys_hi >> 24) & 0x3;
        a = (((unsigned long)pr->parent_phys_hi << 32UL) |
             ((unsigned long)pr->parent_phys_lo <<  0UL));

        switch (type) {
        case 0:
            /* PCI config space, 16MB */
            pbm->config_space = a;
            saw_cfg = 1;
            break;

        case 1:
            /* 16-bit IO space, 16MB */
            pbm->io_space.start = a;
            pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
            pbm->io_space.flags = IORESOURCE_IO;
            saw_io = 1;
            break;

        case 2:
            /* 32-bit MEM space, 2GB */
            pbm->mem_space.start = a;
            pbm->mem_space.end = a + (0x80000000UL - 1UL);
            pbm->mem_space.flags = IORESOURCE_MEM;
            saw_mem = 1;
            break;

        default:
            break;
        };
    }

    if (!saw_cfg || !saw_io || !saw_mem) {
        prom_printf("%s: Fatal error, missing %s PBM range.\n",
                    pbm->name,
                    ((!saw_cfg ?
                      "CFG" :
                      (!saw_io ?
                       "IO" : "MEM"))));
        prom_halt();
    }

    printk("%s: PCI CFG[%lx] IO[%lx] MEM[%lx]\n",
           pbm->name,
           pbm->config_space,
           pbm->io_space.start,
           pbm->mem_space.start);
}

static void pbm_register_toplevel_resources(struct pci_controller_info *p,
                                            struct pci_pbm_info *pbm)
{
    pbm->io_space.name = pbm->mem_space.name = pbm->name;

    request_resource(&ioport_resource, &pbm->io_space);
    request_resource(&iomem_resource, &pbm->mem_space);
    pci_register_legacy_regions(&pbm->io_space,
                                &pbm->mem_space);
}

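/* Mark any IOTSB entries the hypervisor already considers mapped
 * (presumably set up by the firmware) as busy, so the arena allocator
 * never hands them out.
 */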
static void probe_existing_entries(struct pci_pbm_info *pbm,
                                   struct pci_iommu *iommu)
{
    struct pci_iommu_arena *arena = &iommu->arena;
    unsigned long i, devhandle;

    devhandle = pbm->devhandle;
    for (i = 0; i < arena->limit; i++) {
        unsigned long ret, io_attrs, ra;

        ret = pci_sun4v_iommu_getmap(devhandle,
                                     HV_PCI_TSBID(0, i),
                                     &io_attrs, &ra);
        if (ret == HV_EOK)
            __set_bit(i, arena->map);
    }
}

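/* Size the IOMMU from the "virtual-dma" property and initialize the
 * software state: DVMA base, DMA address mask and the arena bitmap.
 */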
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
    struct pci_iommu *iommu = pbm->iommu;
    unsigned long num_tsb_entries, sz;
    u32 vdma[2], dma_mask, dma_offset;
    int err, tsbsize;

    err = prom_getproperty(pbm->prom_node, "virtual-dma",
                           (char *)&vdma[0], sizeof(vdma));
    if (err == 0 || err == -1) {
        /* No property, use default values. */
        vdma[0] = 0x80000000;
        vdma[1] = 0x80000000;
    }

    dma_mask = vdma[0];
    switch (vdma[1]) {
    case 0x20000000:
        dma_mask |= 0x1fffffff;
        tsbsize = 64;
        break;

    case 0x40000000:
        dma_mask |= 0x3fffffff;
        tsbsize = 128;
        break;

    case 0x80000000:
        dma_mask |= 0x7fffffff;
        tsbsize = 128;
        break;

    default:
        prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
        prom_halt();
    };

    num_tsb_entries = tsbsize / sizeof(iopte_t);
    dma_offset = vdma[0];

    /* Setup initial software IOMMU state. */
    spin_lock_init(&iommu->lock);
    iommu->ctx_lowest_free = 1;
    iommu->page_table_map_base = dma_offset;
    iommu->dma_addr_mask = dma_mask;

    /* Allocate and initialize the free area map. */
    sz = num_tsb_entries / 8;
    sz = (sz + 7UL) & ~7UL;
    iommu->arena.map = kmalloc(sz, GFP_KERNEL);
    if (!iommu->arena.map) {
        prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
        prom_halt();
    }
    memset(iommu->arena.map, 0, sz);
    iommu->arena.limit = num_tsb_entries;

    probe_existing_entries(pbm, iommu);
}

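/* Per-PBM probe: read "reg", "ranges", "interrupt-map"/"interrupt-map-mask"
 * and "bus-range" from the PROM node, register the resources, then
 * initialize the IOMMU.
 */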
static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node)
{
    struct pci_pbm_info *pbm;
    struct linux_prom64_registers regs;
    unsigned int busrange[2];
    int err;

    /* XXX */
    pbm = &p->pbm_A;

    pbm->parent = p;
    pbm->prom_node = prom_node;
    pbm->pci_first_slot = 1;

    prom_getproperty(prom_node, "reg", (char *)&regs, sizeof(regs));

    pbm->devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff;

    sprintf(pbm->name, "SUN4V-PCI%d PBM%c",
            p->index, (pbm == &p->pbm_A ? 'A' : 'B'));
    printk("%s: devhandle[%x]\n", pbm->name, pbm->devhandle);

    prom_getstring(prom_node, "name",
                   pbm->prom_name, sizeof(pbm->prom_name));

    err = prom_getproperty(prom_node, "ranges",
                           (char *) pbm->pbm_ranges,
                           sizeof(pbm->pbm_ranges));
    if (err == 0 || err == -1) {
        prom_printf("%s: Fatal error, no ranges property.\n",
                    pbm->name);
        prom_halt();
    }

    pbm->num_pbm_ranges =
        (err / sizeof(struct linux_prom_pci_ranges));

    pci_sun4v_determine_mem_io_space(pbm);
    pbm_register_toplevel_resources(p, pbm);

    err = prom_getproperty(prom_node, "interrupt-map",
                           (char *)pbm->pbm_intmap,
                           sizeof(pbm->pbm_intmap));
    if (err != -1) {
        pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
        err = prom_getproperty(prom_node, "interrupt-map-mask",
                               (char *)&pbm->pbm_intmask,
                               sizeof(pbm->pbm_intmask));
        if (err == -1) {
            prom_printf("%s: Fatal error, no "
                        "interrupt-map-mask.\n", pbm->name);
            prom_halt();
        }
    } else {
        pbm->num_pbm_intmap = 0;
        memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
    }

    err = prom_getproperty(prom_node, "bus-range",
                           (char *)&busrange[0],
                           sizeof(busrange));
    if (err == 0 || err == -1) {
        prom_printf("%s: Fatal error, no bus-range.\n", pbm->name);
        prom_halt();
    }
    pbm->pci_first_busno = busrange[0];
    pbm->pci_last_busno = busrange[1];

    pci_sun4v_iommu_init(pbm);
}

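/* Top-level probe entry point for sun4v PCI controller nodes. */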
void sun4v_pci_init(int node, char *model_name)
{
    struct pci_controller_info *p;
    struct pci_iommu *iommu;

    p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
    if (!p) {
        prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
        prom_halt();
    }
    memset(p, 0, sizeof(*p));

    iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
    if (!iommu) {
        prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
        prom_halt();
    }
    memset(iommu, 0, sizeof(*iommu));
    p->pbm_A.iommu = iommu;

    iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
    if (!iommu) {
        prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
        prom_halt();
    }
    memset(iommu, 0, sizeof(*iommu));
    p->pbm_B.iommu = iommu;

    p->next = pci_controller_root;
    pci_controller_root = p;

    p->index = pci_num_controllers++;
    p->pbms_same_domain = 0;

    p->scan_bus = pci_sun4v_scan_bus;
    p->irq_build = pci_sun4v_irq_build;
    p->base_address_update = pci_sun4v_base_address_update;
    p->resource_adjust = pci_sun4v_resource_adjust;
    p->pci_ops = &pci_sun4v_ops;

    /* Like PSYCHO and SCHIZO we have a 2GB aligned area
     * for memory space.
     */
    pci_memspace_mask = 0x7fffffffUL;

    pci_sun4v_pbm_init(p, node);

    prom_printf("sun4v_pci_init: Implement me.\n");
    prom_halt();
}