pci_sun4v.c

/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

#include <asm/pbm.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

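/* Each CPU keeps one page worth of physical addresses (PGLIST_NENTS
 * entries).  The list is filled in and handed to the hypervisor when
 * programming IOMMU translations, so a whole run of pages can be
 * mapped per hypervisor call.
 */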
#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

struct sun4v_pglist {
        u64 *pglist;
};

static DEFINE_PER_CPU(struct sun4v_pglist, iommu_pglists);

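/* Allocate NPAGES contiguous IOMMU entries from the arena bitmap.
 * The search starts at the rotating hint and wraps around once before
 * giving up.  Callers serialize on the IOMMU lock.
 */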
static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
{
        unsigned long n, i, start, end, limit;
        int pass;

        limit = arena->limit;
        start = arena->hint;
        pass = 0;

again:
        n = find_next_zero_bit(arena->map, limit, start);
        end = n + npages;
        if (unlikely(end >= limit)) {
                if (likely(pass < 1)) {
                        limit = start;
                        start = 0;
                        pass++;
                        goto again;
                } else {
                        /* Scanned the whole thing, give up. */
                        return -1;
                }
        }

        for (i = n; i < end; i++) {
                if (test_bit(i, arena->map)) {
                        start = i + 1;
                        goto again;
                }
        }

        for (i = n; i < end; i++)
                __set_bit(i, arena->map);

        arena->hint = end;

        return n;
}

static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
{
        unsigned long i;

        for (i = base; i < (base + npages); i++)
                __clear_bit(i, arena->map);
}

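/* Consistent (coherent) DMA allocation: grab pages, reserve a run of
 * IOMMU entries in the arena, then hand the per-CPU page list to the
 * hypervisor.  A single hypervisor call may program fewer entries than
 * requested, hence the retry loop.
 */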
static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long flags, order, first_page, npages, n;
        void *ret;
        long entry;
        u64 *pglist;
        u32 devhandle;
        int cpu;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (order >= MAX_ORDER)
                return NULL;

        npages = size >> IO_PAGE_SHIFT;
        if (npages > PGLIST_NENTS)
                return NULL;

        first_page = __get_free_pages(GFP_ATOMIC, order);
        if (first_page == 0UL)
                return NULL;
        memset((char *)first_page, 0, PAGE_SIZE << order);

        pcp = pdev->sysdata;
        devhandle = pcp->pbm->devhandle;
        iommu = pcp->pbm->iommu;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = pci_arena_alloc(&iommu->arena, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry < 0L)) {
                free_pages(first_page, order);
                return NULL;
        }

        *dma_addrp = (iommu->page_table_map_base +
                      (entry << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        first_page = __pa(first_page);

        cpu = get_cpu();

        pglist = __get_cpu_var(iommu_pglists).pglist;
        for (n = 0; n < npages; n++)
                pglist[n] = first_page + (n * PAGE_SIZE);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
                                          npages,
                                          (HV_PCI_MAP_ATTR_READ |
                                           HV_PCI_MAP_ATTR_WRITE),
                                          __pa(pglist));
                entry += num;
                npages -= num;
                pglist += num;
        } while (npages != 0);

        put_cpu();

        return ret;
}

static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long flags, order, npages, entry;
        u32 devhandle;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        devhandle = pcp->pbm->devhandle;
        entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        pci_arena_free(&iommu->arena, entry, npages);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}

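/* Map a single streaming DMA buffer.  The returned bus address keeps the
 * sub-page offset of the original pointer; write permission is withheld
 * for PCI_DMA_TODEVICE transfers.
 */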
static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr;
        u32 devhandle, bus_addr, ret;
        unsigned long prot;
        long entry;
        u64 *pglist;
        int cpu;

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        devhandle = pcp->pbm->devhandle;

        if (unlikely(direction == PCI_DMA_NONE))
                goto bad;

        oaddr = (unsigned long)ptr;
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        if (unlikely(npages > PGLIST_NENTS))
                goto bad;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = pci_arena_alloc(&iommu->arena, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry < 0L))
                goto bad;

        bus_addr = (iommu->page_table_map_base +
                    (entry << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != PCI_DMA_TODEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        cpu = get_cpu();

        pglist = __get_cpu_var(iommu_pglists).pglist;
        for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE)
                pglist[i] = base_paddr;

        do {
                unsigned long num;

                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
                                          npages, prot,
                                          __pa(pglist));
                entry += num;
                npages -= num;
                pglist += num;
        } while (npages != 0);

        put_cpu();

        return ret;

bad:
        if (printk_ratelimit())
                WARN_ON(1);
        return PCI_DMA_ERROR_CODE;
}

static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long flags, npages;
        long entry;
        u32 devhandle;

        if (unlikely(direction == PCI_DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return;
        }

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        devhandle = pcp->pbm->devhandle;

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);

        entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
        pci_arena_free(&iommu->arena, entry, npages);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

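/* fill_sg() walks the coalesced scatterlist produced by prepare_sg() and
 * builds the per-CPU list of physical page addresses covering every DMA
 * segment, then programs those IOMMU entries through the hypervisor.
 */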
#define SG_ENT_PHYS_ADDRESS(SG)	\
        (__pa(page_address((SG)->page)) + (SG)->offset)

static inline void fill_sg(long entry, u32 devhandle,
                           struct scatterlist *sg,
                           int nused, int nelems, unsigned long prot)
{
        struct scatterlist *dma_sg = sg;
        struct scatterlist *sg_end = sg + nelems;
        int i, cpu, pglist_ent;
        u64 *pglist;

        cpu = get_cpu();

        pglist = __get_cpu_var(iommu_pglists).pglist;
        pglist_ent = 0;
        for (i = 0; i < nused; i++) {
                unsigned long pteval = ~0UL;
                u32 dma_npages;

                dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
                              dma_sg->dma_length +
                              ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
                do {
                        unsigned long offset;
                        signed int len;

                        /* If we are here, we know we have at least one
                         * more page to map.  So walk forward until we
                         * hit a page crossing, and begin creating new
                         * mappings from that spot.
                         */
                        for (;;) {
                                unsigned long tmp;

                                tmp = SG_ENT_PHYS_ADDRESS(sg);
                                len = sg->length;
                                if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = tmp & IO_PAGE_MASK;
                                        offset = tmp & (IO_PAGE_SIZE - 1UL);
                                        break;
                                }
                                if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
                                        offset = 0UL;
                                        len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
                                        break;
                                }
                                sg++;
                        }

                        pteval = (pteval & IOPTE_PAGE);
                        while (len > 0) {
                                pglist[pglist_ent++] = pteval;
                                pteval += IO_PAGE_SIZE;
                                len -= (IO_PAGE_SIZE - offset);
                                offset = 0;
                                dma_npages--;
                        }

                        pteval = (pteval & IOPTE_PAGE) + len;
                        sg++;

                        /* Skip over any tail mappings we've fully mapped,
                         * adjusting pteval along the way.  Stop when we
                         * detect a page crossing event.
                         */
                        while (sg < sg_end &&
                               (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
                               (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
                               ((pteval ^
                                 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
                                pteval += sg->length;
                                sg++;
                        }
                        if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
                                pteval = ~0UL;
                } while (dma_npages != 0);
                dma_sg++;
        }

        BUG_ON(pglist_ent == 0);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
                                          pglist_ent, prot,
                                          __pa(pglist));
                entry += num;
                pglist += num;
                pglist_ent -= num;
        } while (pglist_ent != 0);

        put_cpu();
}

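/* Map a scatterlist.  Single-entry lists take the map_single() fast path;
 * otherwise the list is coalesced, a run of IOMMU entries is reserved, and
 * fill_sg() programs the translations.
 */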
static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long flags, npages, prot;
        u32 devhandle, dma_base;
        struct scatterlist *sgtmp;
        long entry;
        int used;

        /* Fast path single entry scatterlists. */
        if (nelems == 1) {
                sglist->dma_address =
                        pci_4v_map_single(pdev,
                                          (page_address(sglist->page) + sglist->offset),
                                          sglist->length, direction);
                if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
                        return 0;
                sglist->dma_length = sglist->length;
                return 1;
        }

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        devhandle = pcp->pbm->devhandle;

        if (unlikely(direction == PCI_DMA_NONE))
                goto bad;

        /* Step 1: Prepare scatter list. */
        npages = prepare_sg(sglist, nelems);
        if (unlikely(npages > PGLIST_NENTS))
                goto bad;

        /* Step 2: Allocate a cluster and context, if necessary. */
        spin_lock_irqsave(&iommu->lock, flags);
        entry = pci_arena_alloc(&iommu->arena, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry < 0L))
                goto bad;

        dma_base = iommu->page_table_map_base +
                (entry << IO_PAGE_SHIFT);

        /* Step 3: Normalize DMA addresses. */
        used = nelems;

        sgtmp = sglist;
        while (used && sgtmp->dma_length) {
                sgtmp->dma_address += dma_base;
                sgtmp++;
                used--;
        }
        used = nelems - used;

        /* Step 4: Create the mappings. */
        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != PCI_DMA_TODEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        fill_sg(entry, devhandle, sglist, used, nelems, prot);

        return used;

bad:
        if (printk_ratelimit())
                WARN_ON(1);
        return 0;
}

static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long flags, i, npages;
        long entry;
        u32 devhandle, bus_addr;

        if (unlikely(direction == PCI_DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
        }

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        devhandle = pcp->pbm->devhandle;

        bus_addr = sglist->dma_address & IO_PAGE_MASK;

        for (i = 1; i < nelems; i++)
                if (sglist[i].dma_length == 0)
                        break;
        i--;
        npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
                  bus_addr) >> IO_PAGE_SHIFT;

        entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        pci_arena_free(&iommu->arena, entry, npages);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
        /* Nothing to do... */
}

static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        /* Nothing to do... */
}

struct pci_iommu_ops pci_sun4v_iommu_ops = {
        .alloc_consistent = pci_4v_alloc_consistent,
        .free_consistent = pci_4v_free_consistent,
        .map_single = pci_4v_map_single,
        .unmap_single = pci_4v_unmap_single,
        .map_sg = pci_4v_map_sg,
        .unmap_sg = pci_4v_unmap_sg,
        .dma_sync_single_for_cpu = pci_4v_dma_sync_single_for_cpu,
        .dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu,
};

/* SUN4V PCI configuration space accessors. */
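/* Accesses outside the PBM's bus range, or to anything other than device 0,
 * function 0 on the PBM's own bus, are not forwarded to the hypervisor;
 * reads of such addresses simply return all ones.
 */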
static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
{
        if (bus == pbm->pci_first_busno) {
                if (device == 0 && func == 0)
                        return 0;
                return 1;
        }

        if (bus < pbm->pci_first_busno ||
            bus > pbm->pci_last_busno)
                return 1;
        return 0;
}

static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
                                  int where, int size, u32 *value)
{
        struct pci_pbm_info *pbm = bus_dev->sysdata;
        u32 devhandle = pbm->devhandle;
        unsigned int bus = bus_dev->number;
        unsigned int device = PCI_SLOT(devfn);
        unsigned int func = PCI_FUNC(devfn);
        unsigned long ret;

        if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
                ret = ~0UL;
        } else {
                ret = pci_sun4v_config_get(devhandle,
                                           HV_PCI_DEVICE_BUILD(bus, device, func),
                                           where, size);
#if 0
                printk("rcfg: [%x:%x:%x:%d]=[%lx]\n",
                       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
                       where, size, ret);
#endif
        }
        switch (size) {
        case 1:
                *value = ret & 0xff;
                break;
        case 2:
                *value = ret & 0xffff;
                break;
        case 4:
                *value = ret & 0xffffffff;
                break;
        };
        return PCIBIOS_SUCCESSFUL;
}

static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
                                   int where, int size, u32 value)
{
        struct pci_pbm_info *pbm = bus_dev->sysdata;
        u32 devhandle = pbm->devhandle;
        unsigned int bus = bus_dev->number;
        unsigned int device = PCI_SLOT(devfn);
        unsigned int func = PCI_FUNC(devfn);
        unsigned long ret;

        if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
                /* Do nothing. */
        } else {
                ret = pci_sun4v_config_put(devhandle,
                                           HV_PCI_DEVICE_BUILD(bus, device, func),
                                           where, size, value);
#if 0
                printk("wcfg: [%x:%x:%x:%d] v[%x] == [%lx]\n",
                       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
                       where, size, value, ret);
#endif
        }
        return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops pci_sun4v_ops = {
        .read = pci_sun4v_read_pci_cfg,
        .write = pci_sun4v_write_pci_cfg,
};

static void pbm_scan_bus(struct pci_controller_info *p,
                         struct pci_pbm_info *pbm)
{
        struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);

        if (!cookie) {
                prom_printf("%s: Critical allocation failure.\n", pbm->name);
                prom_halt();
        }

        /* All we care about is the PBM. */
        memset(cookie, 0, sizeof(*cookie));
        cookie->pbm = pbm;

        pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm);
#if 0
        pci_fixup_host_bridge_self(pbm->pci_bus);
        pbm->pci_bus->self->sysdata = cookie;
#endif

        pci_fill_in_pbm_cookies(pbm->pci_bus, pbm,
                                pbm->prom_node);
        pci_record_assignments(pbm, pbm->pci_bus);
        pci_assign_unassigned(pbm, pbm->pci_bus);
        pci_fixup_irq(pbm, pbm->pci_bus);
        pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
        pci_setup_busmastering(pbm, pbm->pci_bus);
}

static void pci_sun4v_scan_bus(struct pci_controller_info *p)
{
        if (p->pbm_A.prom_node) {
                p->pbm_A.is_66mhz_capable =
                        prom_getbool(p->pbm_A.prom_node, "66mhz-capable");

                pbm_scan_bus(p, &p->pbm_A);
        }
        if (p->pbm_B.prom_node) {
                p->pbm_B.is_66mhz_capable =
                        prom_getbool(p->pbm_B.prom_node, "66mhz-capable");

                pbm_scan_bus(p, &p->pbm_B);
        }

        /* XXX register error interrupt handlers XXX */
}

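/* Build an IRQ for a device interrupt.  The PIL (interrupt priority level)
 * is chosen from the device's PCI base class, so for example network
 * devices interrupt at a higher priority than storage devices.
 */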
static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
                                        struct pci_dev *pdev,
                                        unsigned int devino)
{
        u32 devhandle = pbm->devhandle;
        int pil;

        pil = 4;
        if (pdev) {
                switch ((pdev->class >> 16) & 0xff) {
                case PCI_BASE_CLASS_STORAGE:
                        pil = 4;
                        break;

                case PCI_BASE_CLASS_NETWORK:
                        pil = 6;
                        break;

                case PCI_BASE_CLASS_DISPLAY:
                        pil = 9;
                        break;

                case PCI_BASE_CLASS_MULTIMEDIA:
                case PCI_BASE_CLASS_MEMORY:
                case PCI_BASE_CLASS_BRIDGE:
                case PCI_BASE_CLASS_SERIAL:
                        pil = 10;
                        break;

                default:
                        pil = 4;
                        break;
                };
        }
        BUG_ON(PIL_RESERVED(pil));

        return sun4v_build_irq(devhandle, devino, pil, IBF_PCI);
}

static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
{
        struct pcidev_cookie *pcp = pdev->sysdata;
        struct pci_pbm_info *pbm = pcp->pbm;
        struct resource *res, *root;
        u32 reg;
        int where, size, is_64bit;

        res = &pdev->resource[resource];
        if (resource < 6) {
                where = PCI_BASE_ADDRESS_0 + (resource * 4);
        } else if (resource == PCI_ROM_RESOURCE) {
                where = pdev->rom_base_reg;
        } else {
                /* Somebody might have asked allocation of a non-standard resource */
                return;
        }

        /* XXX 64-bit MEM handling is not %100 correct... XXX */
        is_64bit = 0;
        if (res->flags & IORESOURCE_IO)
                root = &pbm->io_space;
        else {
                root = &pbm->mem_space;
                if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
                    == PCI_BASE_ADDRESS_MEM_TYPE_64)
                        is_64bit = 1;
        }

        size = res->end - res->start;
        pci_read_config_dword(pdev, where, &reg);
        reg = ((reg & size) |
               (((u32)(res->start - root->start)) & ~size));
        if (resource == PCI_ROM_RESOURCE) {
                reg |= PCI_ROM_ADDRESS_ENABLE;
                res->flags |= IORESOURCE_ROM_ENABLE;
        }
        pci_write_config_dword(pdev, where, reg);

        /* This knows that the upper 32-bits of the address
         * must be zero.  Our PCI common layer enforces this.
         */
        if (is_64bit)
                pci_write_config_dword(pdev, where + 4, 0);
}

static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
                                      struct resource *res,
                                      struct resource *root)
{
        res->start += root->start;
        res->end += root->start;
}

/* Use ranges property to determine where PCI MEM, I/O, and Config
 * space are for this PCI bus module.
 */
static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
{
        int i, saw_mem, saw_io;

        saw_mem = saw_io = 0;
        for (i = 0; i < pbm->num_pbm_ranges; i++) {
                struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
                unsigned long a;
                int type;

                type = (pr->child_phys_hi >> 24) & 0x3;
                a = (((unsigned long)pr->parent_phys_hi << 32UL) |
                     ((unsigned long)pr->parent_phys_lo << 0UL));

                switch (type) {
                case 1:
                        /* 16-bit IO space, 16MB */
                        pbm->io_space.start = a;
                        pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
                        pbm->io_space.flags = IORESOURCE_IO;
                        saw_io = 1;
                        break;

                case 2:
                        /* 32-bit MEM space, 2GB */
                        pbm->mem_space.start = a;
                        pbm->mem_space.end = a + (0x80000000UL - 1UL);
                        pbm->mem_space.flags = IORESOURCE_MEM;
                        saw_mem = 1;
                        break;

                case 3:
                        /* XXX 64-bit MEM handling XXX */

                default:
                        break;
                };
        }

        if (!saw_io || !saw_mem) {
                prom_printf("%s: Fatal error, missing %s PBM range.\n",
                            pbm->name,
                            (!saw_io ? "IO" : "MEM"));
                prom_halt();
        }

        printk("%s: PCI IO[%lx] MEM[%lx]\n",
               pbm->name,
               pbm->io_space.start,
               pbm->mem_space.start);
}

static void pbm_register_toplevel_resources(struct pci_controller_info *p,
                                            struct pci_pbm_info *pbm)
{
        pbm->io_space.name = pbm->mem_space.name = pbm->name;

        request_resource(&ioport_resource, &pbm->io_space);
        request_resource(&iomem_resource, &pbm->mem_space);
        pci_register_legacy_regions(&pbm->io_space,
                                    &pbm->mem_space);
}

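/* Mark IOMMU entries the hypervisor (or firmware) has already mapped as
 * in-use in the arena bitmap, so the kernel never hands them out again.
 */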
static void probe_existing_entries(struct pci_pbm_info *pbm,
                                   struct pci_iommu *iommu)
{
        struct pci_iommu_arena *arena = &iommu->arena;
        unsigned long i;
        u32 devhandle;

        devhandle = pbm->devhandle;
        for (i = 0; i < arena->limit; i++) {
                unsigned long ret, io_attrs, ra;

                ret = pci_sun4v_iommu_getmap(devhandle,
                                             HV_PCI_TSBID(0, i),
                                             &io_attrs, &ra);
                if (ret == HV_EOK)
                        __set_bit(i, arena->map);
        }
}

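/* Size the IOMMU from the OBP "virtual-dma" property (base and size of
 * the DVMA window), set up the software arena state, and scan for
 * entries that are already in use.
 */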
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
        struct pci_iommu *iommu = pbm->iommu;
        unsigned long num_tsb_entries, sz;
        u32 vdma[2], dma_mask, dma_offset;
        int err, tsbsize;

        err = prom_getproperty(pbm->prom_node, "virtual-dma",
                               (char *)&vdma[0], sizeof(vdma));
        if (err == 0 || err == -1) {
                /* No property, use default values. */
                vdma[0] = 0x80000000;
                vdma[1] = 0x80000000;
        }

        dma_mask = vdma[0];
        switch (vdma[1]) {
        case 0x20000000:
                dma_mask |= 0x1fffffff;
                tsbsize = 64;
                break;

        case 0x40000000:
                dma_mask |= 0x3fffffff;
                tsbsize = 128;
                break;

        case 0x80000000:
                dma_mask |= 0x7fffffff;
                tsbsize = 128;
                break;

        default:
                prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
                prom_halt();
        };

        num_tsb_entries = tsbsize / sizeof(iopte_t);
        dma_offset = vdma[0];

        /* Setup initial software IOMMU state. */
        spin_lock_init(&iommu->lock);
        iommu->ctx_lowest_free = 1;
        iommu->page_table_map_base = dma_offset;
        iommu->dma_addr_mask = dma_mask;

        /* Allocate and initialize the free area map. */
        sz = num_tsb_entries / 8;
        sz = (sz + 7UL) & ~7UL;
        iommu->arena.map = kmalloc(sz, GFP_KERNEL);
        if (!iommu->arena.map) {
                prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
                prom_halt();
        }
        memset(iommu->arena.map, 0, sz);
        iommu->arena.limit = num_tsb_entries;

        probe_existing_entries(pbm, iommu);
}

static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm)
{
        unsigned int busrange[2];
        int prom_node = pbm->prom_node;
        int err;

        err = prom_getproperty(prom_node, "bus-range",
                               (char *)&busrange[0],
                               sizeof(busrange));
        if (err == 0 || err == -1) {
                prom_printf("%s: Fatal error, no bus-range.\n", pbm->name);
                prom_halt();
        }

        pbm->pci_first_busno = busrange[0];
        pbm->pci_last_busno = busrange[1];
}

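/* Per-PBM initialization: bit 0x40 of the devhandle selects PBM B versus
 * PBM A.  The OBP "ranges", "interrupt-map", and "bus-range" properties
 * are parsed here, then the IOMMU arena is set up.
 */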
static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32 devhandle)
{
        struct pci_pbm_info *pbm;
        int err, i;

        if (devhandle & 0x40)
                pbm = &p->pbm_B;
        else
                pbm = &p->pbm_A;

        pbm->parent = p;
        pbm->prom_node = prom_node;
        pbm->pci_first_slot = 1;

        pbm->devhandle = devhandle;

        sprintf(pbm->name, "SUN4V-PCI%d PBM%c",
                p->index, (pbm == &p->pbm_A ? 'A' : 'B'));

        printk("%s: devhandle[%x] prom_node[%x:%x]\n",
               pbm->name, pbm->devhandle,
               pbm->prom_node, prom_getchild(pbm->prom_node));

        prom_getstring(prom_node, "name",
                       pbm->prom_name, sizeof(pbm->prom_name));

        err = prom_getproperty(prom_node, "ranges",
                               (char *) pbm->pbm_ranges,
                               sizeof(pbm->pbm_ranges));
        if (err == 0 || err == -1) {
                prom_printf("%s: Fatal error, no ranges property.\n",
                            pbm->name);
                prom_halt();
        }

        pbm->num_pbm_ranges =
                (err / sizeof(struct linux_prom_pci_ranges));

        /* Mask out the top 8 bits of the ranges, leaving the real
         * physical address.
         */
        for (i = 0; i < pbm->num_pbm_ranges; i++)
                pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;

        pci_sun4v_determine_mem_io_space(pbm);
        pbm_register_toplevel_resources(p, pbm);

        err = prom_getproperty(prom_node, "interrupt-map",
                               (char *)pbm->pbm_intmap,
                               sizeof(pbm->pbm_intmap));
        if (err != -1) {
                pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
                err = prom_getproperty(prom_node, "interrupt-map-mask",
                                       (char *)&pbm->pbm_intmask,
                                       sizeof(pbm->pbm_intmask));
                if (err == -1) {
                        prom_printf("%s: Fatal error, no "
                                    "interrupt-map-mask.\n", pbm->name);
                        prom_halt();
                }
        } else {
                pbm->num_pbm_intmap = 0;
                memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
        }

        pci_sun4v_get_bus_range(pbm);
        pci_sun4v_iommu_init(pbm);
}

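/* Top-level probe entry point.  The two PBMs of one controller share the
 * same devhandle except for bit 0x40, so a new node is first matched
 * against any half-initialized controller before a fresh controller (plus
 * the per-CPU page lists and per-PBM IOMMU structures) is allocated.
 */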
void sun4v_pci_init(int node, char *model_name)
{
        struct pci_controller_info *p;
        struct pci_iommu *iommu;
        struct linux_prom64_registers regs;
        u32 devhandle;
        int i;

        prom_getproperty(node, "reg", (char *)&regs, sizeof(regs));
        devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff;

        for (p = pci_controller_root; p; p = p->next) {
                struct pci_pbm_info *pbm;

                if (p->pbm_A.prom_node && p->pbm_B.prom_node)
                        continue;

                pbm = (p->pbm_A.prom_node ?
                       &p->pbm_A :
                       &p->pbm_B);

                if (pbm->devhandle == (devhandle ^ 0x40)) {
                        pci_sun4v_pbm_init(p, node, devhandle);
                        return;
                }
        }

        for (i = 0; i < NR_CPUS; i++) {
                unsigned long page = get_zeroed_page(GFP_ATOMIC);

                if (!page)
                        goto fatal_memory_error;

                per_cpu(iommu_pglists, i).pglist = (u64 *) page;
        }

        p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
        if (!p)
                goto fatal_memory_error;
        memset(p, 0, sizeof(*p));

        iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
        if (!iommu)
                goto fatal_memory_error;

        memset(iommu, 0, sizeof(*iommu));
        p->pbm_A.iommu = iommu;

        iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
        if (!iommu)
                goto fatal_memory_error;

        memset(iommu, 0, sizeof(*iommu));
        p->pbm_B.iommu = iommu;

        p->next = pci_controller_root;
        pci_controller_root = p;

        p->index = pci_num_controllers++;
        p->pbms_same_domain = 0;

        p->scan_bus = pci_sun4v_scan_bus;
        p->irq_build = pci_sun4v_irq_build;
        p->base_address_update = pci_sun4v_base_address_update;
        p->resource_adjust = pci_sun4v_resource_adjust;
        p->pci_ops = &pci_sun4v_ops;

        /* Like PSYCHO and SCHIZO we have a 2GB aligned area
         * for memory space.
         */
        pci_memspace_mask = 0x7fffffffUL;

        pci_sun4v_pbm_init(p, node, devhandle);
        return;

fatal_memory_error:
        prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
        prom_halt();
}